MatrixXi Eigen_BF::find_TopK(const Eigen::Ref<const MatrixXf> &mat, int topK){
PARAM_MIPS_TOP_K = topK;
PARAM_QUERY_Q = mat.cols();
MATRIX_Q = mat;
MatrixXi matTopK = MatrixXi::Zero(PARAM_MIPS_TOP_K, PARAM_QUERY_Q);
auto startTime = chrono::high_resolution_clock::now();
float loopin_time, loopout_time = 0.0;
for (int q = 0; q < PARAM_QUERY_Q ; ++q){
auto out_startTime = chrono::high_resolution_clock::now();
priority_queue< IFPair, vector<IFPair>, greater<IFPair> > minQueTopK;
for (int n = 0; n < PARAM_DATA_N; n++){
auto in_startTime = chrono::high_resolution_clock::now();
float fValue = MATRIX_Q.col(q).dot(MATRIX_X.col(n));
// Insert into minQueue
if ((int)minQueTopK.size() < PARAM_MIPS_TOP_K)
minQueTopK.push(IFPair(n, fValue));
else
{
// Insert into minQueue
if (fValue > minQueTopK.top().m_fValue)
{
minQueTopK.pop();
minQueTopK.push(IFPair(n, fValue));
}
}
auto in_durTime = chrono::duration_cast<chrono::microseconds>(chrono::high_resolution_clock::now() - in_startTime);
loopin_time += (float)in_durTime.count() * 1e-3;
}
auto out_durTime = chrono::duration_cast<chrono::microseconds>(chrono::high_resolution_clock::now() - out_startTime);
loopout_time += (float)out_durTime.count() * 1e-3;
for (int n = PARAM_MIPS_TOP_K - 1; n >= 0; --n)
{
// Get point index
matTopK.col(q)(n) = minQueTopK.top().m_iIndex;
minQueTopK.pop();
}
}
auto durTime = chrono::duration_cast<chrono::microseconds>(chrono::high_resolution_clock::now() - startTime);
cout << "Loop in Time : "<< loopin_time << endl;
cout << "Loop out Time : "<< loopout_time << endl;
cout << "TopK Time : "<< (float)durTime.count() * 1e-3 << endl;
return matTopK;
}
In this code I counted the time inside the loop, the time outside the loop and the total time. It turns out that loop out time and total time are approximate, but loop in time and loop out time are very different. My intuition is that the loop in time should be similar to the loop out time, but this is not the case. Can anyone help me to explain the reason for this? Thank you.
ps:
When PARAM_MIPS_TOP_K = 10, PARAM_QUERY_Q = 1000, PARAM_DATA_N = 60000
without any optimization like -O3, in Debug mode
the result is loop in time : 61014ms / loop out time : 119789ms / total time : 120950ms
MatrixXi Eigen_BF::find_TopK(const Eigen::Ref<const MatrixXf> &mat, int topK){
PARAM_MIPS_TOP_K = topK;
PARAM_QUERY_Q = mat.cols();
MATRIX_Q = mat;
MatrixXi matTopK = MatrixXi::Zero(PARAM_MIPS_TOP_K, PARAM_QUERY_Q);
auto startTime = chrono::high_resolution_clock::now();
double loopin_time = 0.0;
double loopout_time = 0.0;
for (int q = 0; q < PARAM_QUERY_Q ; ++q){
auto out_startTime = chrono::high_resolution_clock::now();
priority_queue< IFPair, vector<IFPair>, greater<IFPair> > minQueTopK;
for (int n = 0; n < PARAM_DATA_N; n++){
auto in_startTime = chrono::high_resolution_clock::now();
double fValue = MATRIX_Q.col(q).dot(MATRIX_X.col(n));
// Insert into minQueue
if ((int)minQueTopK.size() < PARAM_MIPS_TOP_K)
minQueTopK.push(IFPair(n, fValue));
else
{
// Insert into minQueue
if (fValue > minQueTopK.top().m_fValue)
{
minQueTopK.pop();
minQueTopK.push(IFPair(n, fValue));
}
}
auto in_durTime = chrono::duration_cast<chrono::microseconds>(chrono::high_resolution_clock::now() - in_startTime);
loopin_time += (double)in_durTime.count() * 1e-3;
}
auto out_durTime = chrono::duration_cast<chrono::microseconds>(chrono::high_resolution_clock::now() - out_startTime);
loopout_time += (double)out_durTime.count() * 1e-3;
for (int n = PARAM_MIPS_TOP_K - 1; n >= 0; --n)
{
// Get point index
matTopK.col(q)(n) = minQueTopK.top().m_iIndex;
minQueTopK.pop();
}
}
auto durTime = chrono::duration_cast<chrono::microseconds>(chrono::high_resolution_clock::now() - startTime);
cout << "Loop in Time : "<< loopin_time << endl;
cout << "Loop out Time : "<< loopout_time << endl;
cout << "TopK Time : "<< (double)durTime.count() * 1e-3 << endl;
return matTopK;
}
I modified the code. But the difference between loopin time and loopout time is still large.
For now, loop in time : 77246ms / loop out time : 126170ms / total time : 126176ms
I am trying to find a fastest way to make square root of any float number in C++. I am using this type of function in a huge particles movement calculation like calculation distance between two particle, we need a square root etc. So If any suggestion it will be very helpful.
I have tried and below is my code
#include <math.h>
#include <iostream>
#include <chrono>
using namespace std;
using namespace std::chrono;
#define CHECK_RANGE 100
inline float msqrt(float a)
{
int i;
for (i = 0;i * i <= a;i++);
float lb = i - 1; //lower bound
if (lb * lb == a)
return lb;
float ub = lb + 1; // upper bound
float pub = ub; // previous upper bound
for (int j = 0;j <= 20;j++)
{
float ub2 = ub * ub;
if (ub2 > a)
{
pub = ub;
ub = (lb + ub) / 2; // mid value of lower and upper bound
}
else
{
lb = ub;
ub = pub;
}
}
return ub;
}
void check_msqrt()
{
for (size_t i = 0; i < CHECK_RANGE; i++)
{
msqrt(i);
}
}
void check_sqrt()
{
for (size_t i = 0; i < CHECK_RANGE; i++)
{
sqrt(i);
}
}
int main()
{
auto start1 = high_resolution_clock::now();
check_msqrt();
auto stop1 = high_resolution_clock::now();
auto duration1 = duration_cast<microseconds>(stop1 - start1);
cout << "Time for check_msqrt = " << duration1.count() << " micro secs\n";
auto start2 = high_resolution_clock::now();
check_sqrt();
auto stop2 = high_resolution_clock::now();
auto duration2 = duration_cast<microseconds>(stop2 - start2);
cout << "Time for check_sqrt = " << duration2.count() << " micro secs";
//cout << msqrt(3);
return 0;
}
output of above code showing the implemented method 4 times more slow than sqrt of math.h file.
I need faster than math.h version.
In short, I do not think it is possible to implement something generally faster than the standard library version of sqrt.
Performance is a very important parameter when implementing standard library functions and it is fair to assume that such a commonly used function as sqrt is optimized as much as possible.
Beating the standard library function would require a special case, such as:
Availability of a suitable assembler instruction - or other specialized hardware support - on the particular system for which the standard library has not been specialized.
Knowledge of the needed range or precision. The standard library function must handle all cases specified by the standard. If the application only needs a subset of that or maybe only requires an approximate result then perhaps an optimization is possible.
Making a mathematical reduction of the calculations or combine some calculation steps in a smart way so an efficient implementation can be made for that combination.
Here's another alternative to binary search. It may not be as fast as std::sqrt, haven't tested it. But it will definitely be faster than your binary search.
auto
Sqrt(float x)
{
using F = decltype(x);
if (x == 0 || x == INFINITY || isnan(x))
return x;
if (x < 0)
return F{NAN};
int e;
x = std::frexp(x, &e);
if (e % 2 != 0)
{
++e;
x /= 2;
}
auto y = (F{-160}/567*x + F{2'848}/2'835)*x + F{155}/567;
y = (y + x/y)/2;
y = (y + x/y)/2;
return std::ldexp(y, e/2);
}
After getting +/-0, nan, inf, and negatives out of the way, it works by decomposing the float into a mantissa in the range of [1/4, 1) times 2e where e is an even integer. The answer is then sqrt(mantissa)* 2e/2.
Finding the sqrt of the mantissa can be guessed at with a least squares quadratic curve fit in the range [1/4, 1]. Then that good guess is refined by two iterations of Newton–Raphson. This will get you within 1 ulp of the correctly rounded result. A good std::sqrt will typically get that last bit correct.
I have also tried with the algorithm mention in https://en.wikipedia.org/wiki/Fast_inverse_square_root, but not found desired result, please check
#include <math.h>
#include <iostream>
#include <chrono>
#include <bit>
#include <limits>
#include <cstdint>
using namespace std;
using namespace std::chrono;
#define CHECK_RANGE 10000
inline float msqrt(float a)
{
int i;
for (i = 0;i * i <= a;i++);
float lb = i - 1; //lower bound
if (lb * lb == a)
return lb;
float ub = lb + 1; // upper bound
float pub = ub; // previous upper bound
for (int j = 0;j <= 20;j++)
{
float ub2 = ub * ub;
if (ub2 > a)
{
pub = ub;
ub = (lb + ub) / 2; // mid value of lower and upper bound
}
else
{
lb = ub;
ub = pub;
}
}
return ub;
}
/* mentioned here -> https://en.wikipedia.org/wiki/Fast_inverse_square_root */
inline float Q_sqrt(float number)
{
union Conv {
float f;
uint32_t i;
};
Conv conv;
conv.f= number;
conv.i = 0x5f3759df - (conv.i >> 1);
conv.f *= 1.5F - (number * 0.5F * conv.f * conv.f);
return 1/conv.f;
}
void check_Qsqrt()
{
for (size_t i = 0; i < CHECK_RANGE; i++)
{
Q_sqrt(i);
}
}
void check_msqrt()
{
for (size_t i = 0; i < CHECK_RANGE; i++)
{
msqrt(i);
}
}
void check_sqrt()
{
for (size_t i = 0; i < CHECK_RANGE; i++)
{
sqrt(i);
}
}
int main()
{
auto start1 = high_resolution_clock::now();
check_msqrt();
auto stop1 = high_resolution_clock::now();
auto duration1 = duration_cast<microseconds>(stop1 - start1);
cout << "Time for check_msqrt = " << duration1.count() << " micro secs\n";
auto start2 = high_resolution_clock::now();
check_sqrt();
auto stop2 = high_resolution_clock::now();
auto duration2 = duration_cast<microseconds>(stop2 - start2);
cout << "Time for check_sqrt = " << duration2.count() << " micro secs\n";
auto start3 = high_resolution_clock::now();
check_Qsqrt();
auto stop3 = high_resolution_clock::now();
auto duration3 = duration_cast<microseconds>(stop3 - start3);
cout << "Time for check_Qsqrt = " << duration3.count() << " micro secs\n";
//cout << Q_sqrt(3);
//cout << sqrt(3);
//cout << msqrt(3);
return 0;
}
I've recently learnt the basics of threading and throught i'd take it for a spin. However doing some tests, it seems that the threadded version of the code is actually slower than the serial code. Can anybody spot any problems with the following program? If there are none (highly doubtful) can you propose a strategy where I do observe speed ups. The other thing that crossed my mind is that this is a simple problem, so maybe the overhead incurred by threading isn't worth the effort. In reality the BealeFunction below will be a system of ODE's, so computational expensive to evaluate.
Here are the results:
/**
* Results
* -------
*
* In serial: fitness = 163.179; computation took: 4085 microseonds
* In serial: fitness = 163.179; computation took: 3606 microseonds
* In serial: fitness = 163.179; computation took: 4288 microseonds
*
* With threading: fitness = 163.179; computation took: 16893 microseonds
* With threading: fitness = 163.179; computation took: 14333 microseonds
* With threading: fitness = 163.179; computation took: 13636 microseonds
*
*/
And the code to generate them:
#include <chrono>
#include <random>
#include <iostream>
#include <thread>
#include <future>
#include <mutex>
#include <vector>
double BealeFunction(double *parameters) {
double x = parameters[0];
double y = parameters[1];
double first = pow(1.5 - x + x * y, 2);
double second = pow(2.25 - x + x * pow(y, 2), 2);
double third = pow(2.625 - x + x * pow(y, 3), 2);
return first + second + third;
};
double inSerial(std::vector<std::vector<double>> matrix){
double total = 0;
for (auto & i : matrix){
total += BealeFunction(i.data());
}
return total;
}
double withThreading(std::vector<std::vector<double>> matrix){
double total = 0;
std::mutex mtx;
std::vector<std::shared_future<double>> futures;
auto compute1 = [&](int startIndex, int endIndex) {
double sum = 0;
for (int i = startIndex; i <= endIndex; i++) {
sum += BealeFunction(matrix[i].data());
}
return sum;
};
// deal with situation where population size < hardware_concurrency.
int numThreads = 0;
if (matrix.size() < (int) std::thread::hardware_concurrency() - 1) {
numThreads = matrix.size() - 1; // account for 0 index
} else {
numThreads = (int) std::thread::hardware_concurrency() - 1; // account for main thread
}
int numPerThread = floor(matrix.size() / numThreads);
int remainder = matrix.size() % numThreads;
int startIndex = 0;
int endIndex = numPerThread;
for (int i = 0; i < numThreads; i++) {
if (i < remainder) {
// we need to add one more job for these threads
startIndex = i * (numPerThread + 1);
endIndex = startIndex + numPerThread;
} else {
startIndex = i * numPerThread + remainder;
endIndex = startIndex + (numPerThread - 1);
}
std::cout << "thread " << i << "; start index: " << startIndex << "; end index: " << endIndex << std::endl;
std::shared_future<double> f = std::async(std::launch::async, compute1, startIndex, endIndex);
futures.push_back(f);
}
// now collect the results from futures
for (auto &future : futures) {
total += future.get();
}
return total;
}
int main() {
auto start = std::chrono::steady_clock::now();
int N = 2000;
int M = 2;
// (setup code)
std::vector<std::vector<double>> matrix(N, std::vector<double>(M));
int seed = 5;
std::default_random_engine e(seed);
std::uniform_real_distribution<double> dist1(2.9, 3.1);
std::uniform_real_distribution<double> dist2(0.4, 0.6);
for (int i = 0; i < N; i++) {
for (int j = 0; j < M; j++) {
matrix[i][0] = dist1(e);
matrix[i][1] = dist2(e);
}
}
double total = withThreading(matrix);
// double total = inSerial(matrix);
auto end = std::chrono::steady_clock::now();
std::cout << "fitness: " << total << std::endl;
std::cout << "computation took: " << std::chrono::duration_cast<std::chrono::microseconds>(end - start).count()
<< " microseonds" << std::endl;
}
Since c++17 std library support parallel algorithm, I thought it would be the go-to option for us, but after comparing with tbb and openmp, I changed my mind, I found the std library is much slower.
By this post, I want to ask for professional advice about whether I should abandon the std library's parallel algorithm, and use tbb or openmp, thanks!
Env:
Mac OSX, Catalina 10.15.7
GNU g++-10
Benchmark code:
#include <algorithm>
#include <cmath>
#include <chrono>
#include <execution>
#include <iostream>
#include <tbb/parallel_for.h>
#include <vector>
const size_t N = 1000000;
double std_for() {
auto values = std::vector<double>(N);
size_t n_par = 5lu;
auto indices = std::vector<size_t>(n_par);
std::iota(indices.begin(), indices.end(), 0lu);
size_t stride = static_cast<size_t>(N / n_par) + 1;
std::for_each(
std::execution::par,
indices.begin(),
indices.end(),
[&](size_t index) {
int begin = index * stride;
int end = (index+1) * stride;
for (int i = begin; i < end; ++i) {
values[i] = 1.0 / (1 + std::exp(-std::sin(i * 0.001)));
}
});
double total = 0;
for (double value : values)
{
total += value;
}
return total;
}
double tbb_for() {
auto values = std::vector<double>(N);
tbb::parallel_for(
tbb::blocked_range<int>(0, values.size()),
[&](tbb::blocked_range<int> r) {
for (int i=r.begin(); i<r.end(); ++i) {
values[i] = 1.0 / (1 + std::exp(-std::sin(i * 0.001)));
}
});
double total = 0;
for (double value : values) {
total += value;
}
return total;
}
double omp_for()
{
auto values = std::vector<double>(N);
#pragma omp parallel for
for (int i=0; i<values.size(); ++i) {
values[i] = 1.0 / (1 + std::exp(-std::sin(i * 0.001)));
}
double total = 0;
for (double value : values) {
total += value;
}
return total;
}
double seq_for()
{
auto values = std::vector<double>(N);
for (int i=0; i<values.size(); ++i) {
values[i] = 1.0 / (1 + std::exp(-std::sin(i * 0.001)));
}
double total = 0;
for (double value : values) {
total += value;
}
return total;
}
void time_it(double(*fn_ptr)(), const std::string& fn_name) {
auto t1 = std::chrono::high_resolution_clock::now();
auto rez = fn_ptr();
auto t2 = std::chrono::high_resolution_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::microseconds>( t2 - t1 ).count();
std::cout << fn_name << ", rez = " << rez << ", dur = " << duration << std::endl;
}
int main(int argc, char** argv) {
std::string op(argv[1]);
if (op == "std_for") {
time_it(&std_for, op);
} else if (op == "omp_for") {
time_it(&omp_for, op);
} else if (op == "tbb_for") {
time_it(&tbb_for, op);
} else if (op == "seq_for") {
time_it(&seq_for, op);
}
}
Compile options:
g++ --std=c++17 -O3 b.cpp -ltbb -I /usr/local/include -L /usr/local/lib -fopenmp
Results:
std_for, rez = 500106, dur = 11119
tbb_for, rez = 500106, dur = 7372
omp_for, rez = 500106, dur = 4781
seq_for, rez = 500106, dur = 27910
We can see that std_for is faster than seq_for(sequential for-loop), but it's still much slower than tbb and openmp.
UPDATE
As people suggested in comments, I run each for separately to be fair. The above code is updated, and results as follows,
>>> ./a.out seq_for
seq_for, rez = 500106, dur = 29885
>>> ./a.out tbb_for
tbb_for, rez = 500106, dur = 10619
>>> ./a.out omp_for
omp_for, rez = 500106, dur = 10052
>>> ./a.out std_for
std_for, rez = 500106, dur = 12423
And like ppl said, running the 4 versions in a row is not fair, compared to the previous results.
You already found that it matters what exactly is to be measured and how this is done. Your final task will certainty be quite different from this simple exercise and not entirely reflect the results found here.
Besides caching and warming-up that are affected by the sequence of doing tasks (you studied this explicitly in your updated question) there is also another issue in your example you should consider.
The actual parallel code is what matters. If this does not determine your performance/runtime than parallelization is not the right solution. But in your example you measure also resource allocation, initialization and final computation. If those drive the real costs in your final application, again, parallelization is not the silver bullet. Thus, for a fair comparison and to really measure the actual parallel code execution performance. I suggest to modify your code along this line (sorry, I don't have openmp installed) and continue your studies:
#include <algorithm>
#include <cmath>
#include <chrono>
#include <execution>
#include <iostream>
#include <tbb/parallel_for.h>
#include <vector>
const size_t N = 10000000; // #1
void std_for(std::vector<double>& values,
std::vector<size_t> const& indices,
size_t const stride) {
std::for_each(
std::execution::par,
indices.begin(),
indices.end(),
[&](size_t index) {
int begin = index * stride;
int end = (index+1) * stride;
for (int i = begin; i < end; ++i) {
values[i] = 1.0 / (1 + std::exp(-std::sin(i * 0.001)));
}
});
}
void tbb_for(std::vector<double>& values) {
tbb::parallel_for(
tbb::blocked_range<int>(0, values.size()),
[&](tbb::blocked_range<int> r) {
for (int i=r.begin(); i<r.end(); ++i) {
values[i] = 1.0 / (1 + std::exp(-std::sin(i * 0.001)));
}
});
}
/*
double omp_for()
{
auto values = std::vector<double>(N);
#pragma omp parallel for
for (int i=0; i<values.size(); ++i) {
values[i] = 1.0 / (1 + std::exp(-std::sin(i * 0.001)));
}
double total = 0;
for (double value : values) {
total += value;
}
return total;
}
*/
void seq_for(std::vector<double>& values)
{
for (int i=0; i<values.size(); ++i) {
values[i] = 1.0 / (1 + std::exp(-std::sin(i * 0.001)));
}
}
void time_it(void(*fn_ptr)(std::vector<double>&), const std::string& fn_name) {
std::vector<double> values = std::vector<double>(N);
auto t1 = std::chrono::high_resolution_clock::now();
fn_ptr(values);
auto t2 = std::chrono::high_resolution_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::microseconds>( t2 - t1 ).count();
double total = 0;
for (double value : values) {
total += value;
}
std::cout << fn_name << ", res = " << total << ", dur = " << duration << std::endl;
}
void time_it_std(void(*fn_ptr)(std::vector<double>&, std::vector<size_t> const&, size_t const), const std::string& fn_name) {
std::vector<double> values = std::vector<double>(N);
size_t n_par = 5lu; // #2
auto indices = std::vector<size_t>(n_par);
std::iota(indices.begin(), indices.end(), 0lu);
size_t stride = static_cast<size_t>(N / n_par) + 1;
auto t1 = std::chrono::high_resolution_clock::now();
fn_ptr(values, indices, stride);
auto t2 = std::chrono::high_resolution_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::microseconds>( t2 - t1 ).count();
double total = 0;
for (double value : values) {
total += value;
}
std::cout << fn_name << ", res = " << total << ", dur = " << duration << std::endl;
}
int main(int argc, char** argv) {
std::string op(argv[1]);
if (op == "std_for") {
time_it_std(&std_for, op);
// } else if (op == "omp_for") {
//time_it(&omp_for, op);
} else if (op == "tbb_for") {
time_it(&tbb_for, op);
} else if (op == "seq_for") {
time_it(&seq_for, op);
}
}
On my (slow) system this results in:
std_for, res = 5.00046e+06, dur = 66393
tbb_for, res = 5.00046e+06, dur = 51746
seq_for, res = 5.00046e+06, dur = 196156
I note here that the difference from seq_for to tbb_for has further increased. It is now ~4x while in your example it looks more like ~3x. And std_for is still about 20..30% slower than tbb_for.
However, there are further parameters. After increasing N (see #1) by a factor of 10 (ok, this is not very important) and n_par (see #2) from 5 to 100 (this is important) the results are
tbb_for, res = 5.00005e+07, dur = 486179
std_for, res = 5.00005e+07, dur = 479306
Here std_for is on-par with tbb_for!
Thus, to answer your question: I clearly would NOT discard c++17 std parallelization right away.
Perhaps you already know, but something I don't see mentioned here is the fact that (at least for gcc and clang) the PSTL is actually implemented using/backended by TBB, OpenMP (currently on clang, only, I believe), or a sequential version of it.
I'm guessing you're using libc++ since you are on Mac; as far as I know, for Linux at least, the LLVM distributions do not come with the PSTL enabled, and if building PSTL and libcxx/libcxxabi from source, it defaults to a sequential backend.
https://github.com/llvm/llvm-project/blob/main/pstl/CMakeLists.txt
https://github.com/gcc-mirror/gcc/blob/master/libstdc%2B%2B-v3/include/pstl/pstl_config.h
OpenMp is good for straight forward parallel codding.
On the other hand TBB use work-stealing mechanism which can give you
better performance for loops that are imbalance and nested.
I prefer TBB for complex and nested parallelism over OpenMP.(OpenMP
has a huge over-head for the nested parallelism)
I am trying to view what the run-time on my code is. The code is my attempt at Project Euler Problem 5. When I try to output the run time it gives 0ns.
#define MAX_DIVISOR 20
bool isDivisible(long, int);
int main() {
auto begin = std::chrono::high_resolution_clock::now();
int d = 2;
long inc = 1;
long i = 1;
while (d < (MAX_DIVISOR + 1)) {
if ((i % d) == 0) {
inc = i;
i = inc;
d++;
}
else {
i += inc;
}
}
auto end = std::chrono::high_resolution_clock::now();
printf("Run time: %llu ns\n", (std::chrono::duration_cast<std::chrono::nanoseconds>(end - begin).count())); // Gives 0 here.
std::cout << "ANS: " << i << std::endl;
system("pause");
return 0;
}
The timing resolulution of std::chrono::high_resolution_clock::now() is system dependent.
You can find out an order of magnitude with the small piece of code here (edit: here you have a more accurate version):
chrono::nanoseconds mn(1000000000); // asuming the resolution is higher
for (int i = 0; i < 5; i++) {
using namespace std::chrono;
nanoseconds dt;
long d = 1000 * pow(10, i);
for (long e = 0; e < 10; e++) {
long j = d + e*pow(10, i)*100;
cout << j << " ";
auto begin = high_resolution_clock::now();
while (j>0)
k = ((j-- << 2) + 1) % (rand() + 100);
auto end = high_resolution_clock::now();
dt = duration_cast<nanoseconds>(end - begin);
cout << dt.count() << "ns = "
<< duration_cast<milliseconds>(dt).count() << " ms" << endl;
if (dt > nanoseconds(0) && dt < mn)
mn = dt;
}
}
cout << "Minimum resolution observed: " << mn.count() << "ns\n";
where k is a global volatile long k; in order to avoid optimizer to interfere too much.
Under windows, I obtain here 15ms. Then you have platform specific alternatives. For windows, there is a high performance cloeck that enables you to measure timebelow 10µs range (see here http://msdn.microsoft.com/en-us/library/windows/desktop/dn553408%28v=vs.85%29.aspx) but still not in the nanosecond range.
If you want to time your code very accurately, you could reexecute it a big loop, and dividint the total time by the number of iterations.
Estimation you are going to do is not precise, better approach is to measure CPU time consumption of you program (because other processes are also running concurrently with you process, so time that you are trying to measure can be greatly affected if CPU intensitive tasks are running in parallel with you process).
So my advise use already implemented profilers if you want to estimate your code performance.
Considering your task, OS if doesn`t provide needed precision for time, you need to increase total time your are trying to estimate, the esiest way - run program n times & calculate the avarage, this method provides such advantage that by avareging - you can eleminate errors that arose from CPU intensitive tasks running concurrently with you process.
Here is code snippet of how I see the possible implementation:
#include <iostream>
using namespace std;
#define MAX_DIVISOR 20
bool isDivisible(long, int);
void doRoutine()
{
int d = 2;
long inc = 1;
long i = 1;
while (d < (MAX_DIVISOR + 1))
{
if (isDivisible(i, d))
{
inc = i;
i = inc;
d++;
}
else
{
i += inc;
}
}
}
int main() {
auto begin = std::chrono::high_resolution_clock::now();
const int nOfTrials = 1000000;
for (int i = 0; i < nOfTrials; ++i)
doRoutine();
auto end = std::chrono::high_resolution_clock::now();
printf("Run time: %llu ns\n", (std::chrono::duration_cast<std::chrono::nanoseconds>(end - begin).count()/ nOfTrials)); // Gives 0 here.
std::cout << "ANS: " << i << std::endl;
system("pause");
return 0;