This question already has answers here:
Measuring execution time of a function in C++
(14 answers)
Closed 6 years ago.
I want to measure the time taken by the quickSort function in my code. I used clock() and it returned 0, and I also tried chrono, which still gave me 0.
I also made my array large.
I don't know if my code is wrong or not !
this is my code
#include <iostream>
#include <chrono>
#include <ctime>
#include <ratio>
using namespace std;
using namespace chrono;
/// In-place quicksort over the inclusive range arr[left..right]
/// (Hoare-style partition around a midpoint pivot).
/// @param arr   array to sort in place
/// @param left  index of the first element of the range
/// @param right index of the last element of the range
void quick_sort(int *arr, int left, int right) {
    int i = left, j = right;
    // left + (right - left) / 2 avoids the signed overflow that
    // (left + right) / 2 can hit for very large index ranges.
    int pivot = arr[left + (right - left) / 2];
    while (i <= j) {
        while (arr[i] < pivot)
            i++;
        while (arr[j] > pivot)
            j--;
        if (i <= j) {
            std::swap(arr[i], arr[j]);
            i++;
            j--;
        }
    }
    // Recurse into whichever sides still contain more than one element.
    if (left < j)
        quick_sort(arr, left, j);
    if (i < right)
        quick_sort(arr, i, right);
}
int main()
{
    // Sort the 30-element sample array and time just the quick_sort call.
    int arr[30] = {4,2,5,3,8,9,7,10,54,23,65,78,10,44,56,91,75,79,42,81,10,57,23,651,78,100,47,50,71,715};
    const auto start = std::chrono::high_resolution_clock::now();
    quick_sort(arr, 0, 29);
    const auto stop = std::chrono::high_resolution_clock::now();
    // duration<double> holds seconds; the conversion from the clock's
    // native tick type is implicit for floating-point representations.
    const std::chrono::duration<double> elapsed = stop - start;
    std::cout << "it takes " << elapsed.count() << " seconds" << std::endl;
    return 0;
}
Simple implementation:
#include <iostream>
#include <iomanip>
#include <string>
// benchmark
#include <limits>
#include <random>
#include <chrono>
#include <algorithm>
#include <functional>
// Minimal RAII stopwatch: construction records the start time;
// end<DurationUnit>() reports the elapsed time in that unit.
class Clock
{
    std::chrono::time_point<std::chrono::steady_clock> _start;

public:
    static inline std::chrono::time_point<std::chrono::steady_clock> now()
    {
        return std::chrono::steady_clock::now();
    }

    Clock()
        : _start{now()}
    {
    }

    // Elapsed ticks of DurationUnit since construction.
    template<class DurationUnit>
    std::size_t end()
    {
        const auto elapsed = now() - _start;
        return std::chrono::duration_cast<DurationUnit>(elapsed).count();
    }
};
Usage:
int main()
{
    {
        Clock clock;  // timing starts at construction
        business();   // workload being measured
        const double unit_time = static_cast<double>(clock.end<std::chrono::nanoseconds>());
        std::cout << std::setw(40) << "business(): "
                  << std::setprecision(3) << unit_time << " ns\n";
    }
}
Related
I'm doing a benchmark on boost::interprocess:vector and std::vector, since I'm gonna use shared memory in my program but I'm concerned with any potential performance issues.
My benchmark is simply random accessing a vector, and it turned out that std::vector is almost 2x faster than boost::interprocess::vector.
Note: in the benchmark I only have a single process, and I don't do any synchronization manually.
I don't know where the bottleneck is. I have three guesses:
shared memory make it slower
boost vector has a slower implementation
boost shared memory as well as its containers have some overhead somehow, i.e., if I use mmap and do things in a plain way, it will be better
And what further experiment should I do to figure out this? Or tune something to make it faster? Any idea?
Here is the benchmark code:
for boost::interprocess::vector
#include <boost/interprocess/allocators/allocator.hpp>
#include <boost/interprocess/containers/vector.hpp>
#include <boost/interprocess/managed_shared_memory.hpp>
#include <chrono>
#include <cstdlib>
#include <iostream>
#include <random>
#include <string>
#include <vector>
using namespace boost::interprocess;
// Allocator that draws from the shared-memory segment, and a vector type
// whose doubles live inside that segment.
typedef allocator<double, managed_shared_memory::segment_manager> ShmemAllocator;
typedef vector<double, ShmemAllocator> MyVector;
const int total_size = 2000 * 2000; // elements in the data vector
const int mem_size = 2000 * 2000 * 8 * 2; // segment bytes: 2x the payload for bookkeeping overhead
const int query_size = 100000; // random reads per timed run
// Benchmark: random reads from a boost::interprocess::vector living in a
// managed shared-memory segment; prints the elapsed microseconds.
int main(int argc, char *argv[]) {
    // Generate the test data (uniform doubles) and the random query indices.
    std::uniform_real_distribution<double> unif(0, 10000);
    std::default_random_engine re;
    std::vector<double> data;
    data.reserve(total_size);
    for (int i = 0; i < total_size; ++i) {
        data.push_back(unif(re));
    }
    std::vector<int> query;
    query.reserve(query_size);
    for (int i = 0; i < query_size; ++i) {
        query.push_back(rand() % total_size);
    }
    // RAII guard: remove the segment on entry (stale runs) and on exit.
    struct shm_remove {
        shm_remove() { shared_memory_object::remove("MySharedMemory"); }
        ~shm_remove() { shared_memory_object::remove("MySharedMemory"); }
    } remover;
    managed_shared_memory segment(create_only, "MySharedMemory", mem_size);
    const ShmemAllocator alloc_inst(segment.get_segment_manager());
    MyVector *myvector = segment.construct<MyVector>("MyVector")(alloc_inst);
    myvector->reserve(total_size);
    for (auto d : data) myvector->push_back(d);
    // BUG FIX: the original stored each read into an unused local `res`,
    // so at -O2/-O3 the optimizer may delete the entire timed loop and the
    // benchmark measures nothing. Accumulate into a sink and publish it.
    double sink = 0.0;
    auto t1 = std::chrono::high_resolution_clock::now();
    for (auto q : query) {
        sink += (*myvector)[q];
    }
    auto t2 = std::chrono::high_resolution_clock::now();
    volatile double keep = sink;  // forces the loads to be materialized
    (void)keep;
    std::cout << std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count() << std::endl;
    return 0;
}
for std::vector
#include <boost/interprocess/allocators/allocator.hpp>
#include <boost/interprocess/containers/vector.hpp>
#include <boost/interprocess/managed_shared_memory.hpp>
#include <chrono>
#include <cstdlib> //std::system
#include <iostream>
#include <random>
#include <string>
#include <vector>
const int total_size = 2000 * 2000; // elements in the data vector
const int mem_size = 2000 * 2000 * 8 * 8; // unused in this std::vector variant
const int query_size = 100000; // random reads per timed run
// Benchmark: random reads from a plain std::vector (the baseline for the
// shared-memory comparison); prints the elapsed microseconds.
int main(int argc, char *argv[]) {
    // Generate the test data (uniform doubles) and the random query indices.
    std::uniform_real_distribution<double> unif(0, 10000);
    std::default_random_engine re;
    std::vector<double> data;
    data.reserve(total_size);
    for (int i = 0; i < total_size; ++i) {
        data.push_back(unif(re));
    }
    std::vector<int> query;
    query.reserve(query_size);
    for (int i = 0; i < query_size; ++i) {
        query.push_back(rand() % total_size);
    }
    std::vector<double> myvector;
    myvector.reserve(total_size);
    for (auto d : data) myvector.push_back(d);
    // BUG FIX: the original stored each read into an unused local `res`,
    // so at -O2/-O3 the optimizer may delete the entire timed loop and the
    // benchmark measures nothing. Accumulate into a sink and publish it.
    double sink = 0.0;
    auto t1 = std::chrono::high_resolution_clock::now();
    for (auto q : query) {
        sink += myvector[q];
    }
    auto t2 = std::chrono::high_resolution_clock::now();
    volatile double keep = sink;  // forces the loads to be materialized
    (void)keep;
    std::cout << std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count() << std::endl;
    return 0;
}
For this small code:
#include <thread>
#include <iostream>
#include <memory>
#include <vector>
#include <algorithm>
using namespace std;
// Worker body: prints "<thread-id> <task-id>". Note that insertions from
// concurrently running threads may interleave, since no lock guards cout.
void do_work(int id)
{
    const auto tid = this_thread::get_id();
    cout << tid << " " << id << endl;
}
int main()
{
    // Launch 20 workers, then join them all before main returns.
    vector<thread> threads;
    threads.reserve(20);
    for (int i = 0; i < 20; ++i) {
        threads.emplace_back(do_work, i);
    }
    for (auto& t : threads) {
        t.join();
    }
}
Option 1:
for_each(begin(threads), end(threads), [](thread & t){
t.join();
});
Option2:
for_each(begin(threads), end(threads), mem_fn(&thread::join));
Question:
1. which usage is better?
2. What is mem_fn for? Could you give an easy explanation?
For fun I'm making a singleton that is used to shuffle an array. I'm trying to shuffle an array in C++ with std::shuffle, but the shuffle produces the same result every time I run the program.
The code:
// Example program
#include <iostream>
#include <string>
#pragma once
#include <memory>
#include <random>
#include <ctime>
#include <algorithm>
// Lazily-constructed singleton that owns a single time-seeded random engine.
// Handing out copies of that one engine (instead of default-constructing a
// new engine per call) is what makes shuffles differ between program runs.
class RandomPicker
{
public:
    ~RandomPicker();
    // Returns a copy of the shared, time-seeded engine.
    std::default_random_engine getRandomEngine();
    static std::shared_ptr<RandomPicker> getInstance();
private:
    // Seeded exactly once, when the singleton is first constructed.
    std::default_random_engine dre = std::default_random_engine(time(0));
    RandomPicker();
    static std::shared_ptr<RandomPicker> instance;
};

std::shared_ptr<RandomPicker> RandomPicker::instance = nullptr;

RandomPicker::RandomPicker()
{
}

RandomPicker::~RandomPicker()
{
}

std::default_random_engine RandomPicker::getRandomEngine()
{
    // BUG FIX: the original returned std::default_random_engine(), a fresh
    // engine with the default seed, so every program run shuffled the same
    // way. Return the time-seeded member engine instead.
    return dre;
}

std::shared_ptr<RandomPicker> RandomPicker::getInstance()
{
    // Lazy init; note this is not thread-safe (matches the original).
    if (instance == nullptr)
    {
        instance.reset(new RandomPicker);
    }
    return instance;
}
int main()
{
std::array<int,5> foo {1,2,3,4,5};
std::shared_ptr<RandomPicker> r = RandomPicker::getInstance();
shuffle (foo.begin(), foo.end(), r->getRandomEngine());
std::cout << "shuffled elements:";
for (int& x: foo) std::cout << ' ' << x;
std::cout << '\n';
}
Link to code showing it:
I thought using
std::default_random_engine dre = std::default_random_engine(time(0));
//notice the time(0)
would produce a different result every time, but the result is always:
shuffled elements: 3 1 5 4 2
Why is the array not shuffled in a different way every time the program runs?
std::default_random_engine RandomPicker::getRandomEngine()
{
return std::default_random_engine();
}
Your getRandomEngine() always returns an engine with the default seed.
Your getRandomEngine() should probably return dre:
std::default_random_engine RandomPicker::getRandomEngine()
{
return dre;
}
Even just trying to get started, I get an error with this code:
note: candidate template ignored: could not match 'double' against 'long'
::
#include <numeric>
#include <chrono>
using namespace std::chrono_literals;
// how to write a function that will take any duration and turn it
// into a float representation of seconds?
// Accept ANY std::chrono::duration and express it as floating-point seconds.
// The original signature duration<double, T> only deduced durations whose
// representation was already double, hence the quoted compile error
// "could not match 'double' against 'long'" for 1ms / 1s.
template <class Rep, class Period>
void go(std::chrono::duration<Rep, Period> d) {
    // Converting to duration<float> (period = seconds) needs no
    // duration_cast: floating-point representations never truncate,
    // so the implicit conversion is allowed for every period.
    float seconds = std::chrono::duration<float>(d).count();
    (void)seconds;  // the question's body only wanted the value computed
}
int main()
{
    // Exercise go() with two different tick types / periods.
    using namespace std::chrono;
    go(milliseconds(1));
    go(seconds(1));
}
I can only guess at what you're trying to accomplish, and here is my best guess:
#include <chrono>
#include <iostream>
using namespace std::chrono_literals;
// Print the duration's length in seconds. Any chrono duration converts
// implicitly to duration<float>, because float representations accept
// every tick period without truncation.
void go(std::chrono::duration<float> d) {
    const float secs = d.count();
    std::cout << secs << '\n';
}
int main()
{
    // 1ms prints 0.001 and 1s prints 1 — both arrive as float seconds.
    using namespace std::chrono_literals;
    go(1ms);
    go(1s);
}
This outputs:
0.001
1
Cast to float and call count():
#include <iostream>
#include <numeric>
#include <chrono>
// Convert any chrono duration to seconds, returned as a float.
// The conversion to duration<float> is implicit: float representations
// may be constructed from any period without a duration_cast.
template< class T, class P >
float to_secs(std::chrono::duration< T, P > t)
{
    return std::chrono::duration<float>(t).count();
}
int main(int argc, char* argv[])
{
    // Expected output, one value per line: 0.001, 60, 3600.
    std::cout << to_secs(std::chrono::milliseconds(1)) << std::endl;
    std::cout << to_secs(std::chrono::minutes(1)) << std::endl;
    std::cout << to_secs(std::chrono::hours(1)) << std::endl;
    return 0;
}
// Forward declaration kept for source compatibility.
template<typename duration_t> float seconds(const duration_t &d);

// Convert any std::chrono::duration to float seconds.
//
// BUG FIX: the original derived a per-second tick count with integral
// duration_cast round-trips, which truncates for periods that neither
// divide nor are divided by one second exactly — e.g.
// duration<int, ratio<2,3>>(3) (= 2 s) was reported as 3.
// Converting through duration<float> handles every period exactly.
template<class Rep, class Period>
float seconds(const std::chrono::duration<Rep, Period> &d)
{
    return std::chrono::duration<float>(d).count();
}
I ported a Java GC test program to C++ (see the code below) as well as Python. The Java and Python performance is much greater than C++ and I was thinking this was due to all the calls to new that have to be done to create the strings each time. I've tried using Boost's fast_pool_allocator but that actually worsened performance from 700ms to 1200ms. Am I using the allocator wrong, or is there something else I should be doing?
EDIT: Compiled with g++ -O3 -march=native --std=c++11 garbage.cpp -lboost_system. g++ is version 4.8.1
One iteration takes in Python is about 300ms and with Java about 50ms. std::allocator gives about 700ms and boost::fast_pool_allocator gives about 1200ms.
#include <string>
#include <vector>
#include <chrono>
#include <list>
#include <iostream>
#include <boost/pool/pool_alloc.hpp>
#include <memory>
//#include <gc/gc_allocator.h>
using namespace std;
#include <sstream>
// Character allocator under test (swap the typedef to compare pool vs std).
typedef boost::fast_pool_allocator<char> c_allocator;
//typedef std::allocator<char> c_allocator;
typedef basic_string<char, char_traits<char>, c_allocator> pool_string;

namespace patch {
    // std::to_string substitute that yields a pool_string, formatting
    // through a stream whose buffer also uses the pool allocator.
    template <typename T> pool_string to_string(const T& value) {
        std::basic_stringstream<char, char_traits<char>, c_allocator> buffer;
        buffer << value;
        return buffer.str();
    }
}
#include "mytime.hpp"
// Allocation-churn benchmark ported from a Java GC test: each pass builds
// one million pool_strings in `outer`, clears them, and prints the pass time.
class Garbage {
public:
vector<pool_string> outer;
vector<pool_string> old;
const int nThreads = 1;
//static auto time = chrono::high_resolution_clock();
// Run 10 timed passes; the first pass also rebuilds the long-lived `old` set.
void go() {
// outer.resize(1000000);
//old.reserve(1000000);
auto tt = mytime::msecs();
for (int i = 0; i < 10; ++i) {
// i % 100 == 0 only holds for i == 0 within these 10 iterations.
if (i % 100 == 0) {
cout << "DOING AN OLD" << endl;
doOld();
tt = mytime::msecs(); // restart the clock so doOld() is not counted
}
for (int j = 0; j < 1000000/nThreads; ++j)
outer.push_back(patch::to_string(j));
outer.clear();
auto t = mytime::msecs();
cout << (t - tt) << endl;
tt = t;
}
}
// Rebuild the long-lived strings (the "old generation" in the GC analogy).
void doOld() {
old.clear();
for (int i = 0; i < 1000000/nThreads; ++i)
old.push_back(patch::to_string(i));
}
};
int main() {
Garbage().go();
}
The problem is you're using a new string stream each time to convert an integer.
Fix it:
namespace patch {
    // lexical_cast formats the value directly; no stringstream is
    // constructed per call, which was the dominant cost of the original.
    template <typename T> pool_string to_string(const T& value) {
        return boost::lexical_cast<pool_string>(value);
    }
}
Now the timings are:
DOING AN OLD
0.175462
0.0670085
0.0669926
0.0687969
0.0692518
0.0669318
0.0669196
0.0669187
0.0668962
0.0669185
real 0m0.801s
user 0m0.784s
sys 0m0.016s
See it Live On Coliru
Full code for reference:
#include <boost/pool/pool_alloc.hpp>
#include <chrono>
#include <iostream>
#include <list>
#include <memory>
#include <sstream>
#include <string>
#include <vector>
#include <boost/lexical_cast.hpp>
//#include <gc/gc_allocator.h>
using string = std::string;
namespace patch {
    // std::to_string substitute built on boost::lexical_cast, avoiding a
    // per-call stringstream.
    template <typename T> string to_string(const T& value) {
        return boost::lexical_cast<string>(value);
    }
}
// Simple stopwatch: reset() restarts it, elapsed() returns the seconds
// since the last reset as a double (nanosecond granularity).
class Timer
{
    typedef std::chrono::high_resolution_clock clock;
    clock::time_point _start;
public:
    Timer() { reset(); }
    void reset() { _start = now(); }
    double elapsed()
    {
        using namespace std::chrono;
        const auto delta = now() - _start;
        const auto ns = duration_cast<nanoseconds>(delta).count();
        return ns * 1.0e-9;
    }
    clock::time_point now()
    {
        return clock::now();
    }
};
// Same benchmark as the question, but patch::to_string now formats via
// boost::lexical_cast instead of building a stringstream per call.
class Garbage {
public:
std::vector<string> outer;
std::vector<string> old;
const int nThreads = 1;
void go() {
// NOTE(review): resize(1000000) pre-fills `outer` with a million empty
// strings and push_back then appends AFTER them; reserve() looks like the
// intent (the question's version had this line commented out) — confirm.
outer.resize(1000000);
//old.reserve(1000000);
Timer timer;
for (int i = 0; i < 10; ++i) {
// i % 100 == 0 only holds for i == 0 within these 10 iterations.
if (i % 100 == 0) {
std::cout << "DOING AN OLD" << std::endl;
doOld();
}
for (int j = 0; j < 1000000/nThreads; ++j)
outer.push_back(patch::to_string(j));
outer.clear();
std::cout << timer.elapsed() << std::endl;
timer.reset();
}
}
// Rebuild the long-lived "old generation" strings.
void doOld() {
old.clear();
for (int i = 0; i < 1000000/nThreads; ++i)
old.push_back(patch::to_string(i));
}
};
int main() {
Garbage().go();
}
Since I don't use boost on my machine, I simplified the code to use standard C++11 to_string (thus accidentally "fixing" the problem sehe found), and got this:
#include <string>
#include <vector>
#include <chrono>
#include <list>
#include <iostream>
#include <memory>
//#include <gc/gc_allocator.h>
#include <sstream>
using namespace std;
// Stopwatch measuring wall-clock seconds since construction or last reset().
class Timer
{
    typedef std::chrono::high_resolution_clock clock;
    clock::time_point _start;
public:
    Timer() { reset(); }
    void reset() { _start = now(); }
    // Seconds elapsed since the last reset, computed from nanosecond ticks.
    double elapsed()
    {
        const auto delta = now() - _start;
        return std::chrono::duration_cast<std::chrono::nanoseconds>(delta).count() * 1.0e-9;
    }
    clock::time_point now()
    {
        return clock::now();
    }
};
// Benchmark variant using the standard C++11 to_string — no boost, which
// also sidesteps the per-call stringstream of the question's patch version.
class Garbage {
public:
vector<string> outer;
vector<string> old;
const int nThreads = 1;
Timer timer;
void go() {
// outer.resize(1000000);
//old.reserve(1000000);
for (int i = 0; i < 10; ++i) {
// i % 100 == 0 only holds for i == 0 within these 10 iterations.
if (i % 100 == 0) {
cout << "DOING AN OLD" << endl;
doOld();
}
for (int j = 0; j < 1000000/nThreads; ++j)
outer.push_back(to_string(j));
outer.clear();
cout << timer.elapsed() << endl;
timer.reset();
}
}
// Rebuild the long-lived "old generation" strings.
void doOld() {
old.clear();
for (int i = 0; i < 1000000/nThreads; ++i)
old.push_back(to_string(i));
}
};
int main() {
Garbage().go();
}
Compiling with:
$ g++ -O3 -std=c++11 gc.cpp
$ ./a.out
DOING AN OLD
0.414637
0.189082
0.189143
0.186336
0.184449
0.18504
0.186302
0.186055
0.183123
0.186835
clang 3.5 build with source from Friday 18th of April 2014 gives similar results with the same compiler options.
My processor is a AMD Phenom(tm) II X4 965, running at 3.6GHz (if I remember right).