I have the following code, which should get the default llvm::Target.
auto const targetTriple = llvm::sys::getDefaultTargetTriple();
llvm_module.setTargetTriple(targetTriple);
std::string error;
auto const * target = llvm::TargetRegistry::lookupTarget(targetTriple, error);
if (target == nullptr) {
auto targets = llvm::TargetRegistry::targets();
size_t targetCount = 0;
for (auto const & _ : targets) {
++targetCount;
}
ERROR(Unknown, "llvm::TargetRegistry::lookupTarget failed for " + targetTriple + ". llvm::TargetRegistry::targets() contains " + std::to_string(targetCount) + " elements.");
}
This code produces this error message:
llvm::TargetRegistry::lookupTarget failed for i686-pc-windows-msvc.
llvm::TargetRegistry::targets() contains 0 elements
Am I missing a step?
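For context, here is a minimal sketch of the target-registration calls that normally have to run before lookupTarget can succeed. This is the generic pattern from llvm/Support/TargetSelect.h, not something taken from the code above, so treat it as an assumption about the setup rather than a confirmed fix.
#include "llvm/Support/TargetSelect.h"
// e.g. near the start of main(), before calling lookupTarget:
// register every target LLVM was built with; without some variant of these
// calls, llvm::TargetRegistry::targets() stays empty and lookupTarget fails
llvm::InitializeAllTargetInfos();
llvm::InitializeAllTargets();
llvm::InitializeAllTargetMCs();
llvm::InitializeAllAsmParsers();
llvm::InitializeAllAsmPrinters();
// or, to register only the host target:
// llvm::InitializeNativeTarget();
// llvm::InitializeNativeTargetAsmParser();
// llvm::InitializeNativeTargetAsmPrinter();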
I'm writing a cipher that reads an alphabet as binary codes. Is there a way to implement a custom alphabet (not using ASCII)? For example alphabet={a,b,c,d,e,...,z,],[,.,..., ,-}, and for each character there's a number 0,1,...,63. So the bijection will be from an element of the alphabet to a 6-bit number.
How can I implement this using simple functions in C++? I tried making strings of length 1 and matching each to its corresponding number with if statements, then writing the result to a .txt file, but it didn't work out.
string str1, ..., str63;
string sometext;
str1 = 'a';
// ...
cin >> sometext;
int k;
k = sometext.length();
string res;
ofstream out;
out.open("cipher.txt");
for (int i = 0; i < k; i++) {
res = sometext.substr(i, 1);
if (res == str1) {
res = "000000";
}
// ...
if (res == str63) {
res = "111111";
}
out << res;
}
I made a simple class Alphabet that achieves your task. It uses std::unordered_map to store the mapping between characters and their binary representations, and it uses this mapping to convert in both directions. It also computes the binary representation itself, including how many bits are needed. The class can be given any alphabet.
For testing, I do two conversions between char and binary and print the results to the console. If a requested value is out of range, an exception (std::runtime_error) is thrown.
#include <string>
#include <unordered_map>
#include <cmath>
#include <algorithm> // std::max
#include <stdexcept>
#include <iostream>
class Alphabet {
public:
Alphabet(std::string const & _chars)
: chars(_chars) {
// number of bits needed to index every character; the 1e-6 keeps floating-point
// error from bumping exact powers of two (e.g. a 64-character alphabet) up a bit
size_t num_bits = std::ceil(std::log(std::max(size_t(1), chars.size()))
/ std::log(2) - 1e-6);
for (size_t i = 0; i < chars.size(); ++i) {
std::string bin;
for (ptrdiff_t j = num_bits - 1; j >= 0; --j)
bin += i & (1 << j) ? "1" : "0";
c2b[chars[i]] = bin;
b2c[bin] = chars[i];
}
}
std::string ToBin(char c) const {
auto it = c2b.find(c);
if (it == c2b.end())
throw std::runtime_error("Character '" +
std::string(1, c) + "' not in alphabet!");
return it->second;
}
char ToChar(std::string const & bin) const {
auto it = b2c.find(bin);
if (it == b2c.end())
throw std::runtime_error("Binary '" + bin + "' is out of range!");
return it->second;
}
std::string const & Chars() const {
return chars;
}
private:
std::string chars;
std::unordered_map<char, std::string> c2b;
std::unordered_map<std::string, char> b2c;
};
int main() {
try {
Alphabet alph("abcdef{}123");
std::cout << alph.ToBin('f') << std::endl;
std::cout << alph.ToChar("0011") << std::endl;
std::cout << alph.Chars() << std::endl;
return 0;
} catch (std::exception const & ex) {
std::cout << "Exception: " << ex.what() << std::endl;
return -1;
}
}
Output:
0101
d
abcdef{}123
I have the following neural network code; I'm just trying to work my way up from basic problems, such as the XOR problem, while building up a codebase. This is a hobby project.
#include <iostream>
#include <array>
#include <random>
#include <chrono>
#include <iomanip>
#include <fstream>
#include <algorithm>
#include <cmath>   // std::exp, std::pow
#include <limits>  // std::numeric_limits
typedef float DataType;
typedef DataType (*ActivationFuncPtr)(const DataType&);
static DataType learningRate = 0.02;
static std::size_t numberEpochs = 1000000;
DataType sigmoid(const DataType& x)
{
return DataType(1) / (DataType(1) + std::exp(-x));
}
template<typename T>
class Random
{
public:
T operator()()
{
return m_dis(m_mt);
}
protected:
static std::mt19937 m_mt;
static std::uniform_real_distribution<T> m_dis;
};
template<typename T> std::mt19937 Random<T>::m_mt(std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now().time_since_epoch()).count());
template<typename T> std::uniform_real_distribution<T> Random<T>::m_dis(0,1);
template<std::size_t NumInputs>
class Neuron
{
public:
Neuron(ActivationFuncPtr activationFunction)
:
m_activationFunction(activationFunction)
{
Random<DataType> r;
std::generate(m_weights.begin(),m_weights.end(),[&]()
{
return r();
});
m_biasWeight = r();
}
void FeedForward(const std::array<DataType,NumInputs>& inputValues)
{
DataType sum = m_biasWeight;
for(std::size_t i = 0; i < inputValues.size(); ++i)
sum += inputValues[i] * m_weights[i];
m_output = m_activationFunction(sum);
m_netInput = sum;
}
DataType GetOutput() const
{
return m_output;
}
DataType GetNetInput() const
{
return m_netInput;
}
std::array<DataType,NumInputs> Backpropagate(const DataType& error,
const std::array<DataType,NumInputs>& inputValues,
std::array<DataType,NumInputs+1>& weightAdjustments)
{
DataType errorOverOutput = error;
DataType outputOverNetInput = m_output * (DataType(1) - m_output); // sigmoid derivative
std::array<DataType,NumInputs> netInputOverWeight;
for(std::size_t i = 0; i < NumInputs; ++i)
{
netInputOverWeight[i] = inputValues[i];
}
DataType netInputOverBias = DataType(1);
std::array<DataType,NumInputs> errorOverWeight;
for(std::size_t i = 0; i < NumInputs; ++i)
{
errorOverWeight[i] = errorOverOutput * outputOverNetInput * netInputOverWeight[i];
}
DataType errorOverBias = errorOverOutput * outputOverNetInput * netInputOverBias;
for(std::size_t i = 0; i < NumInputs; ++i)
{
weightAdjustments[i] = errorOverWeight[i];
}
weightAdjustments[NumInputs] = errorOverBias;
DataType errorOverNetInput = errorOverOutput * outputOverNetInput;
std::array<DataType,NumInputs> errorWeights;
for(std::size_t i = 0; i < NumInputs; ++i)
{
errorWeights[i] = errorOverNetInput * m_weights[i];
}
return errorWeights;
}
void AdjustWeights(const std::array<DataType,NumInputs+1>& adjustments)
{
for(std::size_t i = 0; i < NumInputs; ++i)
m_weights[i] = m_weights[i] - learningRate * adjustments[i];
m_biasWeight = m_biasWeight - learningRate * adjustments[NumInputs];
}
const std::array<DataType,NumInputs> GetWeights() const {return m_weights;}
const DataType& GetBiasWeight() const { return m_biasWeight; }
protected:
std::array<DataType,NumInputs> m_weights;
DataType m_biasWeight;
ActivationFuncPtr m_activationFunction;
DataType m_output;
DataType m_netInput;
};
int main()
{
std::array<std::array<DataType,2>,4> inputData = {{{0,0},{0,1},{1,0},{1,1}}};
std::array<std::array<DataType,1>,4> desiredOutputs = {{{0},{1},{1},{0}}};
std::array<Neuron<2>*,2> hiddenLayer1 = {{ new Neuron<2>(sigmoid), new Neuron<2>(sigmoid) }};
std::array<Neuron<2>*,1> outputLayer = {{ new Neuron<2>(sigmoid) }};
std::cout << std::fixed << std::setprecision(80);
DataType minError = std::numeric_limits<DataType>::max();
bool minErrorFound = false;
std::size_t epochNumber = 0;
while(epochNumber < numberEpochs && !minErrorFound)
{
DataType epochMSE = 0;
for(std::size_t row = 0; row < inputData.size(); ++row)
{
const std::array<DataType,2>& dataRow = inputData[row];
const std::array<DataType,1>& outputRow = desiredOutputs[row];
// Feed the values through to the output layer
hiddenLayer1[0]->FeedForward(dataRow);
hiddenLayer1[1]->FeedForward(dataRow);
DataType output0 = hiddenLayer1[0]->GetOutput();
DataType output1 = hiddenLayer1[1]->GetOutput();
outputLayer[0]->FeedForward({output0,output1});
DataType finalOutput0 = outputLayer[0]->GetOutput();
// if there was more than 1 output neuron these errors need to be summed together first to create total error
DataType totalError = 0.5 * std::pow(outputRow[0] - finalOutput0,2.f);
epochMSE += totalError * totalError;
DataType propagateError = -(outputRow[0] - finalOutput0);
std::array<DataType,3> weightAdjustmentsOutput;
std::array<DataType,2> outputError = outputLayer[0]->Backpropagate(propagateError,
{output0,output1},
weightAdjustmentsOutput);
std::array<DataType,3> weightAdjustmentsHidden1;
hiddenLayer1[0]->Backpropagate(outputError[0],dataRow,weightAdjustmentsHidden1);
std::array<DataType,3> weightAdjustmentsHidden2;
hiddenLayer1[1]->Backpropagate(outputError[1],dataRow,weightAdjustmentsHidden2);
outputLayer[0]->AdjustWeights(weightAdjustmentsOutput);
hiddenLayer1[0]->AdjustWeights(weightAdjustmentsHidden1);
hiddenLayer1[1]->AdjustWeights(weightAdjustmentsHidden2);
}
epochMSE *= DataType(1) / inputData.size();
if(epochMSE >= minError + 0.00000001)
{
minErrorFound = true;
}
else
minError = epochMSE;
++epochNumber;
}
std::cout << std::fixed << std::setprecision(80)
<< "\n\n====================================\n"
<< " TRAINING COMPLETE"
<< "\n\n====================================" << std::endl;
std::cout << "Minimum error: " << minError << std::endl;
std::cout << "Number epochs: " << epochNumber << "/" << numberEpochs << std::endl;
// output tests
std::cout << std::fixed << std::setprecision(2)
<< "\n\n====================================\n"
<< " FINAL TESTS"
<< "\n\n====================================" << std::endl;
for(std::size_t row = 0; row < inputData.size(); ++row)
{
const std::array<DataType,2>& dataRow = inputData[row];
const std::array<DataType,1>& outputRow = desiredOutputs[row];
std::cout << dataRow[0] << "," << dataRow[1] << " (" << outputRow[0] << ") : ";
// Feed the values through to the output layer
hiddenLayer1[0]->FeedForward(dataRow);
hiddenLayer1[1]->FeedForward(dataRow);
DataType output0 = hiddenLayer1[0]->GetOutput();
DataType output1 = hiddenLayer1[1]->GetOutput();
outputLayer[0]->FeedForward({output0,output1});
DataType finalOutput0 = outputLayer[0]->GetOutput();
std::cout << finalOutput0 << std::endl;
}
return 0;
}
Most of the time, the output looks like this, and I think "great! success!"
====================================
TRAINING COMPLETE
====================================
Minimum error: 0.00000000106923325748908837340422905981540679931640625000000000000000000000000000
Number epochs: 1000000/1000000
====================================
FINAL TESTS
====================================
0.00,0.00 (0.00) : 0.01
0.00,1.00 (1.00) : 0.99
1.00,0.00 (1.00) : 0.99
1.00,1.00 (0.00) : 0.01
Process returned 0 (0x0) execution time : 0.992 s
Press any key to continue.
But then the following is the output occasionally, which I want to understand: is this overfitting, or underfitting, or have I done something wrong somewhere? How can I prevent this?
====================================
TRAINING COMPLETE
====================================
Minimum error: 0.00787912402302026748657226562500000000000000000000000000000000000000000000000000
Number epochs: 1000000/1000000
====================================
FINAL TESTS
====================================
0.00,0.00 (0.00) : 0.01
0.00,1.00 (1.00) : 0.50
1.00,0.00 (1.00) : 0.99
1.00,1.00 (0.00) : 0.50
Process returned 0 (0x0) execution time : 1.024 s
Press any key to continue.
I have tried using more or fewer epochs along with a higher or lower learning rate, but I still occasionally get a result like the one above (not always exactly the same, but similar). For example, with a learning rate of 0.002 and 1000000 epochs, I occasionally get the following:
====================================
TRAINING COMPLETE
====================================
Minimum error: 0.01417684461921453475952148437500000000000000000000000000000000000000000000000000
Number epochs: 176477/1000000
====================================
FINAL TESTS
====================================
0.00,0.00 (0.00) : 0.29
0.00,1.00 (1.00) : 0.59
1.00,0.00 (1.00) : 0.59
1.00,1.00 (0.00) : 0.63
Process returned 0 (0x0) execution time : 0.225 s
Press any key to continue.
I see how it exited early because the error grew rather than shrank, but is that because I exited early when I shouldn't have?
You have done nothing wrong. Notice that you get different results even after training your network with the same number of epochs and the same training data. Overfitting would be the cause if the badly behaving network had been trained with more epochs and/or more training data; underfitting is the opposite of that. You have neither underfitting nor overfitting here. You could try lowering your learning rate by an order of magnitude (or at least by half), or raising it, changing the training function, or adding momentum (a sketch follows below). It is important to know that working with neural networks is a very empirical process: if your trained network passes validation then it's OK; if not, tweak it a bit and retrain, or just retrain. There is no closed-form formula, solution, or recipe for their design.
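If you want to try the momentum suggestion, here is a minimal sketch of what the weight update could look like. The m_velocity member and the momentumFactor constant are my additions and are not part of your Neuron class; treat the values as starting points, not a tested recipe.
static DataType momentumFactor = 0.9; // assumed value, tune as needed
// new member inside Neuron<NumInputs>, next to m_weights:
// std::array<DataType,NumInputs+1> m_velocity{}; // starts at zero
void AdjustWeights(const std::array<DataType,NumInputs+1>& adjustments)
{
    for(std::size_t i = 0; i < NumInputs; ++i)
    {
        // blend the previous update direction with the new gradient so the
        // search keeps some speed through flat regions and shallow minima
        m_velocity[i] = momentumFactor * m_velocity[i] + adjustments[i];
        m_weights[i] -= learningRate * m_velocity[i];
    }
    m_velocity[NumInputs] = momentumFactor * m_velocity[NumInputs] + adjustments[NumInputs];
    m_biasWeight -= learningRate * m_velocity[NumInputs];
}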
I am trying to copy a matrix in parallel. Below is the code I am working with. Currently, it works as expected with char, but it segfaults when I use short. I assume the bug is that the copy writes outside the vector's memory, but I have tried to confirm that assumption in a debugger without success.
CMakeLists.txt
cmake_minimum_required(VERSION 3.0)
project(memcpy CXX)
find_package (Threads)
add_executable(memcpy main.cpp)
set_property(TARGET memcpy PROPERTY CXX_STANDARD 17)
target_link_libraries (memcpy ${CMAKE_THREAD_LIBS_INIT})
main.cpp
#include <cassert>
#include <condition_variable>
#include <cstring>
#include <iostream>
#include <mutex>
#include <string>
#include <thread>
#include <vector>
class Barrier {
public:
explicit Barrier(std::size_t const count) : m_threshold(count), m_remaining(count), m_generation(0) {}
void wait() {
auto local = std::unique_lock<std::mutex>{m_mutex};
auto current_generation = m_generation;
m_remaining--;
if (!m_remaining) {
m_generation++;
m_remaining = m_threshold;
m_condition.notify_all();
} else {
m_condition.wait(local, [this, current_generation] { return current_generation != m_generation; });
}
}
private:
std::mutex m_mutex;
std::condition_variable m_condition;
std::size_t m_threshold;
std::size_t m_remaining;
std::size_t m_generation;
};
template <typename T>
class Matrix {
using reference = typename std::vector<T>::reference;
using const_reference = typename std::vector<T>::const_reference;
public:
Matrix(std::size_t rows, std::size_t cols) : m_rows(rows), m_cols(cols), m_data(m_cols * m_rows) {}
Matrix(std::size_t rows, std::size_t cols, T const& default_val) : m_rows(rows), m_cols(cols), m_data(m_cols * m_rows, default_val) {}
constexpr std::size_t get_columns() const { return m_cols; }
constexpr std::size_t get_rows() const { return m_rows; }
constexpr std::size_t get_element_count() const {
assert(m_cols * m_rows == m_data.size());
return m_cols * m_rows;
}
T* data() { return m_data.data(); }
T const* data() const { return m_data.data(); }
reference operator()(std::size_t const column_x, std::size_t const row_y) {
assert(0 <= column_x);
assert(column_x < get_columns());
assert(0 <= row_y);
assert(row_y < get_rows());
return m_data[row_y * m_cols + column_x];
}
const_reference operator()(std::size_t const column_x, std::size_t const row_y) const {
assert(0 <= column_x);
assert(column_x < get_columns());
assert(0 <= row_y);
assert(row_y < get_rows());
return m_data[row_y * m_cols + column_x];
}
private:
std::size_t const m_rows;
std::size_t const m_cols;
std::vector<T> m_data;
};
static_assert(false, "FIX ME");
using T = char;
// using T = short;
// using T = int;
// using T = double;
void run(std::size_t const my_rank, std::size_t const num_threads, Barrier& barrier, Matrix<T> const& from_data, Matrix<T>& to_data) {
auto n = from_data.get_element_count();
std::string str;
if (my_rank == 0) {
std::cerr << "bytes to copy: " << (n * sizeof(T)) << '\n';
}
// initialization
std::size_t segment_size = n / num_threads;
std::size_t start = (my_rank * segment_size) * sizeof(T);
std::size_t end = ((my_rank + 1) * segment_size) * sizeof(T);
std::size_t distance = end - start;
str += " my_rank: " + std::to_string(my_rank);
str += " segment_size: " + std::to_string(segment_size);
str += " start: " + std::to_string(start);
str += " end: " + std::to_string(end);
str += " distance: " + std::to_string(distance);
str += " rank: " + std::to_string(my_rank);
str += " start: " + std::to_string(start);
str += " end: " + std::to_string(end);
str += " distance: " + std::to_string(distance);
str += " e: " + std::to_string(start + distance);
str += "\n";
std::cerr << str;
barrier.wait();
std::memcpy(to_data.data() + start, from_data.data() + start, distance);
barrier.wait();
if (my_rank == 0)
for (auto y = 0; y < from_data.get_rows(); y++) {
for (auto x = 0; x < from_data.get_columns(); x++) {
if (to_data(x, y) != from_data(x, y)) {
std::cerr << "x: " << x << '\t' << "y: " << y << "\t\t";
std::cerr << "to: " << to_data(x, y) << '\t' << "from: " << from_data(x, y) << '\n';
}
}
}
barrier.wait();
}
int main() {
auto const num_threads = 1;
// auto const num_threads = 4;
// auto const width = 64;
// auto const height = 64;
auto const width = 97;
auto const height = 101;
auto from_data = Matrix<T>(width, height, 70);
auto to_data = Matrix<T>(width, height, 84);
std::vector<std::thread> threads;
auto barrier = Barrier{num_threads};
for (auto i = 0; i < num_threads; i++) {
threads.emplace_back(run, i, num_threads, std::ref(barrier), std::ref(from_data), std::ref(to_data));
}
for (auto& thread : threads) {
thread.join();
}
}
std::memcpy(to_data.data() + start, from_data.data() + start, distance)
std::vector<T>::data() returns a T*, so if you add an integral value foo to it, you effectively advance the pointer by foo * sizeof(T) bytes ... but you already multiplied by sizeof(T) earlier when calculating start and end. Also, std::memcpy() won't work for Ts that are not trivially copyable.
Better to use std::copy().
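For illustration, a minimal sketch of how the indexing and the copy inside run() could look with std::copy, keeping start and end as element counts so no sizeof(T) appears anywhere. The remainder handling is my addition; this is a sketch against the question's variable names, not a tested patch.
// needs #include <algorithm> for std::copy
std::size_t segment_size = n / num_threads;
std::size_t start = my_rank * segment_size;
// let the last thread absorb the remainder when n is not divisible by num_threads
std::size_t end = (my_rank + 1 == num_threads) ? n : (my_rank + 1) * segment_size;
barrier.wait();
// std::copy is element-wise, so it also works for non-trivially-copyable Ts
std::copy(from_data.data() + start, from_data.data() + end, to_data.data() + start);
barrier.wait();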
I want to get the default template argument from a template specialization with the clang AST, but I can find no way to do it.
Can anyone help me?
template<typename TT0>
struct DefaultArg
{
typedef char TypeT;
};
template<typename TT0,
typename TT1 = typename DefaultArg<TT0>::TypeT >
struct Template0
{
};
Template0<int> s; // Is there any way to get 'DefaultArg<int>::TypeT'
// ( neither DefaultArg<TT0>::TypeT nor 'char' )
// as default template argument
// for this template specialization?
Short answer: not to my knowledge.
Workaround solution: here is example code that does the trick (it only works for depth = 0).
bool CheckSpecializedTemplate(const clang::QualType& Type)
{
// ignore types that are not class or struct records
if (!Type->isRecordType() || !Type->isStructureOrClassType())
return false;
// get the underlying record and make sure it is a
// specialization of a template type
const clang::CXXRecordDecl* Record = Type->getAsCXXRecordDecl();
if (Record->getKind() != clang::Decl::Kind::ClassTemplateSpecialization)
return false;
return true;
}
bool TrySubstitutionPrint(
const clang::TemplateParameterList* TemplateList,
const clang::TemplateArgumentList& ArgumentsList,
const clang::QualType& Type)
{
//
// Hack to patch in the type substitutions
// (AST doesn't seem to preserve dependent template specialization)
//
auto& out = llvm::outs();
// from the dependent type, get the nested type or return
const clang::Type* Ptr = Type.getTypePtr();
if (Ptr->getTypeClass() != clang::Type::TypeClass::DependentName)
return false;
const clang::DependentNameType* DnPtr =
static_cast<const clang::DependentNameType*>(Ptr);
const clang::NestedNameSpecifier* Nns = DnPtr->getQualifier();
if (!Nns) return false;
if (Nns->getKind() != clang::NestedNameSpecifier::SpecifierKind::TypeSpec)
return false;
const clang::Type* NestedPtr = Nns->getAsType();
if (NestedPtr->getTypeClass() != clang::Type::TypeClass::TemplateSpecialization)
return false;
const clang::TemplateSpecializationType* TsPtr =
static_cast<const clang::TemplateSpecializationType*>(NestedPtr);
const clang::TemplateDecl* Temp = TsPtr->getTemplateName().getAsTemplateDecl();
out << DnPtr->getKeywordName(DnPtr->getKeyword()).str();
out << " " << Temp->getNameAsString() << "<";
// match the args to their respective parameters
const clang::TemplateParameterList* List = Temp->getTemplateParameters();
bool Multi = false;
for (auto Token = List->begin(); Token != List->end(); Token++) {
if (Multi) out << ", "; else Multi = true;
out << (*Token)->getNameAsString();
// brute force search
int Index = 0;
for (auto T2 = TemplateList->begin(); T2 != TemplateList->end(); T2++) {
if ((*T2)->getNameAsString() == (*Token)->getNameAsString()) {
out << " = " << ArgumentsList[Index].getAsType().getAsString();
break;
}
Index++;
}
}
out << ">::";
// print the dependent type name
const clang::IdentifierInfo* identifier = DnPtr->getIdentifier();
out << identifier->getName().str();
return true;
}
bool VisitVarDecl(clang::VarDecl* Decl)
{
auto& out = llvm::outs();
clang::QualType Type = Decl->getType();
// let's only look for variables with name "s"
auto varname = Decl->getNameAsString();
if (varname != "s") return true;
// we only want specialized templates
if (!CheckSpecializedTemplate(Type)) return true;
// convert the record to a specialization and
// get the underlying template decl
const clang::CXXRecordDecl* Record = Type->getAsCXXRecordDecl();
const clang::ClassTemplateSpecializationDecl* Special =
static_cast<const clang::ClassTemplateSpecializationDecl*>(Record);
const clang::ClassTemplateDecl* Template = Special->getSpecializedTemplate();
// iterate over the list of template parameters and print them out
const clang::TemplateArgumentList& ArgsList =
Special->getTemplateArgs();
const clang::TemplateParameterList* TemplateList =
Template->getTemplateParameters();
int Index = 0;
for (clang::TemplateParameterList::const_iterator
TemplateToken = TemplateList->begin();
TemplateToken != TemplateList->end(); TemplateToken++)
{
switch ((*TemplateToken)->getKind()) {
case clang::Decl::Kind::TemplateTemplateParm: {
const clang::TemplateArgument& c = ArgsList[Index];
out << "class = " << c.getAsType().getAsString();
} break;
case clang::Decl::Kind::TemplateTypeParm: {
const clang::TemplateTypeParmDecl* ttpd =
static_cast<const clang::TemplateTypeParmDecl*>(*TemplateToken);
if (ttpd->hasDefaultArgument()) {
clang::QualType DefaultArg = ttpd->getDefaultArgument();
if (!TrySubstitutionPrint(TemplateList, ArgsList, DefaultArg))
out << DefaultArg.getAsString() << "+";
}
else out << "template " << ttpd->getNameAsString();
const clang::TemplateArgument& c = ArgsList[Index];
out << " = " << c.getAsType().getAsString();
} break;
}
out << "\n";
Index++;
}
// All done!
out << "\n";
return true;
}
The output for your code snippet would be
template TT0 = int
typename DefaultArg<TT0 = int>::TypeT = char
I am trying to create a program which reads and writes buffer sizes, but without standard libraries. So far, I have written a main with header files; however, whenever I compile my main, I keep getting errors such as:
In file included from main.cc:6:0:
DefaultClock.cpp: In member function ‘double AccurateClock::Time::getTotalMilliseconds() const’:
DefaultClock.cpp:10:14: error: ‘milliseconds’ does not name a type
auto milliseconds =
^
DefaultClock.cpp:13:16: error: ‘milliseconds’ was not declared in this scope
return milliseconds;
^
and so on for the next 30+ lines. I'm kind of stuck here; what could be the cause of the errors? I have the classes defined.
#include "DefaultClock.h"
namespace AccurateClock {
Time::Time(const timespec &timeValue) : timeValue(timeValue) {
}
double Time::getTotalMilliseconds() const {
const double oneThousand = 1000;
auto milliseconds =
(timeValue.tv_sec * oneThousand) +
(timeValue.tv_nsec / oneThousand);
return milliseconds;
}
void Time::print(std::ostream &out) const {
const double oneThousand = 1000;
auto milliseconds = getTotalMilliseconds();
auto secondsPart = (long)(milliseconds / oneThousand);
auto millisecondsPart = (long)(milliseconds - (secondsPart * oneThousand));
auto nsPart = (long)(
(milliseconds * oneThousand) -
(secondsPart * oneThousand * oneThousand) -
(millisecondsPart * oneThousand));
out << secondsPart << "sec, " << millisecondsPart << "ms, " << nsPart << "ns since the computer booted";
}
const Duration Time::operator-(const Time &a) const {
auto difference = getTotalMilliseconds()
- a.getTotalMilliseconds();
auto duration = Duration(difference);
return duration;
}
const Time DefaultClock::getTime() {
auto value = timespec();
clock_gettime(_CLOCK_REALTIME, &value);
auto time = Time(value);
return time;
}
Duration::Duration(double value) : valueInMilliseconds(value) {
}
void Duration::print(std::ostream &out) const {
out << valueInMilliseconds << "ms";
}
}
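One guess, since the lines the compiler flags are auto declarations with initializers: if the compiler is building in pre-C++11 mode, auto is still a storage-class specifier there, and auto milliseconds = ... produces exactly a "does not name a type" diagnostic. If that is the cause, compiling with -std=c++11 (or newer) should make the errors go away; alternatively, here is the same function spelled without auto, as a sketch:
double AccurateClock::Time::getTotalMilliseconds() const {
    const double oneThousand = 1000;
    // same computation as in the question, just spelled without auto so it
    // also parses under C++98/03 (side note: tv_nsec / 1000 gives microseconds,
    // not milliseconds, but that is separate from the compile error)
    double milliseconds =
        (timeValue.tv_sec * oneThousand) +
        (timeValue.tv_nsec / oneThousand);
    return milliseconds;
}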