Weird different results between debug and run mode in CLion - C++

I found a weird problem when I was solving a dynamic programming problem.
Code
// Problem 01
// A child is running up a staircase with n steps and can hop either 1 step, 2 steps, or 3 steps at a time. Implement a method to count how many possible ways the child can run up the stairs.
#include <cstdio>
#include <iostream>
#include <sstream>
#include <vector>
using namespace std;
// 1st method
// top-down dynamic programming
long steps(int n, vector<long>& d) {
if (d[n] > 0L)
return d[n];
//
if (n == 1)
return 1L;
else if (n == 2) // 1,1 2
return 2L;
else if (n == 3) // 1,1,1 1,2 2,1 3
return 4L;
//
long s = steps(n-1, d) + steps(n-2, d) + steps(n-3, d);
d[n] = s;
return s;
}
long solve(int n) {
vector<long> d;
for (int i = 0; i < n; ++i) {
d.push_back(-1L);
}
long r = steps(n, d);
return r;
}
// 2nd method
// bottom-up dynamic programming
// and remove the redundant array
long steps2(int n) {
if (n == 1)
return 1L;
else if (n == 2) // 1,1 2
return 2L;
else if (n == 3) // 1,1,1 1,2 2,1 3
return 4L;
//
long s = 0L;
long s1 = 4L;
long s2 = 2L;
long s3 = 1L;
for (int i = 4; i < n; ++i) {
s = s1 + s2 + s3;
s3 = s2;
s2 = s1;
s1 = s;
}
s = s1 + s2 + s3;
return s;
}
long solve2(int n) {
return steps2(n);
}
////////////////////// Test ////////////////////////
class Test {
public:
Test() {
basicTests();
}
private:
int num_fail = 0;
void basicTests() {
printf("C++ version: %ld\n", __cplusplus);
// customize your own tests here
printf("%d, %ld\n", 20, solve(20));
printf("%d, %ld\n", 20, solve2(20));
}
};
////////////////////// Main ////////////////////////
int main() {
Test t = Test(); // change the method you want to test here.
return 0;
}
Results
When I run the code, both methods give the same result, which seems correct.
C++ version: 201103
20, 121415
20, 121415
But when I use debug mode, the first result sometimes comes out wrong, like this.
C++ version: 201103
20, 4210418572061733749
20, 121415
What's the problem?

In solve you fill vector d with n values, at indices 0...n-1. But in steps you immediately read d[n], which is one past the end of the vector.
You're reading an out-of-bounds, uninitialized value, which is undefined behavior, so you sometimes get a garbage result.
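One minimal fix (a sketch, keeping your existing 1-based indexing and the rest of the code unchanged) is to allocate n+1 slots so that d[n] is a valid element:
long solve(int n) {
    vector<long> d(n + 1, -1L);   // index 0 is unused; d[n] is now a valid element
    return steps(n, d);
}
Equivalently, you could keep n slots and index the memo as d[n-1] throughout steps.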

How can I approach this CP task?

The task (from a Bulgarian judge, click on "Език" to change it to English):
I am given the size of the first (S[1] = A) of N corals. The size of every subsequent coral S[i] (where i > 1) is calculated using the formula S[i] = (B*S[i-1] + C) % D, where A, B, C and D are some constants. I am told that Nemo is nearby the Kth coral (when the sizes of all corals are sorted in ascending order).
What is the size of the above-mentioned Kth coral?
I will have T tests, and for every one of them I will be given N, K, A, B, C and D and prompted to output the size of the Kth coral.
The requirements:
1 ≤ T ≤ 3
1 ≤ K ≤ N ≤ 10^7
0 ≤ A < D ≤ 10^18
1 ≤ C, B*D ≤ 10^18
Memory available is 64 MB
Time limit is 1.9 sec
The problem I have:
For the worst-case scenario I will need 10^7 * 8 B, which is about 76 MB.
The solution, if the available memory were at least 80 MB, would be:
#include <iostream>
#include <vector>
#include <iterator>
#include <algorithm>
using biggie = long long;
int main() {
int t;
std::cin >> t;
int i, n, k, j;
biggie a, b, c, d;
std::vector<biggie>::iterator it_ans;
for (i = 0; i != t; ++i) {
std::cin >> n >> k >> a >> b >> c >> d;
std::vector<biggie> lut{ a };
lut.reserve(n);
for (j = 1; j != n; ++j) {
lut.emplace_back((b * lut.back() + c) % d);
}
it_ans = std::next(lut.begin(), k - 1);
std::nth_element(lut.begin(), it_ans, lut.end());
std::cout << *it_ans << '\n';
}
return 0;
}
Question 1: How can I approach this CP task given the requirements listed above?
Question 2: Is it somehow possible to use std::nth_element to solve it, since I am not able to store all N elements? I mean using std::nth_element in a sliding-window technique (if this is possible).
# Solution based on Christian Sloper's answer
#include <iostream>
#include <queue>
using biggie = long long;
int main() {
int t;
std::cin >> t;
int i, n, k, j, j_lim;
biggie a, b, c, d, prev, curr;
for (i = 0; i != t; ++i) {
std::cin >> n >> k >> a >> b >> c >> d;
if (k < n - k + 1) {
std::priority_queue<biggie, std::vector<biggie>, std::less<biggie>> q;
q.push(a);
prev = a;
for (j = 1; j != k; ++j) {
curr = (b * prev + c) % d;
q.push(curr);
prev = curr;
}
for (; j != n; ++j) {
curr = (b * prev + c) % d;
if (curr < q.top()) {
q.pop();
q.push(curr);
}
prev = curr;
}
std::cout << q.top() << '\n';
}
else {
std::priority_queue<biggie, std::vector<biggie>, std::greater<biggie>> q;
q.push(a);
prev = a;
for (j = 1, j_lim = n - k + 1; j != j_lim; ++j) {
curr = (b * prev + c) % d;
q.push(curr);
prev = curr;
}
for (; j != n; ++j) {
curr = (b * prev + c) % d;
if (curr > q.top()) {
q.pop();
q.push(curr);
}
prev = curr;
}
std::cout << q.top() << '\n';
}
}
return 0;
}
This gets accepted (succeeds all 40 tests; largest time 1.4 seconds, for a test with T=3 and D ≤ 10^9; largest time for a test with larger D, and thus T=1, is 0.7 seconds):
#include <iostream>
using biggie = long long;
int main() {
int t;
std::cin >> t;
int i, n, k, j;
biggie a, b, c, d;
for (i = 0; i != t; ++i) {
std::cin >> n >> k >> a >> b >> c >> d;
biggie prefix = 0;
for (int shift = d > 1000000000 ? 40 : 20; shift >= 0; shift -= 20) {
biggie prefix_mask = ((biggie(1) << (40 - shift)) - 1) << (shift + 20);
int count[1 << 20] = {0};
biggie s = a;
int rank = 0;
for (j = 0; j != n; ++j) {
biggie s_vs_prefix = s & prefix_mask;
if (s_vs_prefix < prefix)
++rank;
else if (s_vs_prefix == prefix)
++count[(s >> shift) & ((1 << 20) - 1)];
s = (b * s + c) % d;
}
int i = -1;
while (rank < k)
rank += count[++i];
prefix |= biggie(i) << shift;
}
std::cout << prefix << '\n';
}
return 0;
}
The result is a 60-bit number. I first determine the high 20 bits with one pass through the numbers, then the middle 20 bits with another pass, then the low 20 bits with another.
For the high 20 bits, generate all the numbers and count how often each high-20-bits pattern occurs. After that, add up the counts until you reach K. The pattern where you reach K covers the K-th number in ascending order; in other words, that pattern is the result's high 20 bits.
The middle and low 20 bits are computed similarly, except that we take the already-known prefix (the high 20 bits, or the high+middle 40 bits) into account. As a little optimization, when D is small, I skip computing the high 20 bits. That got me from 2.1 seconds down to 1.4 seconds.
This solution is like user3386109 described, except with bucket size 2^20 instead of 10^6, so I can use bit operations instead of divisions and think in terms of bit patterns instead of ranges.
For the memory constraint you hit:
S[i] = (B*S[i-1] + C) % D
requires only the value immediately before it, S[i-1]. So you can compute the values in pairs and store only half of them: keep the even-indexed values in a half-length lookup table and recompute each odd-indexed value in flight from the even value before it. Modern CPUs are fast enough to absorb extra calculations like these.
std::vector<biggie> lut{ s_0, s_2, s_4, ... };   // every second value
s_3 = computeOddFromEven(lut[1]);                // recompute an odd value on demand
You can make the stride longer, such as 4 or 8, too. If the dataset is large, RAM latency dominates, so this amounts to placing checkpoints across the whole search space to balance memory against CPU usage. With checkpoints every 1000 elements you would spend a lot of CPU cycles on recomputation, but the checkpoint array would then fit in the CPU's L1/L2 cache, which is not bad. When sorting, the worst-case recomputation per element access would be 1000 iterations; O(1000 x size) is a big constant factor, but it may be partly optimizable by the compiler if the recurrence constants really are compile-time constants.
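A rough sketch of the checkpoint idea (the stride of 16 and the names here are illustrative, not from the answer above):
#include <vector>

using biggie = long long;

// Recompute the i-th value of the sequence from the nearest stored checkpoint.
// checkpoints[j] is assumed to hold S[j*16]; b, c, d are the recurrence constants.
biggie valueAt(int i, const std::vector<biggie>& checkpoints,
               biggie b, biggie c, biggie d) {
    biggie s = checkpoints[i / 16];      // nearest checkpoint at or before i
    for (int j = 0; j < i % 16; ++j)     // at most 15 recurrence steps
        s = (b * s + c) % d;
    return s;
}
This uses 1/16 of the memory of a full lookup table at the cost of up to 15 extra recurrence steps per access.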
If CPU performance becomes a problem again:
write a function that emits your source code to a string or file, with all the values the user entered baked in as constants
compile that code from the command line (assuming the target machine has a compiler such as g++ reachable from the main program)
run it and collect the results
The compiler can apply more speed/memory optimizations when those values really are compile-time constants rather than read from std::cin. A sketch of this idea follows.
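A minimal sketch of that idea (it assumes g++ is on the PATH and that writing files is allowed; the generated program body is only a placeholder, and error handling is omitted):
#include <cstdlib>
#include <fstream>

// Write a specialized program with the input values baked in as constants,
// compile it with g++, and run it.
int runSpecialized(long long n, long long k, long long a,
                   long long b, long long c, long long d) {
    std::ofstream src("generated.cpp");
    src << "static const long long N=" << n << ",K=" << k << ",A=" << a
        << ",B=" << b << ",C=" << c << ",D=" << d << ";\n"
           "int main(){ /* same algorithm as above, now with compile-time constants */ }\n";
    src.close();
    if (std::system("g++ -O3 -o generated generated.cpp") != 0) return -1;
    return std::system("./generated");   // forwards the generated program's exit status
}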
If you really need a hard limit on RAM usage, then implement a simple cache whose backing store is the heavy computation itself: brute-force recomputation from the start is O(N^2) overall, or O(L x N) with checkpoints every L elements as in the first method (L = 2, 4, ...).
Here's a sample using a direct-mapped cache with room for 8M long long values:
int main()
{
std::vector<long long> checkpoints = {
a_0, a_16, a_32,...
};
auto cacheReadMissFunction = [&](int key){
// your pure computational algorithm here, helper meant to show variable
long long result = checkpoints[key/16];
for (int step = 0; step < key % 16; ++step)   // advance from the checkpoint to element `key`
result = iterate(result);
return result;
};
auto cacheWriteMissFunction = [&](int key, long long value){
/* not useful for your algorithm as it doesn't change behavior per element */
// backing_store[key] = value;
};
// due to special optimizations, size has to be 2^k
int cacheSize = 1024*1024*8;
DirectMappedCache<int, long long> cache(cacheSize,cacheReadMissFunction,cacheWriteMissFunction);
std::cout << cache.get(20)<<std::endl;
return 0;
}
If you use a cache-friendly sorting algorithm, direct cache access gives a lot of reuse across nearly all of the compared elements, for example if you fill the output buffer/terminal one element at a time by following something like a bitonic sort path (which is known at compile time). If that doesn't work, you can try using files as the cache's backing store and sort the whole array at once. Is the file system prohibited? Then the online-compiling method above won't work either.
Implementation of a direct mapped cache (don't forget to call flush() after your algorithm finishes, if you use any cache.set() method):
#ifndef DIRECTMAPPEDCACHE_H_
#define DIRECTMAPPEDCACHE_H_
#include<vector>
#include<functional>
#include<mutex>
#include<iostream>
/* Direct-mapped cache implementation
* Only usable for integer type keys in range [0,maxPositive-1]
*
* CacheKey: type of key (only integers: int, char, size_t)
* CacheValue: type of value that is bound to key (same as above)
*/
template< typename CacheKey, typename CacheValue>
class DirectMappedCache
{
public:
// allocates buffers for numElements number of cache slots/lanes
// readMiss: cache-miss for read operations. User needs to give this function
// to let the cache automatically get data from backing-store
// example: [&](MyClass key){ return redis.get(key); }
// takes a CacheKey as key, returns CacheValue as value
// writeMiss: cache-miss for write operations. User needs to give this function
// to let the cache automatically set data to backing-store
// example: [&](MyClass key, MyAnotherClass value){ redis.set(key,value); }
// takes a CacheKey as key and CacheValue as value
// numElements: has to be integer-power of 2 (e.g. 2,4,8,16,...)
DirectMappedCache(CacheKey numElements,
const std::function<CacheValue(CacheKey)> & readMiss,
const std::function<void(CacheKey,CacheValue)> & writeMiss):size(numElements),sizeM1(numElements-1),loadData(readMiss),saveData(writeMiss)
{
// initialize buffers
for(size_t i=0;i<numElements;i++)
{
valueBuffer.push_back(CacheValue());
isEditedBuffer.push_back(0);
keyBuffer.push_back(CacheKey()-1);// mapping of 0+ allowed
}
}
// get element from cache
// if cache doesn't find it in buffers,
// then cache gets data from backing-store
// then returns the result to user
// then cache is available from RAM on next get/set access with same key
inline
const CacheValue get(const CacheKey & key) noexcept
{
return accessDirect(key,nullptr);
}
// only syntactic difference
inline
const std::vector<CacheValue> getMultiple(const std::vector<CacheKey> & key) noexcept
{
const int n = key.size();
std::vector<CacheValue> result(n);
for(int i=0;i<n;i++)
{
result[i]=accessDirect(key[i],nullptr);
}
return result;
}
// thread-safe but slower version of get()
inline
const CacheValue getThreadSafe(const CacheKey & key) noexcept
{
std::lock_guard<std::mutex> lg(mut);
return accessDirect(key,nullptr);
}
// set element to cache
// if cache doesn't find it in buffers,
// then cache sets data on just cache
// writing to backing-store only happens when
// another access evicts the cache slot containing this key/value
// or when cache is flushed by flush() method
// then returns the given value back
// then cache is available from RAM on next get/set access with same key
inline
void set(const CacheKey & key, const CacheValue & val) noexcept
{
accessDirect(key,&val,1);
}
// thread-safe but slower version of set()
inline
void setThreadSafe(const CacheKey & key, const CacheValue & val) noexcept
{
std::lock_guard<std::mutex> lg(mut);
accessDirect(key,&val,1);
}
// use this before closing the backing-store to store the latest bits of data
void flush()
{
try
{
std::lock_guard<std::mutex> lg(mut);
for (size_t i=0;i<size;i++)
{
if (isEditedBuffer[i] == 1)
{
isEditedBuffer[i]=0;
auto oldKey = keyBuffer[i];
auto oldValue = valueBuffer[i];
saveData(oldKey,oldValue);
}
}
}catch(std::exception &ex){ std::cout<<ex.what()<<std::endl; }
}
// direct mapped access
// opType=0: get
// opType=1: set
CacheValue const accessDirect(const CacheKey & key,const CacheValue * value, const bool opType = 0)
{
// find tag mapped to the key
CacheKey tag = key & sizeM1;
// compare keys
if(keyBuffer[tag] == key)
{
// cache-hit
// "set"
if(opType == 1)
{
isEditedBuffer[tag]=1;
valueBuffer[tag]=*value;
}
// cache hit value
return valueBuffer[tag];
}
else // cache-miss
{
CacheValue oldValue = valueBuffer[tag];
CacheKey oldKey = keyBuffer[tag];
// eviction algorithm start
if(isEditedBuffer[tag] == 1)
{
// if it is "get"
if(opType==0)
{
isEditedBuffer[tag]=0;
}
saveData(oldKey,oldValue);
// "get"
if(opType==0)
{
const CacheValue && loadedData = loadData(key);
valueBuffer[tag]=loadedData;
keyBuffer[tag]=key;
return loadedData;
}
else /* "set" */
{
valueBuffer[tag]=*value;
keyBuffer[tag]=key;
return *value;
}
}
else // not edited
{
// "set"
if(opType == 1)
{
isEditedBuffer[tag]=1;
}
// "get"
if(opType == 0)
{
const CacheValue && loadedData = loadData(key);
valueBuffer[tag]=loadedData;
keyBuffer[tag]=key;
return loadedData;
}
else // "set"
{
valueBuffer[tag]=*value;
keyBuffer[tag]=key;
return *value;
}
}
}
}
private:
const CacheKey size;
const CacheKey sizeM1;
std::mutex mut;
std::vector<CacheValue> valueBuffer;
std::vector<unsigned char> isEditedBuffer;
std::vector<CacheKey> keyBuffer;
const std::function<CacheValue(CacheKey)> loadData;
const std::function<void(CacheKey,CacheValue)> saveData;
};
#endif /* DIRECTMAPPEDCACHE_H_ */
You can solve this problem using a Max-heap.
Insert the first k elements into the max-heap. The largest element of these k will now be at the root.
For each remaining element e:
Compare e to the root.
If e is larger than the root, discard it.
If e is smaller than the root, remove the root and insert e into the heap structure.
After all elements have been processed, the k-th smallest element is at the root.
This method uses O(K) space and O(N log K) time.
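A minimal, self-contained sketch of this selection on a plain vector (the earlier code block applies the same idea directly to the generated sequence, without materializing a vector):
#include <cstddef>
#include <queue>
#include <vector>

// k-th smallest element of v (1-based k), using a max-heap of the k smallest seen so far.
long long kthSmallest(const std::vector<long long>& v, std::size_t k) {
    std::priority_queue<long long> heap;                 // max-heap
    for (long long e : v) {
        if (heap.size() < k) {
            heap.push(e);
        } else if (e < heap.top()) {                     // e belongs among the k smallest
            heap.pop();
            heap.push(e);
        }
    }
    return heap.top();                                   // root = k-th smallest
}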
There’s an algorithm that people often call LazySelect that I think would be perfect here.
With high probability, we make two passes. In the first pass, we save a random sample of size n much less than N. The answer will be around index (K/N)n in the sorted sample, but due to the randomness, we have to be careful. Save the values a and b at (K/N)n ± r instead, where r is the radius of the window. In the second pass, we save all of the values in [a, b], count the number of values less than a (let it be L), and select the value with index K−L if it’s in the window (otherwise, try again).
The theoretical advice on choosing n and r is fine, but I would be pragmatic here. Choose n so that you use most of the available memory; the bigger the sample, the more informative it is. Choose r fairly large as well, but not quite as aggressively due to the randomness.
C++ code below. On the online judge, it’s faster than Kelly’s (max 1.3 seconds on the T=3 tests, 0.5 on the T=1 tests).
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>
#include <limits>
#include <optional>
#include <random>
#include <vector>
namespace {
class LazySelector {
public:
static constexpr std::int32_t kTargetSampleSize = 1000;
explicit LazySelector() { sample_.reserve(1000000); }
void BeginFirstPass(const std::int32_t n, const std::int32_t k) {
sample_.clear();
mask_ = n / kTargetSampleSize;
mask_ |= mask_ >> 1;
mask_ |= mask_ >> 2;
mask_ |= mask_ >> 4;
mask_ |= mask_ >> 8;
mask_ |= mask_ >> 16;
}
void FirstPass(const std::int64_t value) {
if ((gen_() & mask_) == 0) {
sample_.push_back(value);
}
}
void BeginSecondPass(const std::int32_t n, const std::int32_t k) {
sample_.push_back(std::numeric_limits<std::int64_t>::min());
sample_.push_back(std::numeric_limits<std::int64_t>::max());
const double p = static_cast<double>(sample_.size()) / n;
const double radius = 2 * std::sqrt(sample_.size());
const auto lower =
sample_.begin() + std::clamp<std::int32_t>(std::floor(p * k - radius),
0, sample_.size() - 1);
const auto upper =
sample_.begin() + std::clamp<std::int32_t>(std::ceil(p * k + radius), 0,
sample_.size() - 1);
std::nth_element(sample_.begin(), upper, sample_.end());
std::nth_element(sample_.begin(), lower, upper);
lower_ = *lower;
upper_ = *upper;
sample_.clear();
less_than_lower_ = 0;
equal_to_lower_ = 0;
equal_to_upper_ = 0;
}
void SecondPass(const std::int64_t value) {
if (value < lower_) {
++less_than_lower_;
} else if (upper_ < value) {
} else if (value == lower_) {
++equal_to_lower_;
} else if (value == upper_) {
++equal_to_upper_;
} else {
sample_.push_back(value);
}
}
std::optional<std::int64_t> Select(std::int32_t k) {
if (k < less_than_lower_) {
return std::nullopt;
}
k -= less_than_lower_;
if (k < equal_to_lower_) {
return lower_;
}
k -= equal_to_lower_;
if (k < sample_.size()) {
const auto kth = sample_.begin() + k;
std::nth_element(sample_.begin(), kth, sample_.end());
return *kth;
}
k -= sample_.size();
if (k < equal_to_upper_) {
return upper_;
}
return std::nullopt;
}
private:
std::default_random_engine gen_;
std::vector<std::int64_t> sample_ = {};
std::int32_t mask_ = 0;
std::int64_t lower_ = std::numeric_limits<std::int64_t>::min();
std::int64_t upper_ = std::numeric_limits<std::int64_t>::max();
std::int32_t less_than_lower_ = 0;
std::int32_t equal_to_lower_ = 0;
std::int32_t equal_to_upper_ = 0;
};
} // namespace
int main() {
int t;
std::cin >> t;
for (int i = t; i > 0; --i) {
std::int32_t n;
std::int32_t k;
std::int64_t a;
std::int64_t b;
std::int64_t c;
std::int64_t d;
std::cin >> n >> k >> a >> b >> c >> d;
std::optional<std::int64_t> ans = std::nullopt;
LazySelector selector;
do {
{
selector.BeginFirstPass(n, k);
std::int64_t s = a;
for (std::int32_t j = n; j > 0; --j) {
selector.FirstPass(s);
s = (b * s + c) % d;
}
}
{
selector.BeginSecondPass(n, k);
std::int64_t s = a;
for (std::int32_t j = n; j > 0; --j) {
selector.SecondPass(s);
s = (b * s + c) % d;
}
}
ans = selector.Select(k - 1);
} while (!ans);
std::cout << *ans << '\n';
}
}

Finding the median value of a vector using C++

I'm a programming student, and for a project I'm working on, one of the things I have to do is compute the median value of a vector of int values, and it must be done by passing the vector through functions. The vector is initially generated randomly using the C++ random generator mt19937, which I have already written in my code. I'm to do this using the sort function and vector member functions such as .begin(), .end(), and .size().
I'm supposed to make sure I find the median value of the vector and then output it.
And I'm stuck; below I have included my attempt. So where am I going wrong? I would appreciate it if you would be willing to give me some pointers or resources to get going in the right direction.
Code:
#include<iostream>
#include<vector>
#include<cstdlib>
#include<ctime>
#include<random>
using namespace std;
double find_median(vector<double>);
double find_median(vector<double> len)
{
{
int i;
double temp;
int n=len.size();
int mid;
double median;
bool swap;
do
{
swap = false;
for (i = 0; i< len.size()-1; i++)
{
if (len[i] > len[i + 1])
{
temp = len[i];
len[i] = len[i + 1];
len[i + 1] = temp;
swap = true;
}
}
}
while (swap);
for (i=0; i<len.size(); i++)
{
if (len[i]>len[i+1])
{
temp=len[i];
len[i]=len[i+1];
len[i+1]=temp;
}
mid=len.size()/2;
if (mid%2==0)
{
median= len[i]+len[i+1];
}
else
{
median= (len[i]+0.5);
}
}
return median;
}
}
int main()
{
int n,i;
cout<<"Input the vector size: "<<endl;
cin>>n;
vector <double> foo(n);
mt19937 rand_generator;
rand_generator.seed(time(0));
uniform_real_distribution<double> rand_distribution(0,0.8);
cout<<"original vector: "<<" ";
for (i=0; i<n; i++)
{
double rand_num=rand_distribution(rand_generator);
foo[i]=rand_num;
cout<<foo[i]<<" ";
}
double median;
median=find_median(foo);
cout<<endl;
cout<<"The median of the vector is: "<<" ";
cout<<median<<endl;
}
The median is given by
const auto median_it = len.begin() + len.size() / 2;
std::nth_element(len.begin(), median_it , len.end());
auto median = *median_it;
For an even number of elements you need to be a bit more precise. E.g., you can use
assert(!len.empty());
if (len.size() % 2 == 0) {
const auto median_it1 = len.begin() + len.size() / 2 - 1;
const auto median_it2 = len.begin() + len.size() / 2;
std::nth_element(len.begin(), median_it1 , len.end());
const auto e1 = *median_it1;
std::nth_element(len.begin(), median_it2 , len.end());
const auto e2 = *median_it2;
return (e1 + e2) / 2;
} else {
const auto median_it = len.begin() + len.size() / 2;
std::nth_element(len.begin(), median_it , len.end());
return *median_it;
}
There are of course many different ways to get element e1; we could also use std::max_element on the lower half, or whatever we want. But this step is important, because nth_element only places the nth element correctly; the remaining elements are ordered before or after it depending on whether they are smaller or larger, and each of those ranges is otherwise unsorted.
This code has linear complexity on average, i.e., O(N), so it is asymptotically better than sort, which is O(N log N).
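For the even-size case, the max remark can be made concrete like this (a sketch, assuming <algorithm> is included and len has an even, non-zero size):
const auto median_it2 = len.begin() + len.size() / 2;
std::nth_element(len.begin(), median_it2, len.end());
// After partitioning at the upper middle, the lower middle is simply the largest
// element of the left half, so one max_element scan replaces the second nth_element.
const auto e2 = *median_it2;
const auto e1 = *std::max_element(len.begin(), median_it2);
return (e1 + e2) / 2;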
Regarding your code:
for (i=0; i<len.size(); i++){
if (len[i]>len[i+1])
This will not work, as you access len[len.size()] in the last iteration which does not exist.
std::sort(len.begin(), len.end());
double median = len[len.size() / 2];
will do it. You might need to take the average of the middle two elements if size() is even, depending on your requirements:
0.5 * (len[len.size() / 2 - 1] + len[len.size() / 2]);
Instead of trying to do everything at once, you should start with simple test cases and work upwards:
#include<vector>
double find_median(std::vector<double> len);
// Return the number of failures - shell interprets 0 as 'success',
// which suits us perfectly.
int main()
{
return find_median({0, 1, 1, 2}) != 1;
}
This already fails with your code (even after fixing i to be an unsigned type), so you could start debugging (even 'dry' debugging, where you trace the code through on paper; that's probably enough here).
I do note that with a smaller test case, such as {0, 1, 2}, I get a crash rather than merely failing the test, so there's something that really needs to be fixed.
Let's replace the implementation with one based on overseas's answer:
#include <algorithm>
#include <limits>
#include <vector>
double find_median(std::vector<double> len)
{
if (len.size() < 1)
return std::numeric_limits<double>::signaling_NaN();
const auto alpha = len.begin();
const auto omega = len.end();
// Find the two middle positions (they will be the same if size is odd)
const auto i1 = alpha + (len.size()-1) / 2;
const auto i2 = alpha + len.size() / 2;
// Partial sort to place the correct elements at those indexes (it's okay to modify the vector,
// as we've been given a copy; otherwise, we could use std::partial_sort_copy to populate a
// temporary vector).
std::nth_element(alpha, i1, omega);
std::nth_element(i1, i2, omega);
return 0.5 * (*i1 + *i2);
}
Now, our test passes. We can write a helper method to allow us to create more tests:
#include <cmath>
#include <iostream>
bool test_median(const std::vector<double>& v, double expected)
{
auto actual = find_median(v);
if (std::abs(expected - actual) > 0.01) {
std::cerr << actual << " - expected " << expected << std::endl;
return true;
} else {
std::cout << actual << std::endl;
return false;
}
}
int main()
{
return test_median({0, 1, 1, 2}, 1)
+ test_median({5}, 5)
+ test_median({5, 5, 5, 0, 0, 0, 1, 2}, 1.5);
}
Once you have the simple test cases working, you can manage more complex ones. Only then is it time to create a large array of random values to see how well it scales:
#include <ctime>
#include <functional>
#include <random>
int main(int argc, char **argv)
{
std::vector<double> foo;
const int n = argc > 1 ? std::stoi(argv[1]) : 10;
foo.reserve(n);
std::mt19937 rand_generator(std::time(0));
std::uniform_real_distribution<double> rand_distribution(0,0.8);
std::generate_n(std::back_inserter(foo), n, std::bind(rand_distribution, rand_generator));
std::cout << "Vector:";
for (auto v: foo)
std::cout << ' ' << v;
std::cout << "\nMedian = " << find_median(foo) << std::endl;
}
(I've taken the number of elements as a command-line argument; that's more convenient in my build than reading it from cin). Notice that instead of allocating n doubles in the vector, we simply reserve capacity for them, but don't create any until needed.
For fun and kicks, we can now make find_median() generic. I'll leave that as an exercise; I suggest you start with:
template<class Iterator>
auto find_median(Iterator alpha, Iterator omega)
{
using value_type = typename Iterator::value_type;
if (alpha == omega)
return std::numeric_limits<value_type>::signaling_NaN();
}
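If you want to peek at one way the exercise could come out (a sketch only, mirroring the nth_element approach and the NaN-for-empty convention of the non-generic version above):
#include <algorithm>
#include <iterator>
#include <limits>
#include <vector>

template<class Iterator>
auto find_median(Iterator alpha, Iterator omega)
{
    using value_type = typename std::iterator_traits<Iterator>::value_type;
    if (alpha == omega)
        return std::numeric_limits<value_type>::signaling_NaN();
    // Work on a copy so the caller's range is left untouched.
    std::vector<value_type> v(alpha, omega);
    const auto i1 = v.begin() + (v.size() - 1) / 2;
    const auto i2 = v.begin() + v.size() / 2;
    std::nth_element(v.begin(), i1, v.end());
    std::nth_element(i1, i2, v.end());
    return static_cast<value_type>(0.5 * (*i1 + *i2));
}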

How to convert Biginteger to string

I have a vector with the digits of a number; the vector represents a big integer in a system with base 2^32. For example:
vector <unsigned> vec = {453860625, 469837947, 3503557200, 40}
This vector represents this big integer:
base = 2 ^ 32
3233755723588593872632005090577 = 40 * base ^ 3 + 3503557200 * base ^ 2 + 469837947 * base + 453860625
How to get this decimal representation in string?
Here is an inefficient way to do what you want: get a decimal string from a vector of word values representing an integer of arbitrary size.
I would have preferred to implement this as a class, for better encapsulation and so math operators could be added, but to better comply with the question, this is just a bunch of free functions for manipulating std::vector<unsigned> objects. This does use a typedef BiType as an alias for std::vector<unsigned> however.
Functions for doing the binary division make up most of this code. Much of it duplicates what can be done with std::bitset, but for bit sets of arbitrary size, stored as vectors of unsigned words. If you want to improve efficiency, plug in a division algorithm which does per-word operations instead of per-bit. Also, the division code is general-purpose even though it is only ever used to divide by 10, so you could replace it with special-purpose division code.
The code generally assumes a vector of unsigned words and also that the base is the maximum unsigned value, plus one. I left a comment wherever things would go wrong for smaller bases or bases which are not a power of 2 (binary division requires base to be a power of 2).
Also, I only tested for 1 case, the one you gave in the OP -- and this is new, unverified code, so you might want to do some more testing. If you find a problem case, I'll be happy to fix the bug here.
#include <iostream>
#include <string>
#include <vector>
#include <algorithm>
namespace bigint {
using BiType = std::vector<unsigned>;
// cmp compares a with b, returning 1:a>b, 0:a==b, -1:a<b
int cmp(const BiType& a, const BiType& b) {
const auto max_size = std::max(a.size(), b.size());
for(auto i=max_size-1; i+1; --i) {
const auto wa = i < a.size() ? a[i] : 0;
const auto wb = i < b.size() ? b[i] : 0;
if(wa != wb) { return wa > wb ? 1 : -1; }
}
return 0;
}
bool is_zero(BiType& bi) {
for(auto w : bi) { if(w) return false; }
return true;
}
// canonize removes leading zero words
void canonize(BiType& bi) {
const auto size = bi.size();
if(!size || bi[size-1]) return;
for(auto i=size-2; i+1; --i) {
if(bi[i]) {
bi.resize(i + 1);
return;
}
}
bi.clear();
}
// subfrom subtracts b from a, modifying a
// a >= b must be guaranteed by caller
void subfrom(BiType& a, const BiType& b) {
unsigned borrow = 0;
for(std::size_t i=0; i<b.size(); ++i) {
if(b[i] || borrow) {
// TODO: handle error if i >= a.size()
const auto w = a[i] - b[i] - borrow;
// this relies on the automatic w = w (mod base),
// assuming unsigned max is base-1
// if this is not the case, w must be set to w % base here
borrow = w >= a[i];
a[i] = w;
}
}
for(auto i=b.size(); borrow; ++i) {
// TODO: handle error if i >= a.size()
borrow = !a[i];
--a[i];
// a[i] must be set modulo base here too
// (this is automatic when base is unsigned max + 1)
}
}
// binary division and its helpers: these require base to be a power of 2
// hi_bit_set is base/2
// the definition assumes CHAR_BIT == 8
const auto hi_bit_set = unsigned(1) << (sizeof(unsigned) * 8 - 1);
// shift_right_1 divides bi by 2, truncating any fraction
void shift_right_1(BiType& bi) {
unsigned carry = 0;
for(auto i=bi.size()-1; i+1; --i) {
const auto next_carry = (bi[i] & 1) ? hi_bit_set : 0;
bi[i] >>= 1;
bi[i] |= carry;
carry = next_carry;
}
// if carry is nonzero here, 1/2 was truncated from the result
canonize(bi);
}
// shift_left_1 multiplies bi by 2
void shift_left_1(BiType& bi) {
unsigned carry = 0;
for(std::size_t i=0; i<bi.size(); ++i) {
const unsigned next_carry = !!(bi[i] & hi_bit_set);
bi[i] <<= 1; // assumes high bit is lost, i.e. base is unsigned max + 1
bi[i] |= carry;
carry = next_carry;
}
if(carry) { bi.push_back(1); }
}
// sets an indexed bit in bi, growing the vector when required
void set_bit_at(BiType& bi, std::size_t index, bool set=true) {
std::size_t widx = index / (sizeof(unsigned) * 8);
std::size_t bidx = index % (sizeof(unsigned) * 8);
if(bi.size() < widx + 1) { bi.resize(widx + 1); }
if(set) { bi[widx] |= unsigned(1) << bidx; }
else { bi[widx] &= ~(unsigned(1) << bidx); }
}
// divide divides n by d, returning the result and leaving the remainder in n
// this is implemented using binary division
BiType divide(BiType& n, BiType d) {
if(is_zero(d)) {
// TODO: handle divide by zero
return {};
}
std::size_t shift = 0;
while(cmp(n, d) == 1) {
shift_left_1(d);
++shift;
}
BiType result;
do {
if(cmp(n, d) >= 0) {
set_bit_at(result, shift);
subfrom(n, d);
}
shift_right_1(d);
} while(shift--);
canonize(result);
canonize(n);
return result;
}
std::string get_decimal(BiType bi) {
std::string dec_string;
// repeat division by 10, using the remainder as a decimal digit
// this will build a string with digits in reverse order, so
// before returning, it will be reversed to correct this.
do {
const auto next_bi = divide(bi, {10});
const char digit_value = static_cast<char>(bi.size() ? bi[0] : 0);
dec_string.push_back('0' + digit_value);
bi = next_bi;
} while(!is_zero(bi));
std::reverse(dec_string.begin(), dec_string.end());
return dec_string;
}
}
int main() {
bigint::BiType my_big_int = {453860625, 469837947, 3503557200, 40};
auto dec_string = bigint::get_decimal(my_big_int);
std::cout << dec_string << '\n';
}
Output:
3233755723588593872632005090577
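If you want the per-word division mentioned above, here is a minimal sketch of a special-purpose divide-by-10 (like the rest of the code, it assumes unsigned is exactly 32 bits):
#include <vector>

// Divide a little-endian base-2^32 number by 10 in one pass over the words,
// returning the decimal remainder. Leading zero words are trimmed afterwards.
unsigned divmod10(std::vector<unsigned>& bi) {
    unsigned long long rem = 0;
    for (auto i = bi.size(); i-- > 0; ) {                 // most significant word first
        const unsigned long long cur = (rem << 32) | bi[i];
        bi[i] = static_cast<unsigned>(cur / 10);
        rem = cur % 10;
    }
    while (!bi.empty() && bi.back() == 0) bi.pop_back();  // canonize
    return static_cast<unsigned>(rem);
}
get_decimal could then just repeat dec_string.push_back('0' + divmod10(bi)); until is_zero(bi) holds, instead of calling the general-purpose divide.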

How to convert recursive to iterative solution

I've managed to write my algorithm in a recursive way:
int fib(int n) {
if (n == 1)
return 3;
else if (n == 2)
return 2;
else
return fib(n - 2) + fib(n - 1);
}
Currently I'm trying to convert it to an iterative approach, without success:
int fib(int n) {
int i = 0, j = 1, k, t;
for (k = 1; k <= n; ++k)
{
if(n == 1) {
j = 3;
}
else if(n == 2) {
j = 2;
}
else {
t = i + j;
i = j;
j = t;
}
}
return j;
}
So how can I rectify my code to reach my goal?
Solving this problem by a general convert-to-iterative is a bad idea. But, that is what you asked.
None of these are good ways to solve fib: there are closed form solutions for fib, and/or iterative solutions that are cleaner, and/or recursive memoized solutions. Rather, I'm showing relatively mechanical techniques for taking a recursive function (that isn't tail-recursive or otherwise simple to solve), and solving it without using the automatic storage stack (recursion).
I have had code that does too deep a recursive nesting and blows the stack in medium-high complexity cases; when refactored to iterative, the problem went away. These are the kinds of solutions required when what you have is a recursive solution you half understand, and you need it to be iterative.
The general means to convert a recursive to an iterative solution is to manage the stack manually.
In this case, I'll also memoize return values.
We cache the return values in retvals.
If we cannot immediately solve a problem, we state what problems we first need to solve in order to solve our problem (in particular, the n-1 and n-2 cases). Then we queue up solving our problem again (by which point, we will have what we need ready go).
int fib( int n ) {
std::map< int, int > retvals {
{1,3},
{2,2}
};
std::vector<int> arg;
arg.push_back(n);
while( !arg.empty() ) {
int n = arg.back();
arg.pop_back();
// have we solved this already? If so, stop.
if (retvals.count(n)>0)
continue;
// are we done? If so, calculate the result:
if (retvals.count(n-1)>0 && retvals.count(n-2)>0) {
retvals[n] = retvals[n-1] + retvals[n-2];
continue;
}
// to calculate n, first calculate n-1 and n-2:
arg.push_back(n); arg.push_back(n-1); arg.push_back(n-2);
}
return retvals[n];
}
No recursion, just a loop.
A "dumber" way to do this is to take the function and make it a pseudo-coroutine.
First, rewrite your recursive code to do one thing per line:
int fib(int n) {
if(n == 1)
return 3;
if (n == 2)
return 2;
int a = fib(n-2);
int b = fib(n-1);
return a+b;
}
Next, create a struct with all of the functions' state:
struct fib_data {
int n, a, b, r;
};
and add labels at each point where we make a recursive call, and an enum with similar names:
enum Calls {
e1, e2
};
int fib(int n) {
fib_data d;
d.n = n;
if(d.n == 1)
return 3;
if (d.n == 2)
return 2;
d.a = fib(n-2);
CALL1:
d.b = fib(n-1);
CALL2:
d.r = d.a+d.b;
return d.r;
}
add a Calls location member to your fib_data.
Next create a stack of fib_data:
enum Calls {
e0, e1, e2
};
struct fib_data {
Calls loc = Calls::e0;
int n, a, b, r;
};
int fib(int n) {
std::vector<fib_data> stack;
stack.push_back({n});
if(stack.back().n == 1)
return 3;
if (stack.back().n == 2)
return 2;
stack.back().a = fib(stack.back().n-2);
CALL1:
stack.back().b = fib(stack.back().n-1);
CALL2:
stack.back().r = stack.back().a + stack.back().b;
return stack.back().r;
}
Now create a loop. Instead of recursively calling, set the return location in your fib_data, push a new fib_data onto the stack with the argument n and location e0, then continue the loop. At the top of the loop, switch on the location of the stack's top frame.
To return: Create a function local variable r to store return values. To return, set r, pop the stack, and continue the loop.
If the stack is empty at the start of the loop, return r from the function.
enum Calls {
e0, e1, e2
};
struct fib_data {
int n, a, b, r;
Calls loc = Calls::e0;
};
int fib(int n) {
std::vector<fib_data> stack;
stack.push_back({n});
int r;
while (!stack.empty()) {
switch(stack.back().loc) {
case e0: break;
case e1: goto CALL1;
case e2: goto CALL2;
};
if(stack.back().n == 1) {
r = 3;
stack.pop_back();
continue;
}
if (stack.back().n == 2){
r = 2;
stack.pop_back();
continue;
}
stack.back().loc = e1;
stack.push_back({stack.back().n-2});
continue;
CALL1:
stack.back().a = r;
stack.back().loc = e2;
stack.push_back({stack.back().n-1});
continue;
CALL2:
stack.back().b = r;
stack.back().r = stack.back().a + stack.back().b;
r = stack.back().r;
stack.pop_back();
continue;
}
return r;
}
Then note that b and r do not have to be in the stack frame; remove them and make them locals.
This "dumb" transformation emulates what the C++ compiler does when you recurse, but the stack is stored in the free store instead of automatic storage, and can reallocate.
If pointers to the local variables need to persist, using a std::vector for the stack won't work. Replace the pointers with offsets into the standard vector, and it will work.
This should be fib(0) = 0, fib(1) = 1, fib(2) = 1, fib(3) = 2, fib(4) = 3, fib(5) = 5, fib(6) = 8, ... .
int fib(int n)
{
int f0, f1, t;
if(n < 2)
return n;
n -= 2;
f0 = 1;
f1 = 1;
while(n--){
t = f1+f0;
f0 = f1;
f1 = t;
}
return f1;
}
or you can unfold the loop a bit, and get rid of the temp variable:
int fib(int n)
{
int f0, f1;
if(n < 2)
return n;
f0 = 1-(n&1);
f1 = 1;
while(0 < (n -= 2)){
f0 += f1;
f1 += f0;
}
return f1;
}
This is a classic problem. You cannot simply get rid of the recursion if you are given n and you want to calculate downwards.
The solution is dynamic programming: basically you want to create an array of size n, then, starting from index 0, fill it up until you reach index n-1.
Something like this:
int fib(int n)
{
int buffer[n]; // buffer[i] holds fib(i+1); assumes n >= 2 (variable-length arrays are a compiler extension, not standard C++)
buffer[0]=3;
buffer[1]=2;
for(int i=2;i<n; ++i)
{
buffer[i] = buffer[i-1] + buffer[i-2];
}
return buffer[n-1];
}
Alternatively, to save memory and not use a big array, you can use:
int fib(int n)
{
int buffer [2];
buffer[0] = 3;
buffer[1] = 2;
for(int i=3; i<=n; i++)
{
int tmp = buffer[0] + buffer[1];
buffer[0] = buffer[1];
buffer[1] = tmp;
}
return buffer[1];
}
For the sake of completeness, here is the iterative solution with O(1) space complexity:
int fib(int n)
{
int i;
int a0 = 3;
int a1 = 2;
int tmp;
if (n == 1)
return a0;
for (i = 3; i <=n; i++ )
{
tmp = a0 + a1;
a0 = a1;
a1 = tmp;
}
return a1;
}

Thread 1: EXC_Bad_Access (Code=1, address=0x7fff55056148) While Trying to Use Ofstream and an integer data type named count

I am currently working to solve Project Euler's Problem #60: http://projecteuler.net/problem=60 (Just in case if you want to try and follow my logic).
The issue is that after I build my code (which completes without errors) and then run it, I get the error "Thread 1: EXC_Bad_Access (Code=1, address=0x7fff55056148)" from the IDE's built-in debugger. More specifically, the error occurs only within my Combinations function. The lines that get highlighted are currently disabled with "//" comments inside that function, so right now the code runs without any errors because all the error-causing lines are commented out. If you un-comment any of those lines, or any combination of them, the code runs into the same error listed above.
Personal Comments from Experimentation:
What I found was that any line that touches either the ofstream or the integer I initialized called count causes the error. The ofstream kind of makes sense, but even after disabling all lines of code related to ofstream, the integer count suddenly starts causing the error.
Any help would be much appreciated! I am still a beginner with C++ (started about two to three weeks ago).
#include <iostream>
#include <cmath>
#include <fstream>
using namespace std;
/* double x = 2 , y = 2 , b = 3, s = 2; */
/* int z, c = 1, v = 3000; */
int AllPrimes[3000];
/* int AllCombos[2018257871250650][5]; */ // disabled this line for now.
//Used to be within Combinations; Moved here to circumvent "Bad Access" Error
int FindPrimes();
int TestforPrime(double y);
int Combinations();
int WriteArrayToFile(int *ArrayPointer,int ArrayLength, string FileName, char Append);
int main()
{
cout<<FindPrimes();
cout<<Combinations();
}
int Combinations() {
int i1, i2, i3, i4, i5, /* ai */ bi, ci, di, ei;
int ZeroPointBreaker=0;
//ofstream BufferFlushed ("/Users/Yash/Xcode/Projects/Project Euler Programs/Project Euler Problem 60 (Weird Prime Cocatenate Property Problem)/I:O Files/");
int count=0;
int Buffer[9000000][5];
for (i1=0; i1<2996; i1++) {
count++;
// cout<<"Index 1 Iteration: "<<i1<<" || Count Value: "<<count<<"\n";
bi = i1 + 1;
for (i2=bi; i2<2997; i2++) {
count++;
// cout<<"Index 2 Iteration: "<<i2<<" || Count Value: "<<count<<"\n";
ci = i2+ 1;
for (i3=ci; i3<2998; i3++) {
count++;
di = i3 + 1;
for (i4=di; i4<2999; i4++) {
count++;
ei = i4 + 1;
for (i5=ei; i5<3000; i5++) {
count++;
// Buffer[count][0]=AllPrimes[i1];
// Buffer[count][1]=AllPrimes[i2];
// Buffer[count][2]=AllPrimes[i3];
// Buffer[count][3]=AllPrimes[i4];
// Buffer[count][4]=AllPrimes[i5];
}
}
}
//Flush Here
// count=0;
/* for (int i=0; i<9000000; i++) {
if (Buffer[i][1]==0) {ZeroPointBreaker=i; break;}
} */
// for (int i=0; i<ZeroPointBreaker; i++) {
// BufferFlushed<<Buffer[i][1]<<','<<Buffer[i][2]<<','<<Buffer[i][3]<<','<<Buffer[i][4]<<','<<Buffer[i][5]<<'\n';
// }
}
}
//End of Code Statements
//BufferFlushed.close();
return 0;
}
int FindPrimes() {
cout.precision(0);
AllPrimes[0]=2;
double b = 3, s = 2;
int z, c = 1, v = 3000;
while ( c != v ) {
z = TestforPrime(b);
if ( z == 1 ) {
AllPrimes[c]=b;
c = c + 1;
s = s + b;
if ( c == v ) {
cout<<fixed<<" Prime="<<b<<" Count="<<c<<" "<<"Sum="<<s<<"\n";
int success = WriteArrayToFile(AllPrimes,3000,"/Users/Yash/Xcode/Projects/Project Euler Programs/Project Euler Problem 60 (Weird Prime Cocatenate Property Problem)/I:O Files/AllPrimes.txt",'n');
cout<<"\n Write Success (0=Successful): "<<success<<"\n";
if (success == 0) {return 0;}
else {return 1;}
}
else {
};
}
else {
};
b = b + 2;
}
}
int WriteArrayToFile(int *ArrayPointer,int ArrayLength, string FileName, char Append) {
if (Append == 'y') {
ofstream OutputFile (FileName, ios::app);
for ( unsigned long long i1=0 ; i1 < ArrayLength ; i1++) {
OutputFile<<ArrayPointer[i1]<<"\n";
}
OutputFile.close();
return 0;}
else if (Append == 'n') {
ofstream OutputFile (FileName);
for ( unsigned long long i1=0 ; i1 < ArrayLength ; i1++) {
OutputFile<<ArrayPointer[i1]<<"\n";
}
OutputFile.close();
return 0;}
}
int TestforPrime (double y) {
double x = 2;
while ( x <= y ) {
if ( (( y / x ) - int( y / x )) == 0 ) {
if ( y == x ) {
return 1;
}
else {
return 0;
}
}
x = x + 1;
}
}
This variable:
int Buffer[9000000][5];
takes up 45,000,000 * 4 bytes. That's 180 MB. You can't fit that on the stack. Use a global variable or dynamic allocation (or, more likely, another solution; I haven't looked at the problem itself, so I don't know whether your approach is "right").
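For illustration, a minimal sketch of the dynamic-allocation option (the 9,000,000 x 5 size is taken from the code above; whether you actually need a buffer that large is a separate question):
#include <array>
#include <vector>

int Combinations() {
    // Heap-allocated instead of a ~180 MB array in automatic storage.
    std::vector<std::array<int, 5>> Buffer(9000000);
    // ... fill Buffer[count] = { AllPrimes[i1], AllPrimes[i2], ... } as before ...
    return 0;
}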