Large interval upper bound of a CGAL lazy number - C++

I have an example of a sum of the form s = a_1/b_1 + a_2/b_2 + ... where the a_i and the b_i are CGAL lazy numbers, such that the interval returned by s.interval() is very large. Its lower bound is close to the expected value (and close to CGAL::as_double(s)) but its upper bound is huge (even infinite).
But if I use mydivision(a_i, b_i) (defined below) instead of a_i/b_i, then the interval is thin as expected. Here is mydivision:
typedef CGAL::Quotient<CGAL::MP_Float> Quotient;
typedef CGAL::Lazy_exact_nt<Quotient> lazyScalar;

lazyScalar mydivision(lazyScalar x1, lazyScalar x2) {
    Quotient q1 = x1.exact();
    Quotient q2 = x2.exact();
    lazyScalar q = lazyScalar(Quotient(
        q1.numerator() * q2.denominator(), q1.denominator() * q2.numerator()
    ));
    return q;
}
This is the sum (it converges to pi/2 - 1): s = 1/3 + (1*2)/(3*5) + (1*2*3)/(3*5*7) + ..., whose n-th term is n!/(3*5*...*(2n+1)).
This is the full code:
#include <iostream>
#include <vector>
#include <CGAL/number_utils.h>
#include <CGAL/Lazy_exact_nt.h>
#include <CGAL/MP_Float.h>
#include <CGAL/Quotient.h>
#include <CGAL/Interval_nt.h>
typedef CGAL::Quotient<CGAL::MP_Float> Quotient;
typedef CGAL::Lazy_exact_nt<Quotient> lazyScalar;
typedef std::vector<lazyScalar> lazyVector;
// "my" division
lazyScalar mydivision(lazyScalar x1, lazyScalar x2) {
    Quotient q1 = x1.exact();
    Quotient q2 = x2.exact();
    lazyScalar q = lazyScalar(Quotient(
        q1.numerator() * q2.denominator(), q1.denominator() * q2.numerator()
    ));
    return q;
}

// sum the elements of a vector of lazy numbers
lazyScalar lazySum(lazyVector lv) {
    const size_t n = lv.size();
    lazyScalar sum(0);
    for(size_t i = 0; i < n; i++) {
        sum += lv[i];
    }
    return sum;
}

// element-wise division of two vectors of lazy numbers
lazyVector lv1_dividedby_lv2(lazyVector lv1, lazyVector lv2) {
    const size_t n = lv1.size();
    lazyVector lv(n);
    for(size_t i = 0; i < n; i++) {
        lv[i] = lv1[i] / lv2[i];
    }
    return lv;
}

// same as above using "my" division
lazyVector lv1_mydividedby_lv2(lazyVector lv1, lazyVector lv2) {
    const size_t n = lv1.size();
    lazyVector lv(n);
    for(size_t i = 0; i < n; i++) {
        lv[i] = mydivision(lv1[i], lv2[i]);
    }
    return lv;
}

// cumulative products of the elements of a vector of lazy numbers
lazyVector lazyCumprod(lazyVector lvin) {
    const size_t n = lvin.size();
    lazyVector lv(n);
    lazyScalar prod(1);
    for(size_t i = 0; i < n; i++) {
        prod *= lvin[i];
        lv[i] = prod;
    }
    return lv;
}

// the series with the ordinary division
lazyScalar Euler(int n) {
    lazyVector lv1(n);
    for(int i = 0; i < n; i++) {
        lv1[i] = lazyScalar(i + 1);
    }
    lazyVector lv2(n);
    for(int i = 0; i < n; i++) {
        lv2[i] = lazyScalar(2*i + 3);
    }
    return lazySum(lv1_dividedby_lv2(lazyCumprod(lv1), lazyCumprod(lv2)));
}

// the series with "my" division
lazyScalar myEuler(int n) {
    lazyVector lv1(n);
    for(int i = 0; i < n; i++) {
        lv1[i] = lazyScalar(i + 1);
    }
    lazyVector lv2(n);
    for(int i = 0; i < n; i++) {
        lv2[i] = lazyScalar(2*i + 3);
    }
    return lazySum(lv1_mydividedby_lv2(lazyCumprod(lv1), lazyCumprod(lv2)));
}

// test - the difference starts to occur for n=170
int main() {
    lazyScalar euler = Euler(171);
    CGAL::Interval_nt<false> interval = euler.approx();
    std::cout << "lower bound: " << interval.inf() << "\n"; // prints 0.57
    std::cout << "upper bound: " << interval.sup() << "\n"; // prints inf
    lazyScalar myeuler = myEuler(171);
    CGAL::Interval_nt<false> myinterval = myeuler.approx();
    std::cout << "lower bound: " << myinterval.inf() << "\n"; // prints 0.57
    std::cout << "upper bound: " << myinterval.sup() << "\n"; // prints 0.57
    return 0;
}
Why is the upper bound so large?

You are computing factorials and similar products. Once they exceed the maximum value for a double, there isn't much an interval represented as a pair of doubles can do.
At least it gives you a safe answer, and you can still get the true answer by calling exact() at the end. Your division would be simpler written as return exact(x1/x2);, but you don't even need to call exact that often: exact(Euler(171)) is enough.
Computing the value as (1/3)*(2/5)*(3/7) instead of (1*2*3)/(3*5*7) would avoid having an overflow so soon.
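As an illustration, here is a minimal sketch of that reordering, reusing the question's typedefs (the name EulerInterleaved is made up for this sketch). Each term is built as a running product of ratios smaller than one, so the intermediate lazy values stay close to the term's magnitude and the double intervals never overflow:
lazyScalar EulerInterleaved(int n) {
    lazyScalar sum(0);
    lazyScalar term(1);
    for (int i = 0; i < n; i++) {
        // multiply by (i+1)/(2i+3) instead of keeping two huge cumulative products
        term *= lazyScalar(i + 1) / lazyScalar(2 * i + 3);
        sum += term;
    }
    return sum;
}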


Finding maximum

Given an integer n and an array a, find the maximum of (a[i]+a[j])*(j-i) with 1<=i<=n-1 and i+1<=j<=n.
Example:
Input
5
1 3 2 5 4
Output
21
Explanation: with i=2 and j=5, the maximum of (a[i]+a[j])*(j-i) is (3+4)*(5-2)=21.
Constraints:
n<=10^6
a[i]>0 with 1<=i<=n
I can solve this problem for n<=10^4, but what should I do when n is as large as the constraints allow?
First, let's look at the "brute force" algorithm. It has some issues, which I will call out below, but it is a correct solution.
struct Result
{
    size_t i;
    size_t j;
    int64_t value;
};

Result findBestBruteForce(const vector<int>& a)
{
    size_t besti = 0;
    size_t bestj = 0;
    int64_t bestvalue = INT64_MIN;
    for (size_t i = 0; i < a.size(); i++)
    {
        for (size_t j = i + 1; j < a.size(); j++)
        {
            // do the math in 64-bit space to avoid overflow
            int64_t value = (a[i] + (int64_t)a[j]) * (j - i);
            if (value > bestvalue)
            {
                bestvalue = value;
                besti = i;
                bestj = j;
            }
        }
    }
    return { besti, bestj, bestvalue };
}
The problem with the above code is that it runs in O(N²). Or more precisely, for the N iterations of the outer for-loop (where i goes from 0 to N), there are on average N/2 iterations of the inner for-loop. If N is small, this isn't a problem.
On my PC, with full optimizations turned on, the run time is less than a second when N is under 20000. Once N approaches 100000, it takes several seconds to process the 5 billion iterations. Let's just go with "a billion operations per second" as an expected rate. If N were 1000000, the maximum the OP outlined, it would probably take 500 seconds. Such is the nature of an N-squared algorithm.
So how can we speed it up? Here's an interesting observation. Let's say our array was this:
10 5 4 15 13 100 101 6
On the first iteration of the outer loop above, where i=0, we'd be computing this on each iteration of the inner loop:
for each j: (a[0]+a[j])*(j-0)
for each j: (10+a[j])*(j-0)
for each j: [15*1, 14*2, 25*3, 23*4, 110*5, 111*6, 16*7]
= [15, 28, 75, 92, 550, 666, 112]
Hence, for i=0, a[i] = 10 and the largest value computed from that set is 666.
Since a[0] is 10, and we're tracking a current "best" value, there's no incentive to iterate all the values again for i=1, since a[1]==5 is less than 10. There's no j index that would make (a[1]+a[j])*(j-1) larger than what's already been found, because (5+a[j])*(j-1) is always less than (10+a[j])*(j-0). (This assumes all values in the array are non-negative.)
So to generalize, the outer loop can skip over any index i where a[best_i] > a[i]. And that's a really simple alteration to our above code:
Result findBestOptimized(const std::vector<int>& a)
{
    if (a.size() < 2)
    {
        return { 0, 0, INT64_MIN };
    }
    size_t besti = 0;
    size_t bestj = 0;
    int64_t bestvalue = INT64_MIN;
    int minimum = INT_MIN;
    for (size_t i = 0; i < a.size(); i++)
    {
        if (a[i] <= minimum)
        {
            continue;
        }
        for (size_t j = i + 1; j < a.size(); j++)
        {
            int64_t value = (a[i] + (int64_t)a[j]) * (j - i);
            if (value > bestvalue)
            {
                bestvalue = value;
                besti = i;
                bestj = j;
                minimum = a[i];
            }
        }
    }
    return { besti, bestj, bestvalue };
}
Above, we introduce a minimum threshold that a[i] must exceed before we bother doing the full inner-loop enumeration.
I benchmarked this with build optimizations on. On a random array of a million items, it runs in under a second.
But wait... there's another optimization!
If the inner loop fails to find an index j such that value > bestvalue, we still know that the current a[i] was greater than minimum, and any later index i' with a[i'] <= a[i] can only produce values dominated by the candidates we just examined. Hence, we can raise minimum to a[i] unconditionally at the end of the inner loop.
Now, I'll present the final solution:
Result findBestOptimizedEvenMore(const std::vector<int>& a)
{
    if (a.size() < 2)
    {
        return { 0, 0, INT64_MIN };
    }
    size_t besti = 0;
    size_t bestj = 0;
    int64_t bestvalue = INT64_MIN;
    int minimum = INT_MIN;
    for (size_t i = 0; i < a.size(); i++)
    {
        if (a[i] <= minimum)
        {
            continue;
        }
        for (size_t j = i + 1; j < a.size(); j++)
        {
            int64_t value = (a[i] + (int64_t)a[j]) * (j - i);
            if (value > bestvalue)
            {
                bestvalue = value;
                besti = i;
                bestj = j;
            }
        }
        minimum = a[i]; // since we know a[i] > minimum, we can do this
    }
    return { besti, bestj, bestvalue };
}
I benchmarked the above solution on different array sizes from N=100 to N=1000000. It does all iterations in under 25 milliseconds.
In the above solution, there's likely a worst case runtime of O(N²) again when all the items in the array are in ascending order. But I believe the average case should be on the order of O(N lg N) or better. I'll do some more analysis later if anyone is interested.
Note: Some notation for the variables and the Result struct in the code below has been copied from #selbie's excellent answer.
Here's another O(n^2) worst-case solution with (likely provable) O(n) expected performance on random permutations and room for optimization.
Suppose [i, j] are the indices of an optimal pair. For the pair to be optimal, all elements left of i must be strictly less than A[i], and all elements right of j must be strictly less than A[j]: otherwise we could move an endpoint outward without decreasing the sum while increasing j-i.
This means we can compute the left-maxima of A: all elements strictly greater than all previous elements, as well as the right-maxima of A. Then, we only need to consider left endpoints from the left-maxima and right endpoints from the right-maxima.
I don't know the expectation of the product of the sizes of the left- and right-maxima sets, but we can get an upper bound. The size of the left-maxima set is at most the length of the longest increasing subsequence (LIS) of A, and the size of the right-maxima set is at most the length of the longest decreasing subsequence (LDS). These aren't independent, but I'm taking as an (unproven) assumption that the LIS and LDS lengths are inversely correlated with each other for random permutations. The right-maxima must start after the left-maxima end, so this seems like a safe assumption.
The length of the LIS of a random permutation follows the Tracy-Widom distribution (after centering and scaling); it has mean about 2*sqrt(N) and standard deviation of order N^(1/6). The expected square of the length is therefore about (2*sqrt(N))^2 + O(N^(1/3)), so ~4N, which is linear in N. This isn't exactly the proof we wanted, since you'd need to integrate over the full density to be rigorous, and the LIS length is only an upper bound on the left-maxima size, but I think the conclusion is still true.
C++ code (Result class and some variable names taken from selbie's post, as mentioned):
struct Result
{
    size_t i;
    size_t j;
    int64_t value;
};

Result find_best_sum_size_product(const std::vector<int>& nums)
{
    /* Given: list of positive integers nums
       Returns: Tuple with (best_i, best_j, best_product)
       where best_i and best_j maximize the product
       (nums[i]+nums[j])*(j-i) over 0 <= i < j < n
       Runtime: O(n^2) worst case,
       O(n) average on random permutations.
    */
    int n = nums.size();
    if (n < 2)
    {
        return { 0, 0, INT64_MIN };
    }
    std::vector<int> left_maxima_indices;
    left_maxima_indices.push_back(0);
    for (int i = 1; i < n; i++) {
        if (nums.at(i) > nums.at(left_maxima_indices.back())) {
            left_maxima_indices.push_back(i);
        }
    }
    std::vector<int> right_maxima_indices;
    right_maxima_indices.push_back(n - 1);
    for (int i = n - 1; i >= 0; i--) {
        if (nums.at(i) > nums.at(right_maxima_indices.back())) {
            right_maxima_indices.push_back(i);
        }
    }
    size_t best_i = 0;
    size_t best_j = 0;
    int64_t best_product = INT64_MIN;
    int i = 0;
    int j = 0;
    for (size_t left_idx = 0;
         left_idx < left_maxima_indices.size();
         left_idx++)
    {
        i = left_maxima_indices.at(left_idx);
        for (size_t right_idx = 0;
             right_idx < right_maxima_indices.size();
             right_idx++)
        {
            j = right_maxima_indices.at(right_idx);
            if (i == j) continue;
            int64_t value = (nums.at(i) + (int64_t)nums.at(j)) * (j - i);
            if (value > best_product)
            {
                best_product = value;
                best_i = i;
                best_j = j;
            }
        }
    }
    return { best_i, best_j, best_product };
}
I started from the two excellent answers by #selbie and #kcsquared.
Their solutions gave impressive results for random inputs. What was not clear was the worst-case behavior.
Which sequence would correspond to the worst case?
I finally found a critical sequence for these two answers, a triangle sequence: it increases slightly up to a maximum and then decreases slightly. With such a sequence and n = 10^5, for example, these answers take more than 10 s.
My solution starts from #selbie's solution and adds two improvements:
I add #kcsquared's trick: to the right of j, there can only be lower elements
When considering a new left element a[i], it is useless to start from i + 1 to look for the second element. We can start from the current best_j
With these tricks, I was able to improve the performance of the two posted answers a little. However, it still fails to solve the triangle-sequence issue: about 10 s for n = 10^5.
#include <iostream>
#include <vector>
#include <string>
#include <cstdlib>
#include <cstdint>
#include <climits>
#include <ctime>
#include <chrono>
struct Result {
    size_t i;
    size_t j;
    int64_t value;
};

void print (const Result& res, const std::string& prefix = "") {
    std::cout << prefix;
    std::cout << "(" << res.i << ", " << res.j << ") -> " << res.value << std::endl;
}
Result findBest(const std::vector<int>& a) {
    if (a.size() < 2) {
        return { 0, 0, INT64_MIN };
    }
    int n = a.size();
    std::vector<int> next_max(n, -1);
    int current_max = n - 1;
    for (int i = n - 1; i >= 0; --i) {
        if (a[i] > a[current_max]) {
            current_max = i;
        }
        next_max[i] = current_max;
    }
    size_t besti = 0;
    size_t bestj = 0;
    int64_t bestvalue = INT64_MIN;
    int minimum = INT_MIN;
    for (size_t i = 0; i < a.size(); i++) {
        if (a[i] <= minimum) {
            continue;
        }
        minimum = a[i];
        size_t jmin = (bestj > i) ? bestj : i + 1;
        for (size_t j = jmin; j < a.size(); j++) {
            j = next_max[j]; // jump to the next maximum on the right
            int64_t value = (a[i] + (int64_t)a[j]) * (j - i);
            if (value > bestvalue) {
                bestvalue = value;
                besti = i;
                bestj = j;
            }
        }
    }
    return { besti, bestj, bestvalue };
}
int main() {
    int n = 1000000;
    int vmax = 100000000;
    std::vector<int> A (n);
    std::srand(std::time(0));
    for (int i = 0; i < n; ++i) {
        A[i] = rand() % vmax + 1;
    }
    std::cout << "n = " << n << std::endl;

    auto t0 = std::chrono::high_resolution_clock::now();
    auto res = findBest (A);
    auto t1 = std::chrono::high_resolution_clock::now();
    auto duration = std::chrono::duration_cast<std::chrono::microseconds>(t1 - t0).count();
    print (res, "Random: ");
    std::cout << "time = " << duration/1000 << " ms" << std::endl;

    int i_max = n/2;
    for (int i = 0; i < i_max; ++i) A[i] = i + 1;
    A[i_max] = 10 * i_max;
    for (int i = i_max + 1; i < n; ++i) {
        A[i] = 2*i_max - i;
    }
    t0 = std::chrono::high_resolution_clock::now();
    res = findBest (A);
    t1 = std::chrono::high_resolution_clock::now();
    duration = std::chrono::duration_cast<std::chrono::microseconds>(t1 - t0).count();
    print (res, "Triangle sequence: ");
    std::cout << "time = " << duration/1000 << " ms" << std::endl;
    return 0;
}

Trying to implement Durand-Kerner-Method in C++ using Matrices

My implementation of the Durand-Kerner method (https://en.wikipedia.org/wiki/Durand%E2%80%93Kerner_method) does not seem to work. I believe (see the following code) that I am not calculating the new approximations correctly in the algorithm part itself, and I cannot seem to fix the problem. I would be very grateful for any advice.
#include <complex>
#include <cmath>
#include <vector>
#include <iostream>
#include "DurandKernerWeierstrass.h"

using namespace std;
using Complex = complex<double>;
using vec = vector<Complex>;
using Matrix = vector<vector<Complex>>;

//PRE: Receives input value of polynomial, degree and coefficients
//POST: Outputs y(x) value
Complex Polynomial(vec Z, int n, Complex x) {
    Complex y = pow(x, n);
    for (int i = 0; i < n; i++){
        y += Z[i] * pow(x, (n - i - 1));
    }
    return y;
}

/*PRE: Takes a test value, degree of polynomial, vector of coefficients and the desired
precision of polynomial roots to calculate the roots*/
//POST: Outputs the roots of Polynomial
Matrix roots(vec Z, int n, int iterations, const double precision) {
    Complex z = Complex(0.4, 0.9);
    Matrix P(iterations, vec(n, 0));
    Complex w;
    //Creating Matrix with initial starting values
    for (int i = 0; i < n; i++) {
        P[0][i] = pow(z, i);
    }
    //Durand Kerner Algorithm
    for (int col = 0; col < iterations; col++) {
        // I believe this is the point where everything is going wrong
        for (int row = 0; row < n; row++) {
            Complex g = Polynomial(Z, n, P[col][row]);
            for (int k = 0; k < n; k++) {
                if (k != row) {
                    g = g / (P[col][row] - P[col][k]);
                }
            }
            P[col][row] -= g;
        }
        return P;
    }
}
The following code is what I am using to test the function:
int main() {
    //Initializing section
    vec A = {1, -3, 3, -5};
    int n = 3;
    int iterations = 10;
    const double precision = 1.0e-10;
    Matrix p = roots(A, n, iterations, precision);
    for (int i = 0; i < iterations; i++) {
        for (int j = 0; j < n; j++) {
            cout << "p[" << i << "][" << j << "] = " << p[i][j] << " ";
        }
        cout << endl;
    }
    return 0;
}
It is important to note that the code includes a header file ("DurandKernerWeierstrass.h") whose contents are not shown here.
Your problem is that you do not transcribe the new values into the next data record with index col+1. Thus in the next loop you start again with a data set of zero entries. Change to
P[col+1][row] = P[col][row] - g;
If you want to use the new improved approximation immediately for all following approximations, then use
P[col+1][row] = (P[col][row] -= g);
Then the data sets all contain the next approximations, especially the first one will no longer contain the initially set powers.
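Putting it together, a minimal sketch of the corrected iteration inside roots() (not literal code from the answer): the col loop now stops at iterations - 1 so the write to row col + 1 stays within the matrix, and the return moves after the loop:
for (int col = 0; col < iterations - 1; col++) {
    for (int row = 0; row < n; row++) {
        Complex g = Polynomial(Z, n, P[col][row]);
        for (int k = 0; k < n; k++) {
            if (k != row) {
                g = g / (P[col][row] - P[col][k]);
            }
        }
        // carry the improved approximation into the next record
        P[col + 1][row] = P[col][row] - g;
    }
}
return P;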

How to distribute numbers by a certain percentage randomly within a matrix

I want to distribute numbers randomly within a matrix according to given percentages. For example, I have a 150*150 matrix and I want to fill it with 0, 1, and 2 at random,
with percentages like this: 10% for 0, 30% for 2, and 60% for 1. What should I do? I actually tried something, but without the percentages, and it didn't work perfectly:
for (int i = 0; i < 151; i++) {
    for (int j = 0; j < 151; j++) {
        if (random(100) < 10) {
            Array(i, j) = 1;
        }
        if (random(50) < 10) {
            Array(i, j) = 2;
        }
    }
}
Since C++11, the Standard Library provides std::discrete_distribution, defined in header <random>, which produces random integers on the interval [0, n), where the probability of each individual integer i is defined as w_i/S, that is, the weight of the i-th integer divided by the sum of all n weights.
Given OP's percentages:
std::discrete_distribution<int> d({10, 60, 30});
Here is a testable code snippet, a minimal sketch of filling the matrix this way (the tally at the end just checks the empirical percentages):
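#include <iostream>
#include <random>
#include <vector>

int main()
{
    const int rows = 150, cols = 150;
    std::mt19937 gen(std::random_device{}());
    // weights: 10% -> value 0, 60% -> value 1, 30% -> value 2
    std::discrete_distribution<int> d({10, 60, 30});
    std::vector<std::vector<int>> matrix(rows, std::vector<int>(cols));
    std::vector<int> tally(3, 0);
    for (auto& row : matrix)
        for (auto& cell : row)
            ++tally[cell = d(gen)];
    // print the empirical percentages as a sanity check
    for (int v = 0; v < 3; ++v)
        std::cout << v << ": " << 100.0 * tally[v] / (rows * cols) << "%\n";
    return 0;
}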
I'm quite sure that this is not the most efficient way to approach this problem, and my beginner C++ code should never be used as it is, but if you still want a reference, this is how I did it:
#include <cmath>
#include <cstdint>
#include <iostream>
#include <random>
#include <tuple>
#include <vector>

#define ROWS 5
#define COLS 5

using tuple = std::tuple<int, int>;

const int Percentage(const int value)
{
    const int percent = std::round((value / 100.0) * (ROWS * COLS));
    std::cout << value << "% of " << ROWS * COLS << " : " << percent << std::endl;
    return percent;
}

const int RandomIndex(const int& size)
{
    std::mt19937 range;
    range.seed(std::random_device()());
    std::uniform_int_distribution<std::mt19937::result_type> dist(0, size);
    return dist(range);
}

void FillMatrix(int matr[][COLS], std::vector<tuple>& num)
{
    // holds the numbers, from which a random number
    // will be stored to the matrix
    std::vector<int> fillers;
    // holds the random index among the fillers
    uint8_t random_index;
    for (int i = 0; i < ROWS; i++) {
        for (int j = 0; j < COLS; j++) {
            /*
             * check if the count of a particular number to be added to
             * the matrix is zero or not.
             * if zero : then don't append to filler vector
             * else : append to filler vector
             */
            for (tuple item : num) {
                if (std::get<1>(item) != 0) {
                    fillers.emplace_back(std::get<0>(item));
                }
            }
            // get the index of a random item in fillers vector
            random_index = RandomIndex(fillers.size() - 1);
            // insert this random element to matrix
            matr[i][j] = fillers[random_index];
            /*
             * find the percentage value (or count) of the number
             * corresponding to the random number and decrement it
             * so as to denote that it has been used.
             */
            for (tuple& item : num) {
                if (std::get<0>(item) == fillers[random_index]) {
                    std::get<1>(item) -= 1;
                }
            }
            // clear the current fillers vector
            fillers.clear();
        }
    }
}

int main()
{
    int matrix[ROWS][COLS];
    // each tuple has a number and its corresponding percentage
    std::vector<tuple> numbers = {tuple(0, Percentage(10)),
                                  tuple(1, Percentage(30)),
                                  tuple(2, Percentage(60))};
    // fill the matrix with values provided in the vector
    FillMatrix(matrix, numbers);
    // print the matrix
    for (int i = 0; i < ROWS; i++) {
        for (int j = 0; j < COLS; j++) {
            std::cout << matrix[i][j] << "\t";
        }
        std::cout << "\n";
    }
    return 0;
}
Define ROWS and COLS as 150 in your case.

Algorithm to compute mode

I'm trying to devise an algorithm in the form of a function that accepts two parameters, an array and the size of the array. I want it to return the mode of the array, and if there are multiple modes, return their average. My strategy is to take the array and first sort it. Then count the occurrences of each number: while that number keeps occurring, add one to a counter, and store that count in an array m. So m holds all the counts, and another array q holds the last value we were comparing.
For example: if my list is {1, 1, 1, 1, 2, 2, 2},
then I would have m[0] = 4 and q[0] = 1,
and then m[1] = 3 and q[1] = 2,
so the mode is q[0] = 1.
Unfortunately I have had no success thus far; I'm hoping someone can help.
float mode(int x[], int n)
{
    //Copy array and sort it
    int y[n], temp, k = 0, counter = 0, m[n], q[n];
    for(int i = 0; i < n; i++)
        y[i] = x[i];
    for(int pass = 0; pass < n - 1; pass++)
        for(int pos = 0; pos < n; pos++)
            if(y[pass] > y[pos]) {
                temp = y[pass];
                y[pass] = y[pos];
                y[pos] = temp;
            }
    for(int i = 0; i < n;){
        for(int j = 0; j < n; j++){
            while(y[i] == y[j]) {
                counter++;
                i++;
            }
        }
        m[k] = counter;
        q[k] = y[i];
        i--; //i should be 1 less since it is referring to an array subscript
        k++;
        counter = 0;
    }
}
Even though you have some good answers already, I decided to post another. I'm not sure it really adds a lot that's new, but I'm not at all sure it doesn't either. If nothing else, I'm pretty sure it uses more standard headers than any of the other answers. :-)
#include <vector>
#include <algorithm>
#include <unordered_map>
#include <map>
#include <iostream>
#include <utility>
#include <functional>
#include <numeric>

int main() {
    std::vector<int> inputs{ 1, 1, 1, 1, 2, 2, 2 };

    std::unordered_map<int, size_t> counts;
    for (int i : inputs)
        ++counts[i];

    std::multimap<size_t, int, std::greater<size_t> > inv;
    for (auto p : counts)
        inv.insert(std::make_pair(p.second, p.first));

    auto e = inv.upper_bound(inv.begin()->first);

    double sum = std::accumulate(inv.begin(),
                                 e,
                                 0.0,
                                 [](double a, std::pair<size_t, int> const &b) { return a + b.second; });

    std::cout << sum / std::distance(inv.begin(), e);
}
Compared to #Dietmar's answer, this should be faster if you have a lot of repetition in the numbers, but his will probably be faster if the numbers are mostly unique.
Based on the comment, it seems you need to find the values which occur most often and, if there are multiple values occurring the same number of times, produce the average of these. This can easily be done with std::sort() followed by a traversal that finds where values change, keeping a few running counts:
template <int Size>
double mode(int const (&x)[Size]) {
    std::vector<int> tmp(x, x + Size);
    std::sort(tmp.begin(), tmp.end());
    int size(0);   // size of the largest set so far
    int count(0);  // number of largest sets
    double sum(0); // sum of largest sets
    for (auto it(tmp.begin()); it != tmp.end(); ) {
        auto end(std::upper_bound(it, tmp.end(), *it));
        if (size == std::distance(it, end)) {
            sum += *it;
            ++count;
        }
        else if (size < std::distance(it, end)) {
            size = std::distance(it, end);
            sum = *it;
            count = 1;
        }
        it = end;
    }
    return sum / count;
}
If you simply wish to count the number of occurrences, then I suggest you use a std::map or std::unordered_map.
If you're mapping a counter to each distinct value, then it's an easy task to count occurrences using std::map, as each key can only be inserted once. To list the distinct numbers in your list, simply iterate over the map.
Here's an example of how you could do it:
#include <cstddef>
#include <map>
#include <algorithm>
#include <iterator>
#include <numeric>
#include <iostream>

std::map<int, int> getOccurences(const int arr[], const std::size_t len) {
    std::map<int, int> m;
    for (std::size_t i = 0; i != len; ++i) {
        m[arr[i]]++;
    }
    return m;
}

int main() {
    int list[7]{1, 1, 1, 1, 2, 2, 2};
    auto occurences = getOccurences(list, 7);
    for (auto e : occurences) {
        std::cout << "Number " << e.first << " occurs ";
        std::cout << e.second << " times" << std::endl;
    }
    auto average = std::accumulate(std::begin(list), std::end(list), 0.0) / 7;
    std::cout << "Average is " << average << std::endl;
}
Output:
Number 1 occurs 4 times
Number 2 occurs 3 times
Average is 1.42857
Here's a working version of your code. m stores the values in the array and q stores their counts. At the end it runs through all the values to get the maximal count, the sum of the modes, and the number of distinct modes.
float mode(int x[], int n)
{
    //Copy array and sort it (simple exchange sort, ascending)
    int y[n], temp, j = 0, k = 0, m[n], q[n];
    for(int i = 0; i < n; i++)
        y[i] = x[i];
    for(int pass = 0; pass < n - 1; pass++)
        for(int pos = pass + 1; pos < n; pos++)
            if(y[pass] > y[pos]) {
                temp = y[pass];
                y[pass] = y[pos];
                y[pos] = temp;
            }
    //Collect each distinct value and the length of its run
    for(int i = 0; i < n;){
        j = i;
        while (j < n && y[j] == y[i]) {
            j++;
        }
        m[k] = y[i];
        q[k] = j - i;
        k++;
        i = j;
    }
    int max = 0;
    int modes_count = 0;
    int modes_sum = 0;
    for (int i = 0; i < k; i++) {
        if (q[i] > max) {
            max = q[i];
            modes_count = 1;
            modes_sum = m[i];
        } else if (q[i] == max) {
            modes_count += 1;
            modes_sum += m[i];
        }
    }
    return (float)modes_sum / modes_count;
}
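As a quick check, a hypothetical driver (it assumes #include <iostream>; the expected outputs follow from the definition of the mode):
int main()
{
    int a[] = {1, 1, 1, 1, 2, 2, 2};
    std::cout << mode(a, 7) << std::endl; // prints 1 (single mode)
    int b[] = {1, 1, 2, 2, 3};
    std::cout << mode(b, 5) << std::endl; // prints 1.5 (average of the modes 1 and 2)
    return 0;
}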

Gauss Elimination for NxM matrix

/* Program to demonstrate gaussian elimination
   on a set of linear simultaneous equations
 */
#include <iostream>
#include <cmath>
#include <vector>

using namespace std;

const double eps = 1.e-15;

/* Preliminary pivoting strategy
   Pivoting function
 */
double pivot(vector<vector<double> > &a, vector<double> &b, int i)
{
    int n = a.size();
    int j = i;
    double t = 0;
    for(int k = i; k < n; k += 1)
    {
        double aki = fabs(a[k][i]);
        if(aki > t)
        {
            t = aki;
            j = k;
        }
    }
    if(j > i)
    {
        double dummy;
        for(int L = 0; L < n; L += 1)
        {
            dummy = a[i][L];
            a[i][L] = a[j][L];
            a[j][L] = dummy;
        }
        // swap the right-hand side entries as well
        double temp = b[i];
        b[i] = b[j];
        b[j] = temp;
    }
    return a[i][i];
}
/* Forward elimination */
void triang(vector<vector<double> > &a, vector<double> &b)
{
    int n = a.size();
    for(int i = 0; i < n-1; i += 1)
    {
        double diag = pivot(a, b, i);
        if(fabs(diag) < eps)
        {
            cout << "zero det" << endl;
            return;
        }
        for(int j = i+1; j < n; j += 1)
        {
            double mult = a[j][i]/diag;
            for(int k = i+1; k < n; k += 1)
            {
                a[j][k] -= mult*a[i][k];
            }
            b[j] -= mult*b[i];
        }
    }
}
/*
DOT PRODUCT OF TWO VECTORS
*/
double dotProd(vector<double> &u, vector<double> &v, int k1, int k2)
{
    double sum = 0;
    for(int i = k1; i <= k2; i += 1)
    {
        sum += u[i] * v[i];
    }
    return sum;
}

/*
BACK SUBSTITUTION STEP
*/
void backSubst(vector<vector<double> > &a, vector<double> &b, vector<double> &x)
{
    int n = a.size();
    for(int i = n-1; i >= 0; i -= 1)
    {
        x[i] = (b[i] - dotProd(a[i], x, i + 1, n-1)) / a[i][i];
    }
}
/*
REFINED GAUSSIAN ELIMINATION PROCEDURE
*/
void gauss(vector<vector<double> > &a, vector<double> &b, vector<double> &x)
{
    triang(a, b);
    backSubst(a, b, x);
}

// EXAMPLE MAIN PROGRAM
int main()
{
    int n;
    cin >> n;
    vector<vector<double> > a;
    vector<double> x;
    vector<double> b;
    for (int i = 0; i < n; i++) {
        vector<double> temp;
        for (int j = 0; j < n; j++) {
            int no;
            cin >> no;
            temp.push_back(no);
        }
        a.push_back(temp);
        b.push_back(0);
        x.push_back(0);
    }
    /*
    for (int i = 0; i < n; i++) {
        int no;
        cin >> no;
        b.push_back(no);
        x.push_back(0);
    }
    */
    gauss(a, b, x);
    for (size_t i = 0; i < x.size(); i++) {
        cout << x[i] << endl;
    }
    return 0;
}
The above Gaussian elimination algorithm works fine on NxN matrices. But I need it to work on an NxM matrix. Can anyone help me do it? I am not very good at maths. I got this code from some website and I am stuck on it.
(optional) Understand this. Do some examples on paper.
Don't write code for Gaussian elimination yourself. Without some care, naive Gaussian pivoting is unstable. You have to scale the rows and pivot on the greatest element; a starting point is there. Note that this advice holds for most linear algebra algorithms.
If you want to solve systems of equations, LU decomposition, QR decomposition (stabler than LU, but slower), Cholesky decomposition (when the system is symmetric positive definite) or SVD (when the system is not square) are almost always better choices. Gaussian elimination is best for computing determinants, however.
Use the algorithms from LAPACK for the problems which need Gaussian elimination (e.g. solving systems, or computing determinants). Really. Don't roll your own. Since you are doing C++, you may be interested in Armadillo, which takes care of a lot of things for you.
If you must roll your own for pedagogical reasons, have a look first at Numerical Recipes, version 3. Version 2 can be found online for free if you're low on budget / have no access to a library.
As a general advice, don't code algorithms you don't understand.
You just cannot apply Gaussian elimination directly to an NxM problem. If you have more equations than unknowns, your problem is over-determined and in general has no exact solution, which means you need to use something like the least-squares method. Say you have A*x = b; then instead of x = inv(A)*b (when N=M), you compute x = inv(A^T*A)*A^T*b.
In the case where you have fewer equations than unknowns, your problem is under-determined and you have an infinity of solutions. In that case, you either pick one (e.g. by setting some of the unknowns to arbitrary values), or you need to use regularization, which means adding some extra constraints.
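As an illustration, here is a minimal sketch of that normal-equations approach, reusing the gauss() routine from the question (the name leastSquares is made up here; for serious work prefer a QR- or SVD-based solver, as discussed above):
// Solve an over-determined N x M system A*x = b (N > M) in the
// least-squares sense: form the normal equations (A^T*A)*x = A^T*b
// and hand the resulting square M x M system to gauss().
vector<double> leastSquares(const vector<vector<double> >& A,
                            const vector<double>& b)
{
    size_t N = A.size(), M = A[0].size();
    vector<vector<double> > AtA(M, vector<double>(M, 0.0));
    vector<double> Atb(M, 0.0), x(M, 0.0);
    for (size_t i = 0; i < M; i++) {
        for (size_t j = 0; j < M; j++)
            for (size_t k = 0; k < N; k++)
                AtA[i][j] += A[k][i] * A[k][j];
        for (size_t k = 0; k < N; k++)
            Atb[i] += A[k][i] * b[k];
    }
    gauss(AtA, Atb, x); // solve the square M x M system
    return x;
}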
You can apply echelon reduction, as in this snippet:
#include <iostream>
#include <algorithm>
#include <vector>
#include <iomanip>
#include <cmath>
#include <cfloat>

using namespace std;
/*
A rectangular matrix is in echelon form (or row echelon form) if it has the following
three properties:
1. All nonzero rows are above any rows of all zeros.
2. Each leading entry of a row is in a column to the right of the leading entry of
   the row above it.
3. All entries in a column below a leading entry are zeros.
If a matrix in echelon form satisfies the following additional conditions,
then it is in reduced echelon form (or reduced row echelon form):
4. The leading entry in each nonzero row is 1.
5. Each leading 1 is the only nonzero entry in its column.
*/
template <typename C> void print(const C& c) {
    for (const auto& e : c) {
        cout << setw(10) << right << e;
    }
    cout << endl;
}

template <typename C> void print2(const C& c) {
    for (const auto& e : c) {
        print(e);
    }
    cout << endl;
}
// input matrix consists of rows, which are vectors of double
vector<vector<double>> Gauss::Reduce(const vector<vector<double>>& matrix)
{
    if (matrix.size() == 0)
        throw string("Empty matrix");
    auto A{ matrix };
    auto mima = minmax_element(A.begin(), A.end(),
        [](const vector<double>& a, const vector<double>& b) { return a.size() < b.size(); });
    auto mi = mima.first - A.begin(), ma = mima.second - A.begin();
    if (A[mi].size() != A[ma].size())
        throw string("All rows shall have equal length");
    size_t height = A.size();
    size_t width = A[0].size();
    if (width == 0)
        throw string("Only empty rows");
    for (size_t row = 0; row != height; row++) {
        cout << "processing row " << row << endl;
        // Search for maximum below current row in column row and move it to current row; skip this step on the last one
        size_t col{ row }, maxRow{ 0 };
        // find pivot for current row (partial pivoting)
        while (col < width)
        {
            maxRow = distance(A.begin(),
                max_element(A.begin() + row, A.end(),
                    [col](const vector<double>& rowVectorA, const vector<double>& rowVectorB)
                    { return abs(rowVectorA[col]) < abs(rowVectorB[col]); }));
            if (A[maxRow][col] != 0) // nonzero in this row and column or below found
                break;
            ++col;
        }
        if (col == width) // e.g. in current row and below all entries are zero
            break;
        if (row != maxRow)
        {
            swap(A[row], A[maxRow]);
            cout << "swapped " << row << " and " << maxRow;
        }
        cout << " => leading entry in column " << col << endl;
        print2(A);
        // here col >= row holds; col is the column of the leading entry e.g. first nonzero column in current row
        // moreover, all entries to the left and below are zeroed
        if (row+1 < height)
            cout << "processing column " << col << endl;
        // Make in all rows below this one 0 in current column
        for (size_t rowBelow = row + 1; rowBelow < height; rowBelow++) {
            // subtract product of current row by factor
            double factor = A[rowBelow][col] / A[row][col];
            cout << "processing row " << rowBelow << " below the current; factor is " << factor << endl;
            if (factor == 0)
                continue;
            for (size_t colRight{ col }; colRight < width; colRight++)
            {
                auto d = A[rowBelow][colRight] - factor * A[row][colRight];
                A[rowBelow][colRight] = abs(d) < DBL_EPSILON ? 0 : d;
            }
            print(A[rowBelow]);
        }
    }
    // the matrix A is in echelon form now
    cout << "matrix in echelon form" << endl;
    print2(A);
    // reduced echelon form follows (backward phase)
    size_t row(height-1);
    auto findPivot = [&row, A] () -> size_t {
        do
        {
            auto pos = find_if(A[row].begin(), A[row].end(), [](double d) { return d != 0; });
            if (pos != A[row].end())
                return pos - A[row].begin();
        } while (row-- > 0);
        return A[0].size();
    };
    do
    {
        auto col = findPivot();
        if (col == width)
            break;
        cout << "processing row " << row << endl;
        if (A[row][col] != 1)
        {
            // scale row 'row' to make the element at [row][col] equal one
            auto f = 1 / A[row][col];
            transform(A[row].begin()+col, A[row].end(), A[row].begin()+col, [f](double d) { return d * f; });
        }
        auto rowAbove{ row };
        while (rowAbove > 0)
        {
            rowAbove--;
            double factor = A[rowAbove][col];
            if (abs(factor) > 0)
            {
                for (auto colAbove{ 0 }; colAbove < width; colAbove++)
                {
                    auto d = A[rowAbove][colAbove] - factor * A[row][colAbove];
                    A[rowAbove][colAbove] = abs(d) < DBL_EPSILON ? 0 : d;
                }
                cout << "transformed row " << rowAbove << endl;
                print(A[rowAbove]);
            }
        }
    } while (row-- > 0);
    return A;
}