Given an array of integers, find the maximal possible sum of k of its
consecutive elements.
Example
For inputArray = [2, 3, 5, 1, 6]
and k = 2, the output should be
solution(inputArray, k) = 8.
So my program kind of works, at least on the test cases that I have seen, except that it skips the first element.
There are probably multiple ways of solving it: either by inserting a copy of the first element into the array, or by making a separate loop that handles the first check (2+3 = 5). But none of these solutions really seems elegant enough. I want to solve this the best possible way and I can't seem to find a good solution. This is my code:
vector<int> arr = {1, 3, 4, 2, 4, 19, 1};
int sum {};
int max {};
int k = 3;
for (int i {}; i < arr.size(); ++i)
{
sum = 0;
int x = k;
for (int j = i+1; j < arr.size(); ++j)
{
sum += arr.at(j);
--x;
if (x == 0)
{
cout << sum << endl;
break;
}
}
if (sum > max)
{
max = sum;
}
}
cout << max << endl;
As you can see, my inner for loop starts at index i+1, so it skips the first element of the vector by default. How do I fix this? Is there an if-statement I can do to manipulate the loop to only have j = i+1 if i != 0?
Is there an if-statement I can do to manipulate the loop to only have j = i+1 if i != 0?
You can use a ternary operator to do it, something like this:
int j = i != 0 ? i + 1 : i;
The structure of a ternary operator is: condition ? value_if_true : value_if_false
But I got curious: why don't you add the value at index i to the sum (so it's always part of the sum), instead of starting it at zero?
sum = array[i];
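Applied to the loops from the question, a minimal sketch could look like this (arr, k, sum and max as in the question; the bound i + k <= arr.size() is my assumption so that only complete windows are counted):
int sum {};
int max {};
for (size_t i = 0; i + k <= arr.size(); ++i)
{
    sum = arr.at(i);               // the window now starts at arr[i] itself
    for (size_t j = i + 1; j < i + k; ++j)
        sum += arr.at(j);          // add the remaining k - 1 elements
    if (sum > max)
        max = sum;
}
cout << max << endl;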
There are probably multiple ways of solving it. [...] I want to solve this the best possible way.
Then consider an O(N) algorithm instead of an O(N^2) one:
#include <iostream>
#include <vector>
auto max_sum_of_k(std::vector<int> const& v, size_t k)
{
// Sum the first k elements.
long long current_sum{};
size_t i{};
for ( ; i < k and i < v.size(); ++i )
{
current_sum += v[i];
}
// Update the running sum, without a nested loop.
long long sum{ current_sum };
for ( ; i < v.size(); ++i )
{
current_sum -= v[i - k];
current_sum += v[i];
if ( sum < current_sum )
sum = current_sum;
}
return sum;
}
int main()
{
std::vector<int> arr = {1, 3, 4, 2, 4, 19, 1};
for (size_t k{}; k <= arr.size(); ++k)
{
std::cout << "k: " << k << " max sum: " << max_sum_of_k(arr, k) << '\n';
}
}
First note that this task is only possible if k does not exceed the array length.
Now an efficient solution is to
compute the sum of the first k elements, then
repeatedly remove the first element from the sum and add the next one. This makes a significant saving.
int j;
int Sum= A[0];
for (j= 1; j < k; j++)
Sum+= A[j];
// Here we have the first sum
for ( ; j < length; j++)
{
Sum-= A[j - k];
Sum+= A[j];
// Here we have the next sums
}
I leave it to you as an exercise to keep track of the maximum sum.
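For completeness, one way the exercise could be finished (Sum, A, length and k as above; Best is my own addition):
int Best = Sum;          // the first window is the best seen so far
for ( ; j < length; j++)
{
    Sum -= A[j - k];
    Sum += A[j];
    if (Sum > Best)
        Best = Sum;      // keep the running maximum of the window sums
}
// Best now holds the maximal sum of k consecutive elements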
Notice that the sum update trick is not recommended for floating-point types, due to the accumulation of numerical errors.
A slightly fancier way to do it, using templates:
#include <algorithm>
#include <iterator>
#include <utility>
template <typename In>
auto sum_at_most_n(In b, In e, size_t n) {
typename std::iterator_traits<In>::value_type sum{};
while (b != e && n--) {
sum = sum + *b++;
}
return std::pair{sum, b};
}
template <typename In>
auto max_sum_of_k(In b, In e, size_t k) {
auto [sum, head] = sum_at_most_n(b, e, k);
auto max_sum = sum;
while (head != e) {
sum = sum - *b++ + *head++;
max_sum = std::max(max_sum, sum);
}
return max_sum;
}
template <typename Container>
auto max_sum_of_k(Container c, size_t k) ->
typename std::iterator_traits<decltype(std::begin(c))>::value_type {
return max_sum_of_k(std::begin(c), std::end(c), k);
}
Just iterate over the elements and then subtract the elements which are no longer part of the sum.
But I really recommend learning how to write tests. See the link with a demo:
Demo
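For a quick local check, here is a hedged usage sketch (the main and test values are mine; the expected 25 comes from the window {2, 4, 19}):
#include <iostream>
#include <vector>
// sum_at_most_n and max_sum_of_k as defined above
int main() {
    std::vector<int> arr{1, 3, 4, 2, 4, 19, 1};
    std::cout << max_sum_of_k(arr, 3) << '\n'; // expected 25, from the window {2, 4, 19}
}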
Related
I have a sorted array with length n where 1 < n <= 1e5, how can I find the kth smallest difference between two elements in the array?
For example, I have {1,4,9,16} and k equal 5, then I have differences {3,5,7,8,12,15} and the result is 12.
I couldn't find any solution other than computing all differences between pairs of elements, and that algorithm takes Θ(n^2).
It is unclear to me how you intend to handle duplicate differences. Consider the array {1, 2, 3, 4}. Do you say that the differences are {1, 2, 3}? Or would you say that they are {1, 1, 1, 2, 2, 3}?
If the latter, then the following code will take average time O(n log(n)) and worst case time O(n log(n)^2). It is based on a binary search of the differences.
I am ahem not a C++ programmer.
#include <iostream>
#include <vector>
#include <utility>
using namespace std;
template <typename my_type>
my_type kth_diff(my_type a[], int n, int k) {
// {j, {m, n}} represents a[m] - a[j], a[m+1] - a[j], ..., a[n] - a[j]
vector<pair<int, pair<int, int>>> diff_range;
for (int i = 0; i+1 < n; i++) {
diff_range.push_back({i, {i+1, n-1}});
}
while (0 < diff_range.size()) {
int i = diff_range[0].first;
int j = (diff_range[0].second.first + diff_range[0].second.second)/2;
my_type pivot = a[j] - a[i];
// And back up over the max values that make a pivot.
while (0 < j && a[j-1] == a[j]) {
j--;
}
int count_below = 0;
int count_at = 0;
vector<pair<int, pair<int, int>>> diff_range_low;
vector<pair<int, pair<int, int>>> diff_range_high;
vector<pair<int, pair<int, int>>>::iterator it;
for (it = diff_range.begin(); it != diff_range.end(); it++) {
i = it->first;
j = max(it->second.first, j);
while (j < n && a[j] - a[i] < pivot) {
j++;
}
count_below += j - it->second.first;
if (it->second.first < j) {
// If the pivot is too small, use this.
diff_range_low.push_back({i, {it->second.first, j-1}});
}
while (j < n && a[j] - a[i] == pivot) {
j++;
count_at++;
}
if (j <= it->second.second) {
// If the pivot is too big, use this.
diff_range_high.push_back({i, {j, it->second.second}});
}
}
if (count_below + count_at <= k) {
// We only need to count ranges past the pivot.
diff_range = diff_range_high;
// Keep track of the number below that are accounted for.
k -= count_below + count_at;
}
else if (k < count_below) {
// We only need to count ranges before the pivot.
diff_range = diff_range_low;
}
else {
return pivot;
}
}
return a[0];
}
int main() {
int a[] = {1,4,9,16};
int n = sizeof(a) / sizeof(a[0]);
for (int k = 0; k < n*(n-1)/2; k++) {
cout << k << "\t" << kth_diff(a, n, k) << endl;
}
}
I look at this problem like this.
For [a,b,c,d] such that a<b<c<d, and x,y,z>0 with b = a+x, c = b+y, d = c+z, we have b-a = x, c-b = y, d-c = z, c-a = x+y, d-b = y+z, and d-a = x+y+z.
What does this tell us? The further apart the elements are, the bigger the difference, and it grows with every step.
It is unknown whether x<y or x>y, but for sure x+y>x and x+y>y, so your differences can be divided into distance classes.
diff_1 = {d-c,c-b,b-a}, diff_2 = {d-b,c-a}, diff_3={d-a}
What you can probably see by now is that min(diff_1) < min(diff_2) < min(diff_3), so to find the second smallest difference you don't need to check min(diff_3), because it is at best the third smallest difference.
So what you do is to implement something like this pseudocode:
int findLeastDiff(std::vector<int> v, int k)
{
// precondition from the original sketch: v_contains_k_distinct_elements(v)
std::vector<int> result;
std::sort(v.begin(),v.end()); // O(nlog(n));
v.erase(std::unique(v.begin(),v.end()),v.end()); // O(n)
for (int i = 1; i <= k; ++i)
{
for (std::size_t j = i; j < v.size(); ++j)
result.push_back(v[j] - v[j - i]); // differences of distance class i, O(n-i)
} // O(k(n-k/2)) = O(k*n)
std::sort(result.begin(), result.end()); // sort all collected differences
result.erase(std::unique(result.begin(),result.end()),result.end()); // drop duplicates
return result[k - 1]; // k is 1-based, as in the question's example
}
The above is just a concept and for sure can be optimized a lot. What is the complexity? Roughly O(nlog(n) + k*n + k*n*log(k*n)), dominated by sorting the collected differences.
It probably can be optimized with some clever way of reducing the search space by removing ranges with already too big differences.
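A quick check of the sketch above against the question's example (the main and includes are mine; the values and the expected result come from the question):
#include <algorithm>
#include <iostream>
#include <vector>
// findLeastDiff as sketched above
int main()
{
    std::vector<int> v{1, 4, 9, 16};
    std::cout << findLeastDiff(v, 5) << '\n'; // 5th smallest difference; the question expects 12
}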
Given an integer n and an array a, find the maximum of (a[i]+a[j])*(j-i) with 1<=i<=n-1 and i+1<=j<=n.
Example:
Input
5
1 3 2 5 4
Output
21
Explanation: With i=2 and j=5, the maximum of (a[i]+a[j])*(j-i) is (3+4)*(5-2)=21.
Constraints:
n<=10^6
a[i]>0 with 1<=i<=n
I can solve this problem with n<=10^4, but what should I do if n is too large, like the constraints?
First, let's reference the "brute force" algorithm. This will have some issues that I will call out below, but it is a correct solution.
struct Result
{
size_t i;
size_t j;
int64_t value;
};
Result findBestBruteForce(const vector<int>& a)
{
size_t besti = 0;
size_t bestj = 0;
int64_t bestvalue = INT64_MIN;
for (size_t i = 0; i < a.size(); i++)
{
for (size_t j = i + 1; j < a.size(); j++)
{
// do the math in 64-bit space to avoid overflow
int64_t value = (a[i] + (int64_t)a[j]) * (j - i);
if (value > bestvalue)
{
bestvalue = value;
besti = i;
bestj = j;
}
}
}
return { besti, bestj, bestvalue };
}
The problem with the above code is that it runs at O(N²). Or more precisely, for the N iterations of the outer for-loop (where i goes from 0 to N), there are an average of N/2 iterations on the inner for-loop. If N is small, this isn't a problem.
On my PC, with full optimizations turned on, when N is under 20000 the run time is less than a second. Once N approaches 100000, it takes several seconds to process the roughly 5 billion iterations. Let's just go with a "billion operations per second" as an expected rate. If N were 1000000, the maximum the OP outlined, it would probably take 500 seconds. Such is the nature of an N-squared algorithm.
So how can we speed it up? Here's an interesting observation. Let's say our array was this:
10 5 4 15 13 100 101 6
On the first iteration of the outer loop above, where i=0, we'd be computing this on each iteration of the inner loop:
for each j: (a[0]+a[j])*(j-0)
for each j: (10+a[j])*(j-0)
for each j: [15*1, 14*2, 25*3, 23*4, 110*5, 111*6, 16*7]
= [15, 28, 75, 92, 550, 666, 112]
Hence, for i=0, a[i] = 10 and the largest value computed from that set is 666.
Since a[0] is 10, and we're tracking a current "best" value, there's no incentive to iterate all the values again for i=1, since a[1]==5 is less than 10. There's no j index that would compute a value of (a[1]+a[j])*(j-1) larger than what's already been found, because (5+a[j])*(j-1) will always be less than (10+a[j])*(j-0). (This assumes all values in the array are non-negative.)
So to generalize, the outer loop can skip over any index of i where A[best_i] > A[i]. And that's a real simple alteration to our above code:
Result findBestOptimized(const std::vector<int>& a)
{
if (a.size() < 2)
{
return {0,0,INT64_MIN};
}
size_t besti = 0;
size_t bestj = 0;
int64_t bestvalue = INT64_MIN;
int minimum = INT_MIN;
for (size_t i = 0; i < a.size(); i++)
{
if (a[i] <= minimum)
{
continue;
}
for (size_t j = i + 1; j < a.size(); j++)
{
int64_t value = (a[i] + (int64_t)a[j]) * (j - i);
if (value > bestvalue)
{
bestvalue = value;
besti = i;
bestj = j;
minimum = a[i];
}
}
}
return { besti, bestj, bestvalue };
}
Above, we introduce a minimum threshold that a[i] must exceed before we bother doing the full inner-loop enumeration.
I benchmarked this with build optimizations on. On a random array of a million items, it runs in under a second.
But wait... there's another optimization!
Whether or not the inner loop finds an index j such that value > bestvalue, we know the current a[i] was greater than minimum (otherwise we would have skipped it), and any later index i' with a[i'] <= a[i] can only produce values no larger than ones already computed while processing i. Hence, we can raise minimum to a[i] at the end of the inner loop regardless.
Now, I'll present the final solution:
Result findBestOptimizedEvenMore(const std::vector<int>& a)
{
if (a.size() < 2)
{
return { 0,0,INT64_MIN };
}
size_t besti = 0;
size_t bestj = 0;
int64_t bestvalue = INT64_MIN;
int minimum = INT_MIN;
for (size_t i = 0; i < a.size(); i++)
{
if (a[i] <= minimum)
{
continue;
}
for (size_t j = i + 1; j < a.size(); j++)
{
int64_t value = (a[i] + (int64_t)a[j]) * (j - i);
if (value > bestvalue)
{
bestvalue = value;
besti = i;
bestj = j;
}
}
minimum = a[i]; // since we know a[i] > minimum, we can do this
}
return { besti, bestj, bestvalue };
}
I benchmarked the above solution on different array sizes from N=100 to N=1000000. It does all iterations in under 25 milliseconds.
In the above solution, there's likely a worst case runtime of O(N²) again when all the items in the array are in ascending order. But I believe the average case should be on the order of O(N lg N) or better. I'll do some more analysis later if anyone is interested.
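To illustrate the ascending worst case, here is a small sketch of my own that reuses findBestOptimizedEvenMore and Result from above:
#include <iostream>
#include <numeric>
#include <vector>
// findBestOptimizedEvenMore and Result as defined above
int main()
{
    // Ascending input: every a[i] exceeds the running minimum, so the skip
    // never fires and the inner loop runs in full for each i (~N^2/2 steps).
    std::vector<int> worst(100000);
    std::iota(worst.begin(), worst.end(), 1); // 1, 2, 3, ..., N
    Result r = findBestOptimizedEvenMore(worst);
    std::cout << r.value << '\n';
}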
Note: Some notation for variables and the Result class in the code have been copied from #selbie's excellent answer.
Here's another O(n^2) worst-case solution with (likely provable) O(n) expected performance on random permutations and room for optimization.
Suppose [i, j] are our array bounds for an optimal pair. By the problem definition, this means all elements left of i must be strictly less than A[i], and all elements right of j must be strictly less than A[j].
This means we can compute the left-maxima of A: all elements strictly greater than all previous elements, as well as the right-maxima of A. Then, we only need to consider left endpoints from the left-maxima and right endpoints from the right-maxima.
I don't know the expectation of the product of the sizes of left and right maxima sets, but we can get an upper bound. The size of left maxima is at most the size of the longest increasing subsequence (LIS) of A. The right maxima are at most the size of the longest decreasing subsequence. These aren't independent, but I'm taking as an (unproven) assumption that the LIS and LDS lengths are inversely correlated with each other for random permutations. The right-maxima must start after the left-maxima end, so this seems like a safe assumption.
The length of the LIS for random permutations follows the Tracy-Widom distribution; its mean is about 2*sqrt(N) and its standard deviation is of order N^(1/6). The expected square of the size is therefore about 4N up to lower-order corrections, so it is linear in N. This isn't exactly the proof we wanted, since you'd need to sum over the partial density function to be rigorous, but the LIS is already an upper bound on the left-maxima size, so I think the conclusion is still true.
C++ code (Result class and some variable names taken from selbie's post, as mentioned):
struct Result
{
size_t i;
size_t j;
int64_t value;
};
Result find_best_sum_size_product(const std::vector<int>& nums)
{
/* Given: list of positive integers nums
Returns: Tuple with (best_i, best_j, best_product)
where best_i and best_j maximize the product
(nums[i]+nums[j])*(j-i) over 0 <= i < j < n
Runtime: O(n^2) worst case,
O(n) average on random permutations.
*/
int n = nums.size();
if (n < 2)
{
return {0,0,INT64_MIN};
}
std::vector<int> left_maxima_indices;
left_maxima_indices.push_back(0);
for (int i = 1; i < n; i++){
if (nums.at(i) > nums.at(left_maxima_indices.back())) {
left_maxima_indices.push_back(i);
}
}
std::vector<int> right_maxima_indices;
right_maxima_indices.push_back(n-1);
for (int i = n-1; i >= 0; i--){
if (nums.at(i) > nums.at(right_maxima_indices.back())) {
right_maxima_indices.push_back(i);
}
}
size_t best_i = 0;
size_t best_j = 0;
int64_t best_product = INT64_MIN;
int i = 0;
int j = 0;
for (size_t left_idx = 0;
left_idx < left_maxima_indices.size();
left_idx++)
{
i = left_maxima_indices.at(left_idx);
for (size_t right_idx = 0;
right_idx < right_maxima_indices.size();
right_idx++)
{
j = right_maxima_indices.at(right_idx);
if (i == j) continue;
int64_t value = (nums.at(i) + (int64_t)nums.at(j)) * (j - i);
if (value > best_product)
{
best_product = value;
best_i = i;
best_j = j;
}
}
}
return { best_i, best_j, best_product };
}
I started from the two excellent answers by #selbie and #kcsquared.
Their solutions gave impressive results for random inputs. What was not clear was the worst-case behavior.
What sequence would correspond to the worst case?
I finally found a critical sequence for these two answers, a triangle sequence: a sequence that slightly increases up to a maximum and then slightly decreases. With such a sequence and n=10^5, for example, these answers take more than 10 s.
My solution starts from #selbie's solution and adds two improvements:
I add #kcsquared's trick: the right element only needs to be taken from positions that are greater than everything to their right (the suffix maxima, precomputed in next_max below)
When considering a new left element a[i], it is useless to start from i + 1 to look for the second element; we can start from the current best_j
With these tricks, I was able to improve the performance of the two posted answers a little bit. However, it still fails to solve the triangle sequence issue: about 10 s for n = 10^5.
#include <iostream>
#include <vector>
#include <string>
#include <cstdlib>
#include <ctime>
#include <chrono>
struct Result {
size_t i;
size_t j;
int64_t value;
};
void print (const Result& res, const std::string& prefix = "") {
std::cout << prefix;
std::cout << "(" << res.i << ", " << res.j << ") -> " << res.value << std::endl;
}
Result findBest(const std::vector<int>& a) {
if (a.size() < 2) {
return { 0, 0, INT64_MIN };
}
int n = a.size();
std::vector<int> next_max(n, -1);
int current_max = n-1;
for (int i = n-1; i >= 0; --i) {
if (a[i] > a[current_max]) {
current_max = i;
}
next_max[i] = current_max;
}
size_t besti = 0;
size_t bestj = 0;
int64_t bestvalue = INT64_MIN;
int minimum = INT_MIN;
for (size_t i = 0; i < a.size(); i++) {
if (a[i] <= minimum) {
continue;
}
minimum = a[i];
size_t jmin = (bestj > i) ? bestj : i+1;
for (size_t j = jmin; j < a.size(); j++) {
j = next_max[j];
int64_t value = (a[i] + (int64_t)a[j]) * (j - i);
if (value > bestvalue) {
bestvalue = value;
besti = i;
bestj = j;
}
}
}
return { besti, bestj, bestvalue };
}
int main() {
int n = 1000000;
int vmax = 100000000;
std::vector<int> A (n);
std::srand(std::time(0));
for (int i = 0; i < n; ++i) {
A[i] = rand() % vmax + 1;
}
std::cout << "n = " << n << std::endl;
auto t0 = std::chrono::high_resolution_clock::now();
auto res = findBest (A);
auto t1 = std::chrono::high_resolution_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::microseconds>(t1 - t0).count();
print (res, "Random: ");
std::cout << "time = " << duration/1000 << " ms" << std::endl;
int i_max = n/2;
for (int i = 0; i < i_max; ++i) A[i] = i+1;
A[i_max] = 10 * i_max;
for (int i = i_max+1; i < n; ++i) {
A[i] = 2*i_max - i;
}
t0 = std::chrono::high_resolution_clock::now();
res = findBest (A);
t1 = std::chrono::high_resolution_clock::now();
duration = std::chrono::duration_cast<std::chrono::microseconds>(t1 - t0).count();
print (res, "Triangle sequence: ");
std::cout << "time = " << duration/1000 << " ms" << std::endl;
return 0;
}
There is a problem that I can't solve. Here are two unordered arrays:
int a1[] = { 5, 7, 14, 0, 6, 2, 9, 11, 3 }; int n = 9;
int b[] = { 6, 4, 3, 10, 9, 15, 7 }; int m = 7;
I want to compare them and remove elements in a[] that can be found in b[]. The following code doesn't return the correct value of n to me: the correct value should be 4, but it gives me 5, even though I successfully sorted array a1[]. It gave me a result like this:
a1[] = { 5, 2, 14, 0 ,11 }
There is also a slight difference between my result and the model answer, namely the order of the elements in a1[]. The model answer is
a1[] = {5, 11, 14, 0, 2}
Can you guys help me to figure out the problem?
int removeAll_unordered(int *a, int& n, const int *b, int m)
{
for (int i = 0; i < m; i++) {
int j = 0;
for (j = 0; j < n; j++)
{
if (b[i] == a[j])
{
a[j] = a[n - 1];
n -= 1;
}
}
}
return n;
}
If you write code in C++ you should use what the standard library provides for you - in your case std::vector and the std::remove_if algorithm:
void removeAll_unordered( std::vector<int> &a, const std::vector<int> &b )
{
auto end = std::remove_if( a.begin(), a.end(), [b]( int i ) {
return std::find( b.begin(), b.end(), i ) != b.end();
} );
a.erase( end, a.end() );
}
Live code 1
But this usage is quite inefficient, so, again using the standard library, which provides std::unordered_set (a hash set), we can easily optimize it:
void removeAll_unordered( std::vector<int> &a, const std::vector<int> &b )
{
auto end = std::remove_if( a.begin(), a.end(),
[set = std::unordered_set<int>( b.begin(), b.end() )]( int i ) {
return set.count( i );
} );
a.erase( end, a.end() );
}
Live code 2
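A hedged usage sketch with the arrays from the question (the main and the includes are mine; either removeAll_unordered above can be used):
#include <algorithm>
#include <iostream>
#include <unordered_set>
#include <vector>
// removeAll_unordered as defined above (either version)
int main()
{
    std::vector<int> a{ 5, 7, 14, 0, 6, 2, 9, 11, 3 };
    std::vector<int> b{ 6, 4, 3, 10, 9, 15, 7 };
    removeAll_unordered( a, b );
    for ( int x : a )
        std::cout << x << ' '; // prints the five elements of a not found in b: 5 14 0 2 11
}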
I found one problem in your code. I couldn't compile it here, but the fix below should work.
In your code,
if (b[i] == a[j])
{
a[j] = a[n - 1];
n -= 1;
}
When an element of b is found in a, you replace that value with a[n-1]. This is okay, but the value you just moved in is never compared with b[i], because j still gets incremented. So I corrected this part; if you run with different inputs you will be able to catch this problem.
int removeAll_unordered(int *a, int& n, const int *b, int m)
{
for (int i = 0; i < m; i++)
{
for (int j = 0; j < n;)
{
if (a[j] == b[i]) // replace a[j] with a[n-1] and decrease n
{
a[j] = a[n - 1];
n--;
}
else
j++; // otherwise increase j
}
}
return n;
}
To get the exact model answer (the same order of the elements in a after the removal), here is the modified code:
int duplicates = 0; // counts total numbers that removed from a[]
for (int i = 0; i < n; i++)
{
for (int j = 0; j < m;)
{
if (a[i] == b[j]) // replace a[i] with a[n-1] and decrease n
{
if (i == n - 1) // when we reach the last element of a that matches in b
{
n--; // updating length of a[]
duplicates++; // one more removed
break;
}
a[i] = a[n - 1];
n--; // updating length of a[]
duplicates++; // one more removed
j = 0;
}
else
j++; // otherwise increase j
}
}
return duplicates; // returned total removed numbers
I found the problem in your code. Change the variables in the for loops: let the first (outer) loop use 'n' as its limit, iterating over a[], and the second (inner) loop use 'm', iterating over b[].
Then, since you only decrease n, the new i-th value is never checked even though it has just changed, so you need to check that value again. You can do that by decreasing 'i' as well (and breaking out of the inner loop, as in the code below, so the swapped-in element is compared against b[] from the start).
As for the answer being 5 rather than 4: 5 is the number of elements left in a1[] after the removal, while 4 is the number of elements that were removed. If you want 4, return the count of removed elements (the original n minus the final n) instead.
The modified code is given below.
for (int i = 0; i < n; i++)
{
for (int j = 0; j < m; j++)
{
if (b[j] == a[i])
{
a[i] = a[n - 1];
n -= 1;
i = i - 1; // step back so the swapped-in element is checked again
break; // restart the inner loop from b[0] for the new a[i]
}
}
}
return n;
A zero-indexed array A consisting of N different integers is given. The array contains integers in the range [1..(N + 1)], which means that exactly one element is missing.
Your goal is to find that missing element.
Write a function:
int solution(int A[], int N);
that, given a zero-indexed array A, returns the value of the missing element.
For example, given array A such that:
A[0] = 2 A[1] = 3 A[2] = 1 A[3] = 5
the function should return 4, as it is the missing element.
Assume that:
N is an integer within the range [0..100,000];
the elements of A are all distinct;
each element of array A is an integer within the range [1..(N + 1)].
Complexity:
expected worst-case time complexity is O(N);
expected worst-case space complexity is O(1), beyond input storage (not counting the storage required for input arguments).
It doesn't work for a case where there are two elements:
int solution(vector<int> &A) {
sort(A.begin(), A.end());
int missingIndex = 0;
for (int i = 0; i < A.size(); i++)
{
if ( i != A[i]-1)
{
missingIndex = i+1;
}
}
return missingIndex;
}
Since your array is zero-indexed and the numbers are from 1 to N+1, the statement should be:
if ( i != A[i]-1)
Also, you should immediately break out of the for loop after updating missingIndex, because all entries beyond the missing element will also have (i != A[i]-1).
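A minimal sketch of the loop with both suggestions applied (the N + 1 fallback, for the case where N + 1 itself is the missing value, is my own addition):
int solution(vector<int> &A) {
    sort(A.begin(), A.end());
    int missingIndex = A.size() + 1; // my addition: if no gap is found, N + 1 is the missing value
    for (int i = 0; i < (int)A.size(); i++)
    {
        if (i != A[i] - 1)
        {
            missingIndex = i + 1; // the first gap marks the missing element
            break;                // every later entry is shifted as well
        }
    }
    return missingIndex;
}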
Moreover, because of the sorting, your solution is O(NlogN) and not O(N).
Instead you can sum all the elements in the array (using unsigned long long int) and check its difference from (N+1)(N+2)/2.
You can use the simple math formula for an arithmetic progression to get the sum of all numbers from 1 to N+1. Then iterate over all the given numbers and calculate their sum. The missing element will be the difference between the two sums.
int solution(std::vector<int> &a) {
uint64_t sum = (a.size() +1 ) * (a.size() + 2) / 2;
uint64_t actual = 0;
for(int element : a) {
actual += element;
}
return static_cast<int>(sum - actual);
}
Use all the power of STL:
#include <numeric>
#include <functional>
int solution(vector<int> &A) {
return std::accumulate(A.begin(), A.end(), (A.size()+1) * (A.size()+2) / 2, std::minus<long long>()); // subtract in 64 bits so large N does not overflow
}
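A quick sanity check against the example from the problem statement (the main below is mine):
#include <iostream>
#include <vector>
int main() {
    std::vector<int> A{ 2, 3, 1, 5 }; // example from the problem statement
    std::cout << solution(A) << '\n'; // expected output: 4
}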
This solution uses the sign of the values as a flag. It needs at worst two passes over the elements. The sum-formula solution needs exactly one pass.
int solution(vector<int> &a) {
int n = (int)a.size();
for(auto k : a)
{
int i = abs(k) - 1;
if (i != n)
a[i] = -a[i];
}
for (int i = 0; i < n; ++i)
if (a[i]>0)
return i+1;
return n+1;
}
I solved it this way and thought of posting it here for my own future reference and for others :)
#include <cstdint>
#include <numeric>
int solution(vector<int> &A) {
uint64_t sumAll = (A.size() + 1) * (A.size() + 2) / 2;
uint64_t sumA = std::accumulate(A.begin(), A.end(), uint64_t{0}); // 64-bit initial value so the sum cannot overflow int
return sumAll - sumA;
}
I solved it with this solution. Maybe there is something better, but I tested it with different values and found it works fine, while the other solutions give me strange results.
As an example:
std::vector<int> A = { 12,13,11,14,16 };
std::vector<int> A2 = { 112,113,111,114,116 };
int Solution(std::vector<int> &A)
{
int temp;
for (int i = 0; i < A.size(); ++i)
{
for (int j = i+1;j< A.size();++j )
{
if (A[i] > A[j])
{
temp = A[i];
A[i] = A[j];
A[j] = temp;
}
}
}
for (int i = 0; i < A.size()-1;++i)
{
if ((A[i] + 1 != A[i + 1]))
{
return (A[i] + 1);
}
if(i+1 == A.size() - 1)
return (A[i+1] + 1);
}}
Now everything is fine, but if I use the array above with the methods below, I get wrong values except with small numbers < 10:
std::vector<int> A = { 12,13,11,14,16 };
int Solution_2(std::vector<int> &A)
{
unsigned int n = A.size() + 1;
long long int estimated = n * (n + 1) / 2;
long long int total = 0;
for (unsigned int i = 0; i < n - 1; i++) total += A[i];
return estimated - total;
}
I get this result: -45.
Or this one, which also gives the same result if I use array A:
std::vector<int> A = { 12,13,11,14,16 };
int Solution_3(std::vector<int> &A)
{
uint64_t sumAll = (A.size() + 1) * (A.size() + 2) / 2;
uint64_t sumA = std::accumulate(A.begin(), A.end(), 0);
return sumAll - sumA;
}
I hope someone can explain why this happens.
I'm trying to devise an algorithm in the form of a function that accepts two parameters, an array and the size of the array. I want it to return the mode of the array and, if there are multiple modes, return their average. My strategy was to take the array and first sort it, then count all the occurrences of a number: while that number keeps occurring, add one to a counter, and store that count in an array m. So m holds all the counts and another array q holds the last value we were comparing.
For example: if my list is {1, 1, 1, 1, 2, 2, 2}
then I would have m[0] = 4 and q[0] = 1,
and then m[1] = 3 and q[1] = 2,
so the mode is q[0] = 1.
Unfortunately I have had no success thus far; hoping someone could help.
float mode(int x[],int n)
{
//Copy array and sort it
int y[n], temp, k = 0, counter = 0, m[n], q[n];
for(int i = 0; i < n; i++)
y[i] = x[i];
for(int pass = 0; pass < n - 1; pass++)
for(int pos = 0; pos < n; pos++)
if(y[pass] > y[pos]) {
temp = y[pass];
y[pass] = y[pos];
y[pos] = temp;
}
for(int i = 0; i < n;){
for(int j = 0; j < n; j++){
while(y[i] == y[j]) {
counter++;
i++;
}
}
m[k] = counter;
q[k] = y[i];
i--; //i should be 1 less since it is referring to an array subscript
k++;
counter = 0;
}
}
Even though you have some good answers already, I decided to post another. I'm not sure it really adds a lot that's new, but I'm not at all sure it doesn't either. If nothing else, I'm pretty sure it uses more standard headers than any of the other answers. :-)
#include <vector>
#include <algorithm>
#include <unordered_map>
#include <map>
#include <iostream>
#include <utility>
#include <functional>
#include <numeric>
int main() {
std::vector<int> inputs{ 1, 1, 1, 1, 2, 2, 2 };
std::unordered_map<int, size_t> counts;
for (int i : inputs)
++counts[i];
std::multimap<size_t, int, std::greater<size_t> > inv;
for (auto p : counts)
inv.insert(std::make_pair(p.second, p.first));
auto e = inv.upper_bound(inv.begin()->first);
double sum = std::accumulate(inv.begin(),
e,
0.0,
[](double a, std::pair<size_t, int> const &b) {return a + b.second; });
std::cout << sum / std::distance(inv.begin(), e);
}
Compared to #Dietmar's answer, this should be faster if you have a lot of repetition in the numbers, but his will probably be faster if the numbers are mostly unique.
Based on the comment, it seems you need to find the values which occur most often and, if there are multiple values occurring the same number of times, produce the average of these. It seems this can easily be done by std::sort() followed by a traversal that finds where values change, keeping a few running counts:
#include <algorithm>
#include <iterator>
#include <vector>
template <int Size>
double mode(int const (&x)[Size]) {
std::vector<int> tmp(x, x + Size);
std::sort(tmp.begin(), tmp.end());
int size(0); // size of the largest set so far
int count(0); // number of largest sets
double sum(0); // sum of largest sets
for (auto it(tmp.begin()); it != tmp.end(); ) {
auto end(std::upper_bound(it, tmp.end(), *it));
if (size == std::distance(it, end)) {
sum += *it;
++count;
}
else if (size < std::distance(it, end)) {
size = std::distance(it, end);
sum = *it;
count = 1;
}
it = end;
}
return sum / count;
}
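A possible way to call it (the main and test values are mine, using the list from the question):
#include <iostream>
int main() {
    int values[] = { 1, 1, 1, 1, 2, 2, 2 };
    std::cout << mode(values) << '\n'; // 1 occurs most often, so this prints 1
}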
If you simply wish to count the number of occurrences then I suggest you use a std::map or std::unordered_map.
If you're mapping a counter to each distinct value then it's an easy task to count occurrences using std::map, as each key can only be inserted once. To list the distinct numbers in your list, simply iterate over the map.
Here's an example of how you could do it:
#include <cstddef>
#include <map>
#include <algorithm>
#include <iterator>
#include <numeric>
#include <iostream>
std::map<int, int> getOccurences(const int arr[], const std::size_t len) {
std::map<int, int> m;
for (std::size_t i = 0; i != len; ++i) {
m[arr[i]]++;
}
return m;
}
int main() {
int list[7]{1, 1, 1, 1, 2, 2, 2};
auto occurences = getOccurences(list, 7);
for (auto e : occurences) {
std::cout << "Number " << e.first << " occurs ";
std::cout << e.second << " times" << std::endl;
}
auto average = std::accumulate(std::begin(list), std::end(list), 0.0) / 7;
std::cout << "Average is " << average << std::endl;
}
Output:
Number 1 occurs 4 times
Number 2 occurs 3 times
Average is 1.42857
Here's a working version of your code. m stores the values in the array and q stores their counts. At the end it runs through all the values to get the maximal count, the sum of the modes, and the number of distinct modes.
float mode(int x[],int n)
{
//Copy array and sort it
int y[n], temp, j = 0, k = 0, m[n], q[n];
for(int i = 0; i < n; i++)
y[i] = x[i];
for(int pass = 0; pass < n - 1; pass++)
for(int pos = pass + 1; pos < n; pos++) // compare only against later positions so the exchange sort is correct
if(y[pass] > y[pos]) {
temp = y[pass];
y[pass] = y[pos];
y[pos] = temp;
}
for(int i = 0; i < n;){
j = i;
while (j < n && y[j] == y[i]) {
j++;
}
m[k] = y[i];
q[k] = j - i;
k++;
i = j;
}
int max = 0;
int modes_count = 0;
int modes_sum = 0;
for (int i=0; i < k; i++) {
if (q[i] > max) {
max = q[i];
modes_count = 1;
modes_sum = m[i];
} else if (q[i] == max) {
modes_count += 1;
modes_sum += m[i];
}
}
return static_cast<float>(modes_sum) / modes_count; // float division so the average of several modes keeps its fraction
}
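And a possible driver for the function above (mine as well; note the function relies on variable-length arrays like int y[n], which is a compiler extension rather than standard C++):
#include <iostream>
int main()
{
    int values[] = { 1, 1, 1, 1, 2, 2, 2 };
    std::cout << mode(values, 7) << std::endl; // single mode 1, so this prints 1
}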