I am trying to find the minimal distance between two sets of points in the Manhattan metric (x, y). I have searched for information about this but haven't found anything useful.
#include <bits/stdc++.h>
using namespace std;
#define st first
#define nd second

pair<int, int> pointsA[1000001];
pair<int, int> pointsB[1000001];

int main() {
    int n, t;
    unsigned long long dist;
    scanf("%d", &t);
    while (t-- > 0) {
        dist = 4000000000ULL;
        scanf("%d", &n);
        for (int i = 0; i < n; i++) {
            scanf("%d%d", &pointsA[i].st, &pointsA[i].nd);
        }
        for (int i = 0; i < n; i++) {
            scanf("%d%d", &pointsB[i].st, &pointsB[i].nd);
        }
        for (int i = 0; i < n; i++) {
            for (int j = 0; j < n; j++) {
                if (abs(pointsA[i].st - pointsB[j].st) + abs(pointsA[i].nd - pointsB[j].nd) < dist) {
                    dist = abs(pointsA[i].st - pointsB[j].st) + abs(pointsA[i].nd - pointsB[j].nd);
                }
            }
        }
        printf("%llu\n", dist); // one result per test case
    }
}
My code runs in O(n^2), but that is too slow. I do not know whether it will be useful, but y in pointsA is always > 0 and y in pointsB is always < 0. My code compares each distance to the current best and keeps the smallest.
for example:
input:
2
3
-2 2
1 3
3 1
0 -1
-1 -2
1 -2
1
1 1
-1 -1
Output:
5
4
My solution (note that for simplicity I do not care about overflow in manhattan_dist, and for that reason it does not work with unsigned long long):
#include <cstdlib>
#include <cstdio>
#include <cassert>
#include <vector>
#include <limits>
#include <algorithm>

typedef std::pair<int, int> Point;
typedef std::vector<std::pair<int, int> > PointsList;

static inline bool cmp_by_x(const Point &a, const Point &b)
{
    if (a.first < b.first) {
        return true;
    } else if (a.first > b.first) {
        return false;
    } else {
        return a.second < b.second;
    }
}

static inline bool cmp_by_y(const Point &a, const Point &b)
{
    if (a.second < b.second) {
        return true;
    } else if (a.second > b.second) {
        return false;
    } else {
        return a.first < b.first;
    }
}

static inline unsigned manhattan_dist(const Point &a, const Point &b)
{
    return std::abs(a.first - b.first) +
           std::abs(a.second - b.second);
}

int main()
{
    unsigned int n_iter = 0;
    if (scanf("%u", &n_iter) != 1) {
        std::abort();
    }
    for (unsigned i = 0; i < n_iter; ++i) {
        unsigned int N = 0;
        if (scanf("%u", &N) != 1) {
            std::abort();
        }
        if (N == 0) {
            continue;
        }
        PointsList pointsA(N);
        for (PointsList::iterator it = pointsA.begin(), endi = pointsA.end(); it != endi; ++it) {
            if (scanf("%d%d", &it->first, &it->second) != 2) {
                std::abort();
            }
            assert(it->second > 0);
        }
        PointsList pointsB(N);
        for (PointsList::iterator it = pointsB.begin(), endi = pointsB.end(); it != endi; ++it) {
            if (scanf("%d%d", &it->first, &it->second) != 2) {
                std::abort();
            }
            assert(it->second < 0);
        }
        std::sort(pointsA.begin(), pointsA.end(), cmp_by_y);
        std::sort(pointsB.begin(), pointsB.end(), cmp_by_y);
        const PointsList::const_iterator min_a_by_y = pointsA.begin();
        const PointsList::const_iterator max_b_by_y = (pointsB.rbegin() + 1).base();
        assert(*max_b_by_y == pointsB.back());
        unsigned dist = manhattan_dist(*min_a_by_y, *max_b_by_y);
        const unsigned diff_x = std::abs(min_a_by_y->first - max_b_by_y->first);
        const unsigned best_diff_y = dist - diff_x;
        const int max_y_for_a = max_b_by_y->second + dist;
        const int min_y_for_b = min_a_by_y->second - dist;
        PointsList::iterator it;
        for (it = pointsA.begin() + 1; it != pointsA.end() && it->second <= max_y_for_a; ++it) {
        }
        if (it != pointsA.end()) {
            pointsA.erase(it, pointsA.end());
        }
        PointsList::reverse_iterator rit;
        for (rit = pointsB.rbegin() + 1; rit != pointsB.rend() && rit->second >= min_y_for_b; ++rit) {
        }
        if (rit != pointsB.rend()) {
            pointsB.erase(pointsB.begin(), (rit + 1).base());
        }
        std::sort(pointsA.begin(), pointsA.end(), cmp_by_x);
        std::sort(pointsB.begin(), pointsB.end(), cmp_by_x);
        for (size_t j = 0; diff_x > 0 && j < pointsA.size(); ++j) {
            const Point &cur_a_point = pointsA[j];
            assert(max_y_for_a >= cur_a_point.second);
            const int diff_x = dist - best_diff_y;
            const int min_x = cur_a_point.first - diff_x + 1;
            const int max_x = cur_a_point.first + diff_x - 1;
            const Point search_term = std::make_pair(max_x, std::numeric_limits<int>::min());
            PointsList::const_iterator may_be_near_it = std::lower_bound(pointsB.begin(), pointsB.end(), search_term, cmp_by_x);
            for (PointsList::const_reverse_iterator rit(may_be_near_it); rit != pointsB.rend() && rit->first >= min_x; ++rit) {
                const unsigned cur_dist = manhattan_dist(cur_a_point, *rit);
                if (cur_dist < dist) {
                    dist = cur_dist;
                }
            }
        }
        printf("%u\n", dist);
    }
}
Benchmark on my machine (Linux + i7 2.70 GHz + gcc -Ofast -march=native):
$ make bench
time ./test1 < data.txt > test1_res
real 0m7.846s
user 0m7.820s
sys 0m0.000s
time ./test2 < data.txt > test2_res
real 0m0.605s
user 0m0.590s
sys 0m0.010s
test1 is your variant, and test2 is mine.
You'll need to learn how to write functions and how to use containers. With your current coding style, it's infeasible to get a better solution.
The problem is that the better solution is a recursive method. Sort the points by X coordinate. Now recursively split the set in half and determine the closest distance within each half, as well as the closest distance between a pair of points taken one from each half.
The last part is efficient because both halves are sorted by X: comparing the last values of the left half with the first values of the right half gives a good upper bound on the distance.
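To make the shape of that recursion concrete, here is a hedged, self-contained sketch for a single point set (the helper names are mine, and the strip scan is kept simple rather than tuned to the textbook O(n log n) bound):
#include <vector>
#include <algorithm>
#include <cstdlib>
#include <climits>

typedef std::pair<int, int> Pt;

static long long manhattan(const Pt &a, const Pt &b) {
    return std::llabs((long long)a.first - b.first) +
           std::llabs((long long)a.second - b.second);
}

// p must be sorted by x before the first call; returns the closest pair
// distance within the index range [lo, hi)
static long long closest(const std::vector<Pt> &p, size_t lo, size_t hi) {
    if (hi - lo <= 1)
        return LLONG_MAX;
    const size_t mid = lo + (hi - lo) / 2;
    long long best = std::min(closest(p, lo, mid), closest(p, mid, hi));
    // only pairs straddling the dividing line within 'best' of it can win,
    // because the x-gap alone already lower-bounds the Manhattan distance
    const long long midx = p[mid].first;
    for (size_t i = mid; i-- > lo && midx - p[i].first < best; )
        for (size_t j = mid; j < hi && p[j].first - midx < best; ++j)
            best = std::min(best, manhattan(p[i], p[j]));
    return best;
}
After sorting the points by x, the whole set is solved with closest(points, 0, points.size()).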
So there's a really simple optimization you can make that can cut a ton of time off.
Since you state that all points in set A have y > 0 and all points in set B have y < 0, you can immediately skip any point in A whose y > dist and any point in B whose y < -dist, where dist is the smallest distance found so far. These points can never be closer than the current closest pair:
for (int i = 0; i < n; i++) {
    if (pointsA[i].nd > dist)
        continue; // <- this is the big one
    for (int j = 0; j < n; j++) {
        if (pointsB[j].nd < -dist)
            continue; // <- helps although not as much
        if (abs(pointsA[i].st - pointsB[j].st) + abs(pointsA[i].nd - pointsB[j].nd) < dist) {
            dist = abs(pointsA[i].st - pointsB[j].st) + abs(pointsA[i].nd - pointsB[j].nd);
        }
    }
}
printf("%llu\n", dist);
For a test of 40000 points per set, on my machine with gcc and -O2 this reduces the time from 8.2 seconds down to roughly 0.01 seconds (and yields correct results)! (measured with QueryPerformanceCounter on Windows).
Not too shabby.
Fwiw, computing your distance twice isn't actually that big of a deal. First of all, that "second" calculation doesn't happen all that often; it only happens when a closer distance is found.
And secondly, for reasons I can't explain, storing it in a variable and only calculating it once actually consistently seems to add about 20% to the total run time, raising it from an average of 8.2 sec to about 10.5 seconds for the above set.
I'd say discarding points based on your assumptions about the Y values is by far the biggest bang for your buck you can get without significantly changing your algorithm.
You may be able to take advantage of that further by pre-sorting A in order of increasing Y and B in order of decreasing Y before finding the distances, to maximize the chance of skipping over subsequent points, as in the sketch below.
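A hedged sketch of that pre-sorting idea, reusing pointsA, pointsB, n, and dist from the question's code (the comparator lambdas assume C++11); thanks to the ordering, the continue checks turn into break:
// sort A by increasing y and B by decreasing y, so that once a point is too
// far away vertically, every later point is too far away as well
std::sort(pointsA, pointsA + n,
          [](const pair<int,int> &p, const pair<int,int> &q) { return p.nd < q.nd; });
std::sort(pointsB, pointsB + n,
          [](const pair<int,int> &p, const pair<int,int> &q) { return p.nd > q.nd; });
for (int i = 0; i < n; i++) {
    if ((long long)pointsA[i].nd > (long long)dist)
        break; // all remaining A points are even higher above the axis
    for (int j = 0; j < n; j++) {
        if ((long long)pointsB[j].nd < -(long long)dist)
            break; // all remaining B points are even lower below the axis
        unsigned long long cur = abs(pointsA[i].st - pointsB[j].st)
                               + abs(pointsA[i].nd - pointsB[j].nd);
        if (cur < dist)
            dist = cur;
    }
}
printf("%llu\n", dist);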
Keep a list of candidates in group A and group B, initially containing the whole input. Take the minimum y of A and the maximum y of B to get the pair closest in y, calculate the Manhattan distance of that pair, and eliminate from the candidate lists any point whose vertical gap alone exceeds this upper bound. This might slash the input or it might have essentially no effect, but it's O(N) and a cheap first step; a sketch of it follows below.
Now sort the remaining candidates in x and y. This gives you a separated list in y, and a mixed list in x, and is O(N log N), where N has been cut down, hopefully but not necessarily, by step one.
For each point, now calculate its closest neighbour in y (trivial) and closest in x (a bit harder), then calculate its minimum possible Manhattan distance, assuming the closest in x is also the closest in y. Eliminate any points further than your bound from the candidate list. Now sort again, by minimum possible distance. That's another N log N operation.
Now start with your best candidate and find its genuine minimum distance, by trying the closest point in either direction in x or y, and terminating when either delta x or delta y goes above the best so far, or above your maximum bound. If you find a better candidate pair than the current one, purge the candidate list of everything with a worse minimum possible distance. If the best candidate point doesn't form half of a candidate pair, you just purge that one point.
When you've purged a certain number of candidates, recalculate the lists. I'm not sure what the best threshold would be; certainly if you get to the worst candidate you must recalculate and start again at the best. Maybe use 50%.
Eventually you are left with only one candidate pair. I'm not quite sure what the analysis is - worst case, I suppose, you only eliminate a few candidates on each test. But for most inputs you should get the candidate list down to a small value pretty fast.
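A hedged sketch of just that first O(N) step (Pt and pruneByY are names I made up; assumes C++11 and the question's guarantee that every A point has y > 0 and every B point has y < 0):
#include <vector>
#include <algorithm>
#include <cstdlib>

typedef std::pair<int, int> Pt;

// Returns an upper bound taken from the vertically closest pair and prunes
// both candidate lists against it.
long long pruneByY(std::vector<Pt> &A, std::vector<Pt> &B) {
    const Pt a = *std::min_element(A.begin(), A.end(),
        [](const Pt &p, const Pt &q) { return p.second < q.second; }); // min y in A
    const Pt b = *std::max_element(B.begin(), B.end(),
        [](const Pt &p, const Pt &q) { return p.second < q.second; }); // max y in B
    const long long bound = std::llabs((long long)a.first - b.first)
                          + (long long)a.second - b.second;
    // a point whose vertical gap to the other side already exceeds the bound
    // can never be part of a closer pair
    A.erase(std::remove_if(A.begin(), A.end(),
        [&](const Pt &p) { return p.second - (long long)b.second > bound; }), A.end());
    B.erase(std::remove_if(B.begin(), B.end(),
        [&](const Pt &p) { return (long long)a.second - p.second > bound; }), B.end());
    return bound;
}
The returned bound can then seed the later sorting and purging stages.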
I tried to solve this exercise, but I only got 66 percent and I cannot understand why. Can you help?
The exercise is:
Write a function:
int solution(vector<int> &A);
that, given an array A of N integers, returns the smallest positive integer (greater than 0) that does not occur in A.
For example, given A = [1, 3, 6, 4, 1, 2], the function should return 5.
Given A = [1, 2, 3], the function should return 4.
Given A = [−1, −3], the function should return 1.
The solution I wrote is:
#include <algorithm>
#include <cmath>
using namespace std;

int solution(vector<int> &A) {
    if (A.size() == 0 || (A.size() == 1 && A.at(0) <= 0))
        return 1;
    if (A.size() == 1)
        return A.at(0) + 1;
    sort(A.begin(), A.end());
    if (A.at(A.size() - 1) <= 0)
        return 1;
    auto ip = std::unique(A.begin(), A.end());
    A.resize(distance(A.begin(), ip));
    A.erase(remove_if(A.begin(), A.end(), [](const int i) { return i < 0; }), A.end());
    if (A.at(0) != 1)
        return 1;
    if (A.size() == 1)
        return (A.at(0) != 1 ? 1 : 2);
    int i = 0;
    for (; i < A.size(); ++i) {
        if (A.at(i) != i + 1)
            return A.at(i - 1) + 1;
    }
    return A.at(A.size()) + 1;
}
The following algorithm has O(n) complexity. There is no need to sort or to erase.
We know that the first missing value is less than or equal to n+1, where n is the array size.
So we simply use an array present[n+2], initialised to 0, and look at all values A[i]:
if (A[i] <= 1+n && A[i] > 0) present[A[i]] = 1;
Finally, in a second step, we examine the array present[] and search for the first index k such that present[k] == 0.
#include <iostream>
#include <vector>

int find_missing (const std::vector<int> &A) {
    int n = A.size();
    std::vector<int> present (n+2, 0);
    int vmax = n+1;
    for (int i = 0; i < n; ++i) {
        if (A[i] <= vmax && A[i] > 0) {
            present[A[i]] = 1;
        }
    }
    for (int k = 1; k <= vmax; ++k) {
        if (present[k] == 0) return k;
    }
    return -1;
}

int main() {
    std::vector<int> A = {1, 2, 0, 3, -3, 5, 6, 8};
    int missing = find_missing (A);
    std::cout << "First missing element = " << missing << std::endl;
    return 0;
}
Well this is wrong
if(A.size()==1)
return A.at(0)+1;
If A is {2} that code will return 3 when the correct answer is 1
Also
A.erase(remove_if(A.begin(), A.end(),[](const int i) {return i < 0; }),A.end());
should be
A.erase(remove_if(A.begin(), A.end(),[](const int i) {return i <= 0; }),A.end());
Also
return A.at(A.size()) + 1;
is guaranteed to throw std::out_of_range: .at() is bounds-checked, and A.size() is one past the last valid index.
Even a small amount of testing and debugging would have caught these errors. It's a habit you should get into.
I think there are far too many special cases in the code, which only serve to complicate the code and increase the chance of bugs.
This answer is an implementation of the proposal given in the comment by PaulMcKenzie, so all credit goes to PaulMcKenzie.
It is not the fastest solution, but it is compact. The idea is basically:
Sort the data.
Then compare adjacent values and check whether the next value is equal to the previous value + 1. If not, we have found a gap.
This can be implemented with the function std::adjacent_find, putting all the side conditions into the lambda. If std::adjacent_find cannot find such a pair, then we take the next possible positive value.
Please see the example below:
#include <iostream>
#include <vector>
#include <algorithm>

int solution(std::vector<int>& data) {
    // Sort
    std::sort(data.begin(), data.end());
    // If 1 itself is absent, it is the answer; the adjacent scan below
    // cannot detect a missing 1 at the front of the range
    if (!std::binary_search(data.begin(), data.end(), 1))
        return 1;
    // Check if there is a gap in the positive values
    const auto gap = std::adjacent_find(data.begin(), data.end(),
        [](const int p, const int n) { return (n != p) && (n != (p + 1) && p > 0); });
    // If there is no gap, take the next positive value
    return (gap == data.end()) ? (data.back() > 0 ? data.back() + 1 : 1) : *gap + 1;
}

int main() {
    // Some test cases
    std::vector<std::vector<int>> testCases{
        {1,3,6,4,1,2},
        {1,2,3},
        {-1,-3}
    };
    for (auto& testCase : testCases)
        std::cout << solution(testCase) << '\n';
    return 0;
}
Others have already pointed out the main errors, but I would like to invite you to try a different solution instead of fixing all the bugs and spending much time on debugging, because your solution seems a little overcomplicated.
Here is a way you can think about the problem:
What is the minimum number the function can return? Since it returns a positive integer, it is 1, in the case that 1 is not in the array. Because of that, we can use any number <= 0 as a sentinel to see whether we have found our result while scanning the vector (see the next point).
If 1 is in the array, how do I find the wanted number? Your intuition is correct: it is easier if the vector is sorted. You can iterate over your data, and when you find a "hole" between two subsequent elements, the value of the first element of the hole + 1 is your result.
What do I do if the array contains 1 and has no holes? Well, you return the smallest positive integer that is not in the array, so the last element + 1. You may notice that case by checking whether your "candidate" value (initialised to a number that should never be returned, so <= 0) has changed during the scan.
Let's go to the code:
int solution(std::vector<int>& v) {
    int retVal = 0;
    std::sort(v.begin(), v.end());
    // if 1 is not present, it is the answer (this also covers an empty vector
    // and inputs such as {2, 3}, where the hole sits before the first element)
    if (!std::binary_search(v.begin(), v.end(), 1))
        return 1;
    for (int i = 0; i + 1 < (int)v.size(); i++) {
        if (v[i] > 0 && v[i+1] > v[i] + 1) {
            retVal = v[i] + 1;
            break;
        }
    }
    if (retVal == 0) {
        if (v.back() > 0)
            retVal = v.back() + 1;
        else
            retVal = 1;
    }
    return retVal;
}
As suggested you can use the standard library a little bit more, but I think this is reasonably simple and efficient.
Other note:
I think your assignment does not require this, but I mention it for completeness. Most of the time you don't want a function to modify its parameters: you can pass the vector by value, meaning the function receives a complete copy of your data and the original is untouched, or you can pass a const reference and create a copy inside the function, as sketched below.
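For example, a thin const-reference wrapper around the function above could look like this (solutionConst is a hypothetical name):
int solutionConst(const std::vector<int> &input) {
    std::vector<int> v(input); // local copy; the caller's data stays intact
    return solution(v);        // reuse the mutating version on the copy
}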
I found this problem somewhere in a contest and haven't been able to come up with a solution yet.
There are N cities with coordinates (x, y). I have to start from the first
city and reach the second city. There is a gas station in each city,
so I have to find the minimum necessary volume of the gas tank to reach
the final city.
For example:
Input:
3
17 4
19 4
18 5
Output:
1.414
Here, my way is: 1->3->2
I'm using a simple brute-force method, but it is too slow. How can I optimize my code?
Maybe there is a better solution?
#include <iostream>
#include <algorithm>
#include <stack>
#include <math.h>
#include <cstring>
#include <iomanip>
#include <map>
#include <queue>
#include <fstream>
using namespace std;

int n, used[203];
double min_dist;

struct pc {
    int x, y;
};
pc a[202];

double find_dist(pc a, pc b) {
    double dist = sqrt( (a.x - b.x)*(a.x - b.x) + (a.y - b.y)*(a.y - b.y) );
    return dist;
}

void functio(double d, int used[], int k, int step) {
    used[k] = 1;
    if (k == 1) {
        if (d < min_dist) {
            min_dist = d;
        }
        used[k] = 0;
        return;
    }
    for (int i = 1; i < n; ++i) {
        if (i != k && used[i] == 0) {
            double temp = find_dist(a[k], a[i]);
            if (temp > d) {
                if (temp < min_dist)
                    functio(temp, used, i, step + 1);
            }
            else {
                if (d < min_dist)
                    functio(d, used, i, step + 1);
            }
        }
    }
    used[k] = 0;
}

int main() {
    cin >> n;
    for (int i = 0; i < n; ++i)
        cin >> a[i].x >> a[i].y;
    min_dist = 1000000;
    memset(used, 0, sizeof(used));
    functio(0, used, 0, 0);
    cout << fixed << setprecision(3) << min_dist << endl;
}
The minimum spanning tree has the neat property of encoding all of the paths between vertices that minimize the length of the longest edge on the path. For Euclidean MST, you can compute the Delaunay triangulation and then run your favorite O(m log n)-time algorithm (on a graph with m = O(n) edges) for a total running time of O(n log n). Alternatively, you could run Prim with a naive priority queue for an O(n^2)-time algorithm with a good constant (especially if you exploit SIMD).
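To make the O(n^2) route concrete, here is a hedged sketch written as the closely related minimax-Dijkstra relaxation rather than literal Prim (City and minTank are names I made up); reach[v] holds the smallest tank that suffices to get from city 0 to city v:
#include <vector>
#include <cmath>

struct City { double x, y; };

double minTank(const std::vector<City> &c) {
    const int n = (int)c.size();
    std::vector<double> reach(n, 1e18); // smallest tank known to reach each city
    std::vector<bool> done(n, false);
    reach[0] = 0;
    for (int iter = 0; iter < n; ++iter) {
        int u = -1;
        for (int v = 0; v < n; ++v) // naive "priority queue": a linear scan
            if (!done[v] && (u < 0 || reach[v] < reach[u])) u = v;
        done[u] = true;
        if (u == 1) break; // destination settled; reach[1] is final
        for (int v = 0; v < n; ++v) {
            if (done[v]) continue;
            const double d = std::hypot(c[u].x - c[v].x, c[u].y - c[v].y);
            const double via = std::max(reach[u], d); // bottleneck through u
            if (via < reach[v]) reach[v] = via;
        }
    }
    return reach[1];
}
On the question's example, city 3 (index 2) is settled first, and city 2 (index 1) ends at sqrt(2) ≈ 1.414, matching the expected output.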
So what you are trying to optimise in your algorithm is the longest distance you travel between two cities. Because that's how big your gas tank needs to be.
This is a variation on shortest path, because there you're trying to optimise the entire path length.
I think you could get away with this:
make a list of edges (the distance between each pair of cities).
remove the longest edge from the list, unless this causes the destination to become unreachable.
once you cannot remove the longest edge anymore, that edge is the limiting factor in going to your destination. The rest of the route doesn't matter anymore.
Then in the end you should have a list of edges that make up a path between source and destination.
I haven't proven this solution to be optimal, so no guarantees. But consider this: if you remove the longest edge, only shorter edges remain to travel on, so the maximum leg distance won't increase.
About the complexity: the time complexity is O(n^2 log n), because you have to sort the O(n^2) edges. The memory complexity is O(n^2).
This is probably not the most efficient algorithm, because it is a graph algorithm and makes no use of the fact that the cities lie in a Euclidean plane. There is probably some optimisation there...
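For what it's worth, the same bottleneck value can be computed by running this idea in the complementary direction: instead of deleting the longest edges until deletion would disconnect the destination, add edges shortest-first with a union-find until source and destination touch. A hedged Kruskal-style sketch (Edge, DSU, and bottleneck are names I made up; assumes C++11):
#include <vector>
#include <algorithm>
#include <numeric>
#include <cmath>

struct Edge { double w; int u, v; };

struct DSU {
    std::vector<int> p;
    explicit DSU(int n) : p(n) { std::iota(p.begin(), p.end(), 0); }
    int find(int x) { return p[x] == x ? x : p[x] = find(p[x]); }
    void unite(int a, int b) { p[find(a)] = find(b); }
};

double bottleneck(const std::vector<std::pair<double,double> > &c, int src, int dst) {
    std::vector<Edge> es;
    for (int i = 0; i < (int)c.size(); ++i)
        for (int j = i + 1; j < (int)c.size(); ++j)
            es.push_back({std::hypot(c[i].first - c[j].first,
                                     c[i].second - c[j].second), i, j});
    std::sort(es.begin(), es.end(),
              [](const Edge &a, const Edge &b) { return a.w < b.w; });
    DSU dsu((int)c.size());
    for (const Edge &e : es) {
        dsu.unite(e.u, e.v);
        if (dsu.find(src) == dsu.find(dst))
            return e.w; // src and dst just became connected
    }
    return -1; // unreachable (cannot happen in a complete graph)
}
Sorting the O(n^2) edges dominates, so this also runs in O(n^2 log n).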
You can reduce the time complexity to O(n^2 log n) using binary search, which will run within the 1 second time limit. The idea behind the binary search is: if we can reach city 2 from city 1 using a container of volume x, there is no need to check any larger volume; if we cannot, then we need more than x. To check whether we can reach city 2 using volume x, you can use BFS: if two cities are within distance x of each other, it is possible to move from one to the other, and we can say they are connected by an edge.
Code:
int vis[203];
double eps = 1e-8;

struct pc {
    double x, y;
};

double find_dist(pc &a, pc &b) {
    double dist = sqrt((a.x - b.x)*(a.x - b.x) + (a.y - b.y)*(a.y - b.y));
    return dist;
}

bool can(vector<pc> &v, double x) { // can we reach the 2nd city with volume x?
    int n = v.size();
    vector<vector<int>> graph(n, vector<int>(n, 0)); // graph in adjacency matrix form
    // set edges in graph
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++) {
            if (i == j) continue; // same city
            double d = find_dist(v[i], v[j]);
            if (d <= x) graph[i][j] = 1; // can reach from city i to city j using x volume
        }
    }
    // perform BFS
    memset(vis, 0, sizeof(vis));
    queue<int> q;
    q.push(0); // we start from city 0 (0-based index)
    vis[0] = 1;
    while (!q.empty()) {
        int top = q.front();
        q.pop();
        if (top == 1) return true; // can reach city 2 (1 in 0-based index)
        for (int i = 0; i < n; i++) {
            if (top != i && !vis[i] && graph[top][i] == 1) {
                q.push(i);
                vis[i] = 1;
            }
        }
    }
    return false; // can't reach city 2
}

double calc(vector<pc> &v) { // calculates minimum volume using binary search
    double lo = 0, hi = 1e18;
    while (abs(hi - lo) > eps) {
        double mid = (lo + hi) / 2;
        if (can(v, mid)) {
            hi = mid; // mid volume is enough, so we need at most mid
        } else {
            lo = mid; // we need more than mid volume
        }
    }
    return lo;
}
Working on an implementation of finding the Greatest Contiguous Sum of a sequence using the Divide and Conquer method, following a reference algorithm.
My return value is often incorrect.
For example:
{5, 3} returns 5 instead of 8.
{-5, 3} returns 0 instead of 3.
{ 6, -5, 7 } returns 7 instead of 8.
Other notes:
Decrementing or incrementing at the first or last iterators throws an exception, saying that I can't increment, decrement, or dereference at that point. There's a bug somewhere in gcsMid, I think, but I haven't been able to solve it.
this implementation uses random-access iterators, signified as RAIter
// function max - finds greatest number given 3 size_ts
size_t max(size_t a, size_t b, size_t c)
{
    if (a >= b && a >= c)
    {
        return a;
    }
    else if (b >= a && b >= c)
    {
        return b;
    }
    else
    {
        return c;
    }
}

// function gcsMid
// main algorithm to find subsequence if it spans across the center line
template<typename RAIter>
size_t gcsMid(RAIter first, RAIter center, RAIter last)
{
    size_t sum = 0;
    size_t leftSum = 0;
    size_t rightSum = 0;
    // to the left of center
    for (RAIter i = center; i > first; i--)
    {
        sum += *i;
        if (sum > leftSum)
        {
            leftSum = sum;
        }
    }
    // to right of center
    sum = 0;
    for (RAIter j = (center + 1); j < last; j++)
    {
        sum += *j;
        if (sum > rightSum)
        {
            rightSum = sum;
        }
    }
    // return the sums from mid
    return leftSum + rightSum;
}

// main function to call
template<typename RAIter>
int gcs(RAIter first, RAIter last)
{
    size_t size = distance(first, last);
    // base case is when the subarray only has 1 element, when first == last
    if (first == last || size == 1)
    {
        if (size < 1)
        {
            return 0;
        }
        if (*first < 0)
        {
            return 0;
        }
        return *first;
    }
    // middle point
    RAIter center = first + (size/2);
    // return max of leftsum, rightsum, and midsum
    return max(gcs(first, center),
               gcs(center + 1, last),
               gcsMid(first, center, last));
}
}
You have two problems with your code:
A. This loop:
for (RAIter i = center; i > first; i--)
does not include first in the loop. The reference algorithm does. You can't just use >= as the reference algorithm does as it doesn't work for iterators. Either add an extra bit of code to check first at the end, or change your loop so it somehow includes first (maybe a do while loop would suit better).
B. These definitions:
size_t sum = 0;
size_t leftSum = 0;
size_t rightSum = 0;
should not be size_t, because size_t is unsigned. This means that when the sum goes negative, it wraps around to a huge positive value, so checks like if(sum > leftSum) no longer work.
Change them to int.
The best way to find these kinds of errors is to run the code through a debugger. You can then step through each line of your code and see what the variable values are. This makes it easy to spot things like negative numbers becoming large positive numbers as above.
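Putting both fixes together, a corrected gcsMid might look like the sketch below (the left walk now includes first, the sums are plain int, and max and gcs would need to switch from size_t to int to match):
template<typename RAIter>
int gcsMid(RAIter first, RAIter center, RAIter last)
{
    int sum = 0;
    int leftSum = 0;
    int rightSum = 0;
    // walk left from center, this time including first
    RAIter i = center;
    while (true) {
        sum += *i;
        if (sum > leftSum)
            leftSum = sum;
        if (i == first)
            break; // first has been processed; stop before stepping past it
        --i;
    }
    // walk to the right of center, as before
    sum = 0;
    for (RAIter j = center + 1; j < last; ++j) {
        sum += *j;
        if (sum > rightSum)
            rightSum = sum;
    }
    return leftSum + rightSum;
}
With these changes, {5, 3} yields 8, {-5, 3} yields 3, and {6, -5, 7} yields 8, as expected.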
Suppose you have a linear equation in n variables. The goal is to either determine that no integer solution is possible, or determine the smallest coefficient vector, for an integer solution.
In other words, let a·x = b, where x is the vector you want to find and a is a vector of coefficients; b is a scalar constant. Find x such that the sum of x_1, ..., x_n is minimized and all x_i are integers, or determine that no such x exists. From now on, I will write |x| for the sum of the x_i.
What is an efficient way to solve this? I feel like this is similar to the Knapsack problem, but I'm not entirely sure.
My Solution
The way I tried to solve this was doing a Breadth-First Search on the space of vectors, where the breadth would be the sum of the vector entries.
At first I did this naively, starting from |x| = 0, but when n is even moderately large, and the solution is non-trivial, the number of vectors generated is enormous (n ^ |x| for each |x| you go through). Even worse, I was generating many duplicates. Even when I found a way to generate almost no duplicates, this way is too slow.
Next, I tried starting from a higher |x| from the beginning, by putting a lower bound on the optimal |x|. I sorted a in decreasing order, then removed all a_i > b. A lower bound on |x| is then b / a[0]. However, from this point I had difficulty quickly generating all the vectors of size |x|. From here on, my code is mostly hacky.
In the code, b = distance, x = clubs, n = numClubs
Here is what it looks like:
short getNumStrokes (unsigned short distance, unsigned short numClubs, vector<unsigned short> clubs) {
    if (distance == 0)
        return 0;
    numClubs = pruneClubs(distance, &clubs, numClubs);
    //printClubs (clubs, numClubs);
    valarray<unsigned short> a(numClubs), b(numClubs);
    queue<valarray<unsigned short> > Q;
    unsigned short floor = distance / clubs[0];
    if (numClubs > 1) {
        for (int i = 0; i < numClubs; i++) {
            a[i] = floor / numClubs;
        }
        Q.push (a);
    }
    // starter vectors
    for (int i = 0; i < numClubs; i++) {
        for (int j = 0; j < numClubs; j++) {
            if (i == j)
                a[j] = distance / clubs[0];
            else
                a[j] = 0;
        }
        if (dot_product (a, clubs) == distance)
            return count_strokes(a);
        // add N starter values
        Q.push (a);
    }
    bool sawZero = false;
    while (! Q.empty ()) {
        a = Q.front(); // take first element from Q
        Q.pop(); // apparently need to do this in 2 operations >_<
        sawZero = false;
        for (unsigned int i = 0; i < numClubs; i++) {
            // only add numbers past right-most non-zero digit
            //if (sawZero || (a[i] != 0 && (i + 1 == numClubs || a[i + 1] == 0))) {
            //    sawZero = true;
            b = a; // deep copy
            b[i] += 1;
            if (dot_product (b, clubs) == distance) {
                return count_strokes(b);
            } else if (dot_product (b, clubs) < distance) {
                //printValArray (b, clubs, numClubs);
                Q.push (b);
            }
            //}
        }
    }
    return -1;
}
EDIT: I'm using valarray because my compiler isn't C++ 11 compliant, so I can't use array. Other code suggestions much appreciated.
Your problem is an equality constrained integer knapsack problem:
min |x|
s.t. ax = b
x integer
If you have access, CPLEX or GUROBI can generally solve such problems quite easily.
Otherwise, consider some reductions of the constraint set
(e.g., http://www.optimization-online.org/DB_FILE/2002/11/561.ps)
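If you only need the nonnegative case from your golf-club setting (a_i > 0, b >= 0, x >= 0), this is exactly the classic minimum-coin-change dynamic program, which runs in O(n*b) time. A hedged sketch (minSum is a name I made up):
#include <vector>
#include <climits>

// returns the minimal |x| with a.x == b and x >= 0 integral, or -1 if none
int minSum(const std::vector<int> &a, int b) {
    std::vector<int> dp(b + 1, INT_MAX); // dp[s] = min strokes to total exactly s
    dp[0] = 0;                           // zero strokes reach distance 0
    for (int s = 1; s <= b; ++s)
        for (size_t i = 0; i < a.size(); ++i)
            if (a[i] <= s && dp[s - a[i]] != INT_MAX && dp[s - a[i]] + 1 < dp[s])
                dp[s] = dp[s - a[i]] + 1;
    return dp[b] == INT_MAX ? -1 : dp[b];
}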
I wanted to know what would be the fastest and most efficient way of comparing three numerical properties of an object and determining the lowest and the highest value.
Say I have an object with the following properties:
obj.a = 5;
obj.b = 13;
obj.c = 2;
Now I need to find the lowest value (2) and the highest value (13).
One option I have is using conditional statements: three conditions for finding the greatest element and three for the lowest, resulting in 6 conditional statements. My other option is pushing these values into a vector and then retrieving the extreme values from the sorted vector. Is there an alternative approach? Any suggestions?
If using C++11 is okay, you can just do:
auto result = std::minmax({obj.a, obj.b, obj.c});
result.first will hold the smallest value and result.second the largest.
Create your Min() function for 2 variables as:
int Min(int x, int y)
{
    return y ^ ((x ^ y) & -(x < y));
}
Similarly Max for 2 variables as:
int Max(int x, int y)
{
    return x ^ ((x ^ y) & -(x < y));
}
And now call this function for your 3 variables as:
int max = Max(a, Max(b, c));
int min = Min(a, Min(b, c));
These methods are quite efficient and may be used for comparing any number of variables.
They rely on the fact that -(x < y) is an all-ones bit mask when the comparison is true and 0 when it is false, together with two XOR identities:
the XOR of a number with itself is 0;
the XOR of a number with 0 is the number itself.
Again, the calls can be nested for comparisons of N numbers.
NOTE: You may even replace -(x < y) with (x - y) >> 31 (treating x and y as 32-bit signed integers and assuming an arithmetic right shift), since the shift already yields the all-ones mask when x < y.
I am quite sure that it cannot get more efficient than that for 3 numbers.
A solution with no more than three compares:
struct Obj
{
    int a;
    int b;
    int c;

    std::pair<int,int> getMinMax() const
    {
        std::pair<int,int> minmax = a < b ? std::make_pair(a,b) : std::make_pair(b,a);
        if (c < minmax.first)
            minmax.first = c;
        else if (c > minmax.second)
            minmax.second = c;
        return minmax;
    }
};
I am giving the pseudocode:
a[0] = obj.a
a[1] = obj.b
a[2] = obj.c
To get largest:
index = int(obj.a < obj.b)
larger = a[index]
largest = a[(larger > obj.c) ? index : 2]
To get the smallest:
index = int (obj.a > obj.b)
smaller = a[index]
smallest = a[(smaller < obj.c) ? index : 2]
Makes 2 + 2 comparisons.
Once you've found that a is the minimum, you don't have to consider it for the maximum anymore. So any solution that determines min and max separately is sub-optimal.
There are 6 ways to order 3 things: abc acb bac bca cab cba. So no solution can get by with fewer than 6 cases.
This solution has 6 branches, with at most 3 comparisons per case, so I expect it to be optimal:
if (obj.a < obj.b)
{
    if (obj.b < obj.c)
    {
        min = obj.a; max = obj.c; // abc
    }
    else if (obj.c < obj.a)
    {
        min = obj.c; max = obj.b; // cab
    }
    else
    {
        min = obj.a; max = obj.b; // acb
    }
}
else
{
    if (obj.a < obj.c)
    {
        min = obj.b; max = obj.c; // bac
    }
    else if (obj.c < obj.b)
    {
        min = obj.c; max = obj.a; // cba
    }
    else
    {
        min = obj.b; max = obj.a; // bca
    }
}
EDIT:
Apparently, this is slow; I guess it has something to do with the conditional jumps, since conditional moves do better. The following version may be better, although it still has one branch:
if (obj.a < obj.b)
{
    min = std::min(obj.a, obj.c);
    max = std::max(obj.b, obj.c);
}
else
{
    min = std::min(obj.b, obj.c);
    max = std::max(obj.a, obj.c);
}
Eliminating the last branch would turn this into one of the other answers:
min = std::min(std::min(obj.a, obj.b), obj.c);
max = std::max(std::max(obj.a, obj.b), obj.c);
Bottom line: who would have thought that the 'naive' approach would be faster than writing it all out!