Please note: I am not sure if this fits here; if not, please move it to the proper forum.
I have a program that tries to solve the Traveling Salesman Problem (TSP for short).
My code seems to run fine until I try to use 33810 cities, at which point the program crashes after trying to access the position costs[69378120]: it simply stops responding and ends soon after.
I am trying the following code:
#include <iostream>
#include <stdlib.h>
#include <malloc.h>
#include <fstream>
#include <math.h>
#include <vector>
#include <limits>
using namespace std;
typedef long long int itype;
int main(int argc, char *argv[]) {
    itype n;
    ifstream fenter;
    fenter.open(argv[1]);
    ofstream fexit;
    fexit.open(argv[2]);
    fenter >> n;

    double *x;
    double *y;
    x = (double*) malloc(sizeof(double)*n);
    y = (double*) malloc(sizeof(double)*n);

    cout << "N : " << n << endl;
    for (int p = 0; p < n; p++) {
        fenter >> x[p] >> y[p];
    }
    fenter.close();

    int *costs;
    costs = (int*) malloc(sizeof(int)*(n*n));

    for (int u = 0; u < n; u++) {
        for (int v = u+1; v < n; v++) {
            itype cost = floor(sqrt(pow(x[u] - x[v], 2) + pow(y[u] - y[v], 2)));
            cout << "U: " << u << " V: " << v << " COST: " << cost << endl;
            costs[u*n + v] = cost;
            cout << "POS (u*n + v): " << (u*n + v) << endl;
            cout << "POS (v*n + u): " << (v*n + u) << endl;
            costs[v*n + u] = cost;
        }
    }
    return 0;
}
According to some checks, the cost array should need about 9.14493 GB, but Windows only gives the process 0.277497 GB. Then, after trying to read costs[69378120], it closes.
For now I am not worried about efficiency, nor about actually solving the TSP; I just need to fix this issue. Any clues?
---UPDATE---
Following the suggestions, I tried changing a few things. The result is the code below:
int main(int argc, char *argv[]) {
    int n;
    ifstream entrada;
    entrada.open(argv[1]);
    ofstream saida;
    saida.open(argv[2]);
    entrada >> n;

    vector<double> x(n);
    vector<double> y(n);
    for (int p = 0; p < n; p++) {
        entrada >> x[p] >> y[p];
    }
    entrada.close();

    vector<itype> costs(n*n);
    if(costs == NULL){ cout << "Out of memory!" << endl; return -1;}

    for (int u = 0; u < n; u++) {
        for (int v = u+1; v < n; v++) {
            itype cost = floor(sqrt(pow(x[u] - x[v], 2) + pow(y[u] - y[v], 2)));
            costs[u*n + v] = cost;
            costs[v*n + u] = cost;
        }
    }
    return 0;
}
The problem still persists
If you compile in 32-bit mode, size_t is 32 bits, and then
1143116100*4
is larger than the largest 32-bit number, hence the overflow.
Compiling in 64-bit:
size_t siz = 1143116100;
std::vector<long long> big(siz);
std::cout << big.size() << ", " << big.max_size() << std::endl;
which prints
1143116100, 2305843009213693951
If I change it to
size_t siz = 1024*1143116100;
I get a bad_alloc, as my swap disk is not big enough for that.
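If you stay with the full n*n cost matrix, a minimal sketch of guarding such an allocation in a 64-bit build could look like this (the 33810 city count is taken from the question; everything else is illustrative):
#include <iostream>
#include <new>      // std::bad_alloc
#include <vector>

int main() {
    std::size_t n = 33810;                   // city count from the question
    std::size_t cells = n * n;               // 1143116100, computed in size_t, no overflow
    try {
        std::vector<long long> costs(cells); // roughly 9.1 GB of zero-initialized storage
        std::cout << "allocated " << costs.size() << " cells\n";
    } catch (const std::bad_alloc&) {
        std::cerr << "not enough memory for " << cells << " cells\n";
        return 1;
    }
}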
Related
The formula is listed in the following article: https://en.wikipedia.org/wiki/Formula_for_primes. I am trying to implement it, but with no success; for whatever reason the code produces numbers that seem to be the nth power of two plus 1, which is obviously not what I want to achieve.
#include <iostream>
#include <cmath>
using namespace std;
int nth_prime(int n) {
    double s = 1;
    for (int i = 1; i <= pow(2, n); i++) {
        double c = 0;
        for (int j = 1; j <= i; j++) {
            double f = (tgamma(j)+1)/j;
            c += floor(pow(cos(M_PI*f), 2));
        }
        s += floor(pow(n/c, 1/n));
    }
    return s;
}

int main() {
    int n;
    while (cin >> n) {
        cout << nth_prime(n) << endl;
    }
    return 0;
}
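For what it's worth, a likely culprit is the exponent in s += floor(pow(n/c, 1/n)): 1/n is integer division, which yields 0 for every n > 1, so pow(..., 0) is 1 and the outer loop simply adds 1 on each of its 2^n iterations, which is exactly the "2^n + 1" pattern described. Below is a sketch of the same function with floating-point division; it is only a sketch, and the formula stays practical only for very small n because (j-1)! quickly stops being exactly representable in a double.
#include <iostream>
#include <cmath>

// Willans-style formula with the exponent fixed: 1.0 / n instead of 1 / n.
// Caveat: tgamma(j) = (j-1)!, and (j-1)! + 1 loses exact integer precision
// in a double once j is around 20, so this only behaves for very small n.
int nth_prime(int n) {
    double s = 1;
    for (int i = 1; i <= pow(2, n); i++) {
        double c = 0;
        for (int j = 1; j <= i; j++) {
            double f = (tgamma(j) + 1) / j;       // ((j-1)! + 1) / j
            c += floor(pow(cos(M_PI * f), 2));    // 1 if j is 1 or prime, else 0
        }
        s += floor(pow(n / c, 1.0 / n));          // floating-point exponent
    }
    return s;
}

int main() {
    int n;
    while (std::cin >> n)
        std::cout << nth_prime(n) << std::endl;
}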
#include <iostream>
#include <algorithm>
using std::cin;
using std::cout;
using std::endl;
using std::sort;
int main()
{
    int x = 0;
    int n;                                  // size of the range 1..n
    cin >> n;                               // e.g. 5
    long long *ptr1 = new long long[n - 1]; // the array holds one number fewer than n
    for (int x = 0; x < n - 1; x++)
    {
        cin >> ptr1[x];
    }
    sort(ptr1, ptr1 + (n - 1));
    for (int z = 1; z < n; z++)
    {
        if (z != ptr1[x])
        {
            cout << z;
            break;
        }
        x++;
    }
    return 0;
}
You're given all positive integers from 1,2,…,n except one integer. Find the missing integer.
Input
The first line of input contains an integer n (2 ≤ n ≤ 2×10^5).
The second line of input contains n−1 distinct integers from 1 to n (inclusive).
Output
Print the missing integer.
When I try to submit this code, I get a wrong answer on test 10, but I don't know why, and the judge doesn't show the test. What is wrong?
I have a three-fold answer:
1. This program leaks memory.
2. You included <algorithm>; please use it (look on cppreference). Spoilers: vector, iota, mismatch (a sketch follows the snippet below).
3. You don't reset x before the second loop. That's never going to work unless the missing integer is the last element of the array and not equal to 1:
// for (... z)
if (z != ptr1[x] /* Here */) {
// print 1, end loop OR invoke undefined behavior
}
x++; // Now x is equal to (n - 1)
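For reference, here is a minimal sketch of the vector/iota/mismatch approach from the spoiler above: sort the input, lay 1..n beside it, and report the first place where the two sequences disagree.
#include <algorithm>   // sort, mismatch
#include <iostream>
#include <numeric>     // iota
#include <vector>

int main() {
    int n;
    std::cin >> n;
    std::vector<int> given(n - 1);
    for (int& v : given) std::cin >> v;
    std::sort(given.begin(), given.end());

    std::vector<int> full(n);
    std::iota(full.begin(), full.end(), 1);   // 1, 2, ..., n

    // The first position where the sorted input deviates from 1..n marks the gap;
    // if there is no deviation, the missing number is n itself.
    auto gap = std::mismatch(given.begin(), given.end(), full.begin());
    std::cout << *gap.second << '\n';
}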
There are some problems with your code:
long long *ptr1 = new long long[n - 1];
You call new, without delete. This will create a memory leak.
You haven't reinitialized x, so any access to ptr1[x] is out-of-bounds.
Solve all of that, and your code will look like this:
#include <iostream>
#include <algorithm>
#include <vector>

int main()
{
    int n;
    std::cin >> n;
    std::vector<int> vec(n - 1);   // std::vector instead
    for (int x = 0; x < n - 1; x++)
    {
        std::cin >> vec[x];
    }
    std::sort(vec.begin(), vec.end());
    int x = 0;                     // use another variable instead of reusing the old one
    for (int z = 1; z <= n; z++)   // go up to n so the case where n itself is missing is covered
    {
        if (x == n - 1 || z != vec[x])
        {
            std::cout << z;
            break;
        }
        x++;
    }
    return 0;
}
But this isn't the best approach anyway. As #molbdnilo suggests:
#include <iostream>

int main()
{
    int n;
    std::cin >> n;
    int sum{};
    for (int i = 0; i < n - 1; ++i)
    {
        int tmp;
        std::cin >> tmp;
        sum += tmp;
    }
    std::cout << (n + 1) * n / 2 - sum;
}
You can solve this in a more straightforward way if you subtract the numbers that were entered from the expected sum of all numbers 1, 2, ..., n (see comments by #molbdnilo, #Aconcagua and #marcus-müller):
#include <iostream>

int main() {
    std::size_t n;
    std::cin >> n;
    std::size_t sum{ n*(n + 1)/2 };
    for (std::size_t idx = 1; idx != n; ++idx) {
        std::size_t thisNumber;
        std::cin >> thisNumber;
        sum -= thisNumber;
    }
    std::cout << "Missing number: " << sum << std::endl;
}
Building blocks:
The expected sum: int expected_sum = n * (n + 1) / 2;
The sum of all integers fed to the test after you've read n:
#include <iterator> // istream_iterator
#include <numeric> // accumulate
int sum = std::accumulate(std::istream_iterator<int>(std::cin),
std::istream_iterator<int>{}, 0);
Now, expected_sum - sum should give you the missing value.
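Put together, a minimal sketch could look like this; long long is used because n*(n+1)/2 can exceed what a 32-bit int holds when n approaches 2×10^5:
#include <iostream>
#include <iterator>   // istream_iterator
#include <numeric>    // accumulate

int main() {
    long long n;
    std::cin >> n;
    long long expected_sum = n * (n + 1) / 2;
    // Reads whitespace-separated numbers until end of input (the n - 1 given values).
    long long sum = std::accumulate(std::istream_iterator<long long>(std::cin),
                                    std::istream_iterator<long long>{}, 0LL);
    std::cout << expected_sum - sum << '\n';
}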
The following code runs perfectly well with my g++ compiler, but on submitting it to an online judge it gives a SIGABRT error. I read that STL containers can generate this error if they try to access too much memory, but I cannot see any such use of memory. Is it count()? I have implemented my own count, but it still gives the same error.
#pragma GCC optimize ("-O2")
#include <bits/stdc++.h>
using namespace std;

#define fastio ios_base::sync_with_stdio(0);cin.tie(0);cout.tie(0)
#define M 100

void range_change(bitset<M>& x, int lower, int upper, bool change){
    if (change){
        for (unsigned i = lower; i <= upper; ++i)
            x.set(i);
    }
    else{
        for (unsigned i = lower; i <= upper; ++i)
            x.reset(i);
    }
}

int count_Set(bitset<M>& x){
    int count = 0;
    for(int i = 0; i < 100; i++){
        if(1 & x[i]) count++;
    }
    return count;
}

bitset<100> houses;

int main(){
    fastio;
    int t;
    cin >> t;
    int m, x, y;
    int reach{0};
    int lower{0}, upper{0};
    // int t = 1;
    while(t--){
        houses.set();
        // cout<< houses;
        cin >> m >> x >> y;
        reach = x*y;
        vector<int> cat(m, 0);
        for(auto& u: cat){
            cin >> u;
            lower = u - reach - 1;
            if(lower < 0) lower = 0;
            upper = u + reach - 1;
            if(upper > 100) upper = 99;
            range_change(houses, lower, upper, false);
        }
        // cout<< houses << "\n";
        cout << houses.count() << "\n";
        // cout<< count_Set(houses) << "\n";
    }
    return 0;
}
Just skimming the source code, this is what I see:
#define M 100
...
bitset<100> houses;
Seems like you want to use M instead of 100 for safety.
Then here:
if(upper > 100) upper = 99;
This is going to break when upper == 100. Suggest:
if(upper >= M) upper = M - 1;
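To make both points concrete, here is a small self-contained sketch; clear_range is a hypothetical helper, not something from the question or from <bitset>:
#include <bitset>
#include <iostream>

constexpr int M = 100;

// Clear [lower, upper] in the bitset, clamping both ends into [0, M-1]
// so that set()/reset() can never throw std::out_of_range.
void clear_range(std::bitset<M>& bits, int lower, int upper) {
    if (lower < 0) lower = 0;
    if (upper >= M) upper = M - 1;   // '>= M', not '> M': index M is already out of range
    for (int i = lower; i <= upper; ++i)
        bits.reset(i);
}

int main() {
    std::bitset<M> houses;
    houses.set();                        // all 100 houses intact
    clear_range(houses, 95, 120);        // an upper bound past the end is clamped safely
    std::cout << houses.count() << '\n'; // prints 95
}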
I have the following piece of code:
bool *pho = new bool[n];
memset(pho, 0, sizeof(bool) * n);
for (int i = 0; i < m; i++) {
    int d = 2;
    cout << "i=" << i << ", d=" << d << endl;
    pho[d] = true;
}
Running with input n=8 results in the following output:
i=0, d=2
i=1, d=2
[Segfault]
I don't understand why this is happening! Setting the same location in the array results in a segfault for some reason. I have run the program several times and it always produces the same output.
Stepping through the code with a debugger, I can see that the value of d (the index) is 2 when the array gets accessed.
I have tried using global arrays and also static global arrays, both of which result in the same error.
Is there something wrong with my IDE or compiler? I am using MinGW with Eclipse CDT, compiling with the -std=c++11 option enabled.
Here is the whole source file, in case any other part of the program is causing problems:
#include <iostream>
#include <queue>
#include <vector>
#include <unordered_set>
#include <utility>
#include <algorithm>
#include <cstring>
using namespace std;

vector<unordered_set<int>> adj;
static bool *visited;

pair<int, int> dfs(int node) {
    if (visited[node])
        return make_pair(0, node);
    pair<int, int> best = make_pair(0, node);
    for (int neigh : adj[node]) {
        pair<int, int> alt = dfs(node);
        alt.second++;
        best = max(best, alt);
    }
    return best;
}

int main(int argc, char** argv) {
    int n, m, def;
    cin >> n;
    cin >> m;
    bool *pho = new bool[n];
    memset(pho, 0, sizeof(bool) * n);
    int *degrees = new int[n];
    memset(degrees, 0, sizeof(int) * n);
    cout << "n=" << n << ", m=" << m << endl;
    for (int i = 0; i < m; i++) {
        int d = 2;
        cout << "i=" << i << ", d=" << d << endl;
        pho[d] = true;
    }
    for (int i = 0; i < n - 1; i++) {
        int a, b;
        cin >> a >> b;
        adj[a].insert(b);
        adj[b].insert(a);
        degrees[a]++;
        degrees[b]++;
    }
    queue<int> next;
    for (int i = 0; i < n; i++) {
        if (degrees[i] == 0) {
            next.push(i);
        }
    }
    while (!next.empty()) {
        int node = next.front();
        next.pop();
        if (pho[node])
            continue;
        for (int neigh : adj[node]) {
            adj[node].erase(neigh);
            adj[neigh].erase(node);
            degrees[node]--;
            degrees[neigh]--;
            if (degrees[neigh] == 1)
                next.push(neigh);
        }
    }
    visited = new bool[n];
    memset(visited, 0, sizeof(bool) * n);
    pair<int, int> pivot = dfs(def);
    memset(visited, 0, sizeof(bool) * n);
    pair<int, int> end = dfs(pivot.second);
    int dist = end.first;           //number of edges she only has to walk once
    int tree = n - 1;               //number of edges in tree
    int otherdist = tree - dist;    //number of edges she has to walk twice
    int total = dist + otherdist * 2;
    cout << total << endl;
    return 0;
}
These lines are wrong:
adj[a].insert(b);
adj[b].insert(a);
adj is empty at this point, so indexing into it is out of bounds. You could instead create an unordered_map instance with a and b as keys and insert b and a, respectively, as values; you don't need a vector of sets if what you want is key-value lookup.
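A sketch of what that could look like, keeping the question's input pattern (n, m, then n - 1 edges); alternatively, keeping the vector<unordered_set<int>> but calling adj.resize(n) before the loop would also avoid the out-of-bounds access:
#include <iostream>
#include <unordered_map>
#include <unordered_set>

int main() {
    std::unordered_map<int, std::unordered_set<int>> adj;  // node -> neighbours
    int n, m;
    std::cin >> n >> m;
    for (int i = 0; i < n - 1; i++) {
        int a, b;
        std::cin >> a >> b;
        adj[a].insert(b);   // operator[] creates the entry if it is missing,
        adj[b].insert(a);   // so no out-of-bounds access can occur here
    }
    std::cout << adj.size() << " nodes have at least one edge\n";
}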
So I was writing a simple perceptron model, and when I finished the code and saw that there were no errors, I was pretty surprised. But it seems that my model doesn't converge (along with some other oddities).
Basically, it keeps getting 25/100 samples right at every epoch, and when each epoch ends the weights always come back to 0.
Because the code is spread across multiple files, I put it on Google Drive. Here it is:
https://drive.google.com/folderview?id=0B_r3mf9HbUrLaDNlc1F6RXhNMnM&usp=sharing
It is a Visual Studio Community 2013 project; you can open and run it to get a better idea.
Here's a quick preview of the files though.
main.cpp:
#include <iostream>
#include <vector>
#include <algorithm>
#include <fstream>
#include <string>
#include <math.h>
#include "LinearAlgebra.h"
#include "MachineLearning.h"
using namespace std;
using namespace LinearAlgebra;
using namespace MachineLearning;
void printVector(vector< vector<float> > X);
vector< vector<float> > getIrisX();
vector<float> getIrisy();
int main()
{
    vector< vector<float> > X = getIrisX();
    vector<float> y = getIrisy();
    vector<float> test1;
    test1.push_back(5.0);
    test1.push_back(3.3);
    test1.push_back(1.4);
    test1.push_back(0.2);
    vector<float> test2;
    test2.push_back(6.0);
    test2.push_back(2.2);
    test2.push_back(5.0);
    test2.push_back(1.5);
    //printVector(X);
    //for (int i = 0; i < y.size(); i++){ cout << y[i] << " "; }cout << endl;
    perceptron clf(0.1, 10);
    clf.fit(X, y);
    cout << "Now Predicting: 5.0,3.3,1.4,0.2(CorrectClass=1,Iris-setosa) -> " << clf.predict(test1) << endl;
    cout << "Now Predicting: 6.0,2.2,5.0,1.5(CorrectClass=-1,Iris-virginica) -> " << clf.predict(test2) << endl;
    system("PAUSE");
    return 0;
}

void printVector(vector< vector<float> > X)
{
    for (int i = 0; i < X.size(); i++)
    {
        for (int j = 0; j < X[i].size(); j++)
        {
            cout << X[i][j] << " ";
        }
        cout << endl;
    }
}

vector<float> getIrisy()
{
    vector<float> y;
    ifstream inFile;
    inFile.open("y.data");
    string sampleClass;
    for (int i = 0; i < 100; i++)
    {
        inFile >> sampleClass;
        if (sampleClass == "Iris-setosa")
        {
            y.push_back(1);
        }
        else
        {
            y.push_back(-1);
        }
    }
    return y;
}

vector< vector<float> > getIrisX()
{
    ifstream af;
    ifstream bf;
    ifstream cf;
    ifstream df;
    af.open("a.data");
    bf.open("b.data");
    cf.open("c.data");
    df.open("d.data");
    vector< vector<float> > X;
    for (int i = 0; i < 100; i++)
    {
        char scrap;
        int scrapN;
        af >> scrapN;
        bf >> scrapN;
        cf >> scrapN;
        df >> scrapN;
        af >> scrap;
        bf >> scrap;
        cf >> scrap;
        df >> scrap;
        float a, b, c, d;
        af >> a;
        bf >> b;
        cf >> c;
        df >> d;
        X.push_back(vector < float > {a, b, c, d});
    }
    af.close();
    bf.close();
    cf.close();
    df.close();
    return X;
}
MachineLearning.h:
#pragma once
#include<vector>
using namespace std;
namespace MachineLearning{
    class perceptron
    {
    public:
        perceptron(float eta, int epochs);
        float netInput(vector<float> X);
        int predict(vector<float> X);
        void fit(vector< vector<float> > X, vector<float> y);
    private:
        float m_eta;
        int m_epochs;
        vector < float > m_w;
        vector < float > m_errors;
    };
}
MachineLearning.cpp
#include<vector>
#include <algorithm>
#include <iostream>
#include<fstream>
#include <math.h>
#include "MachineLearning.h"
using namespace std;
namespace MachineLearning{
    perceptron::perceptron(float eta, int epochs)
    {
        m_epochs = epochs;
        m_eta = eta;
    }

    void perceptron::fit(vector< vector<float> > X, vector<float> y)
    {
        for (int i = 0; i < X[0].size() + 1; i++) // X[0].size() + 1 -> I am using +1 to add the bias term
        {
            m_w.push_back(0);
        }
        for (int i = 0; i < m_epochs; i++)
        {
            int errors = 0;
            for (int j = 0; j < X.size(); j++)
            {
                float update = m_eta * (y[j] - predict(X[j]));
                m_w[0] = update;
                for (int w = 1; w < m_w.size(); w++){ m_w[w] = update * X[j][w - 1]; }
                errors += update != 0 ? 1 : 0;
            }
            m_errors.push_back(errors);
        }
    }

    float perceptron::netInput(vector<float> X)
    {
        // Sum(Vector of weights * Input vector) + bias
        float probabilities = m_w[0];
        for (int i = 0; i < X.size(); i++)
        {
            probabilities += X[i] * m_w[i + 1];
        }
        return probabilities;
    }

    int perceptron::predict(vector<float> X)
    {
        return netInput(X) > 0 ? 1 : -1; //Step Function
    }
}
Any kind of help is much appreciated.
Thanks in advance.
Panos P.
After hours of tedious debugging, I finally found the mistake: there was a bug in the code that updates the weights.
for (int j = 0; j < X.size(); j++)
{
    float update = m_eta * (y[j] - predict(X[j]));
    m_w[0] = update;
    for (int w = 1; w < m_w.size(); w++){ m_w[w] = update * X[j][w - 1]; }
    errors += update != 0 ? 1 : 0;
}
Notice that:
m_w[w] = update * X[j][w - 1]
I am setting the weights equal to the update; it looks like I forgot a "+" sign. Now it works fine.
Here it is now:
m_w[w] += update * X[j][w - 1]
Sometimes the silliest mistakes cause the most annoying of errors.
I hope this helps anyone making the same mistake.
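For anyone comparing against their own code, here is a minimal standalone sketch of the corrected update rule. Two caveats: the tiny 2-D dataset is made up purely for illustration, and the sketch also applies += to the bias weight (m_w[0] in the post, w[0] below), which the standard perceptron rule expects; the post above only shows the fix for the non-bias weights.
#include <iostream>
#include <vector>

// Minimal standalone perceptron illustrating the corrected update rule:
// every weight, including the bias w[0], is accumulated with +=.
int main() {
    std::vector<std::vector<float>> X = {{0, 0}, {0, 1}, {1, 0}, {1, 1}};
    std::vector<float> y = {-1, -1, -1, 1};          // linearly separable toy labels (AND)
    float eta = 0.1f;
    int epochs = 10;

    std::vector<float> w(X[0].size() + 1, 0.0f);     // w[0] is the bias

    auto predict = [&](const std::vector<float>& x) {
        float net = w[0];
        for (std::size_t i = 0; i < x.size(); ++i) net += x[i] * w[i + 1];
        return net > 0 ? 1 : -1;                     // step function
    };

    for (int e = 0; e < epochs; ++e) {
        for (std::size_t j = 0; j < X.size(); ++j) {
            float update = eta * (y[j] - predict(X[j]));
            w[0] += update;                          // bias: accumulate, don't overwrite
            for (std::size_t i = 1; i < w.size(); ++i)
                w[i] += update * X[j][i - 1];        // weights: the missing '+' from the post
        }
    }

    for (const auto& x : X)
        std::cout << x[0] << "," << x[1] << " -> " << predict(x) << "\n";
}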