I have a problem with my program: it currently gives wrong results when finding a meeting point.
I chose to use the geometric median algorithm for finding the meeting point, as described here.
I have also implemented a brute-force algorithm, just to compare the results.
The source code below was edited toward a possible solution (please correct me); it still sometimes fails for more than 100000 points:
#include <vector>
#include <random>
#include <cstdlib>
#include <algorithm>
#include <iostream>
#include <cmath>
using namespace std;
long double ComputeMean(vector<long long> InputData) {
long double rtn = 0;
for (unsigned int i = 0; i < InputData.size(); i++) {
rtn += InputData[i];
}
if(rtn == 0) return rtn;
return rtn/InputData.size();
}
long double CallRecursiveAverage(long double m0, vector<long long> X) {
long double m1 =0 ;
long double numerator = 0, denominator = 0;
for (unsigned int i = 0; i < X.size(); i++) {
long double temp =abs((X[i] - m0));
if(X[i]!=0 && temp!=0) {
numerator += X[i] / temp;
}
if(temp!=0) {
denominator += 1 / temp;
}
}
if( denominator != 0 ) {
m1 = numerator / denominator;
}
return m1;
}
long double ComputeReWeightedAverage(vector<long long> InputVector) {
long double m0 = ComputeMean(InputVector);
long double m1 = CallRecursiveAverage(m0, InputVector);
while (abs(m1 - m0) > 1e-6) {
m0 = m1;
m1 = CallRecursiveAverage(m0, InputVector);
}
return m1;
}
int randomizer(){
int n =(rand() % 1000000 + 1)*(-1 + ((rand() & 1) << 1));
return(n);
}
struct points
{
long double ch;
long long remp;
bool operator<(const points& a) const
{
return ch < a.ch;
}
};
int main () {
long double houses=10;
// rand() % 100 + 1;
// cin >> houses;
vector <long long> x;
vector <long long> y;
vector <long long> xr;
vector <long long> yr;
vector <long long> sums;
vector <long long> remp;
long long x0, y0;
long double path = 1e9;
long double sumy = 0;
long double sumx = 0;
long double avgx = 1;
long double avgy = 1;
srand((unsigned)time(NULL));
int rnd;
for(int i = 0; i < houses; i++) {
// cin>>x0>>y0;
x0 = randomizer();
x.push_back(x0);
sumx += x0;
y0 = randomizer();
y.push_back(y0);
sumy += y0;
}
if(sumx!=0) {
avgx=ComputeReWeightedAverage(x);
} else {
avgx=0;
}
if(sumy!=0) {
avgy=ComputeReWeightedAverage(y);
} else {
avgy=0;
}
long double check=1e9;
long double pathr=0;
int rx, ry;
long double wpath=1e9;
///brute force////
for(int j = 0; j < houses; j++) {
pathr = 0;
for(int i = 0; i < houses; i++) {
pathr += max(abs(x[i] - x[j]), abs(y[i] - y[j]));
}
if(pathr<wpath)
{
wpath = pathr;
ry=j;
}
}
cout << "\nx ="<<x[ry]<<"\n";
cout << "y ="<<y[ry]<<"\n";
cout << "bruteForce path ="<<wpath<<"\n\n";
////end brute force///
cout << "avgx ="<<avgx<<"\n";
cout << "avgy ="<<avgy<<"\n";
vector<points> ch;
for(int j = 0; j < houses; j++) {
remp.push_back(j);
points tb;
tb.ch=max(abs(x[j] - (avgx)), abs(y[j] - (avgy)));
tb.remp=j;
ch.push_back(tb) ;
}
sort(ch.begin(),ch.end());
path =1e9;
for(unsigned int z = 0; z < 10; z++) {
pathr = 0;
for(int i = 0; i < houses; i++) {
pathr += max(abs(x[i] - x[ch[z].remp]), abs(y[i] - y[ch[z].remp]));
}
if(pathr<path)
{
path = pathr;
}
}
cout << "x ="<<x[remp[0]]<<"\n";
cout << "y ="<<y[remp[0]]<<"\n";
cout << "Weizsfield path ="<<path<<"\n\n";
if (wpath!=path){ cout <<"ERRROR"<<"\n";
cout << "dots\n";
for(int i = 0; i < houses; i++) {
cout << x[i]<<" "<<y[i]<<"\n";
}
cout << "dots\n\n";
}
return 0;
}
Where did I make a mistake in my program? Any help will be appreciated.
EDIT
Is changing the search radius of the points nearest to the geometric median and checking the path for all of them the best approach? If the answer is yes, how do I find the optimal starting radius?
The Weiszfeld algorithm only approximates the geometric median, so the point it returns will very often deviate from the exact one computed by brute force.
Increasing the search radius will probably help; a sketch of that idea follows.
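As a rough sketch of that suggestion (not the asker's code; the helper names totalPath and bestMeetingPoint and the parameter k are mine), one can rank the houses by Chebyshev distance to the approximate median (avgx, avgy) and brute-force only the k closest candidates:

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdlib>
#include <vector>

// Total cost of meeting at house j: sum of Chebyshev distances from every house.
static long double totalPath(const std::vector<long long>& x,
                             const std::vector<long long>& y, std::size_t j) {
    long double sum = 0;
    for (std::size_t i = 0; i < x.size(); ++i)
        sum += std::max(std::llabs(x[i] - x[j]), std::llabs(y[i] - y[j]));
    return sum;
}

// Evaluate only the k houses closest (in Chebyshev distance) to the
// approximate geometric median (avgx, avgy) and return the best index.
std::size_t bestMeetingPoint(const std::vector<long long>& x,
                             const std::vector<long long>& y,
                             long double avgx, long double avgy, std::size_t k) {
    std::vector<std::size_t> idx(x.size());
    for (std::size_t i = 0; i < idx.size(); ++i) idx[i] = i;
    std::sort(idx.begin(), idx.end(), [&](std::size_t a, std::size_t b) {
        long double da = std::max(std::fabs(x[a] - avgx), std::fabs(y[a] - avgy));
        long double db = std::max(std::fabs(x[b] - avgx), std::fabs(y[b] - avgy));
        return da < db;
    });
    std::size_t best = idx[0];
    long double bestCost = totalPath(x, y, best);
    for (std::size_t z = 1; z < std::min(k, idx.size()); ++z) {
        long double cost = totalPath(x, y, idx[z]);
        if (cost < bestCost) { bestCost = cost; best = idx[z]; }
    }
    return best;
}

With k equal to the number of houses this degenerates into the full brute force, so k is exactly the accuracy/speed trade-off the question asks about.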
I am trying to solve the generalized eigenvalue problem for the hydrogen atom by using LAPACKE_dsygvd. For the parameters of the generator functions, I use an interval that starts at 0.01 and takes N steps of 0.01. What I change is the value of N. Everything is fine for N = 14 and below, where I get the answers from the analytical solution. However, when I choose N = 15 and above, I get an error and info is returned with a value > N. The LAPACK documentation says the following:
N: if INFO = N + i, for 1 <= i <= N, then the leading
minor of order i of B is not positive definite.
The factorization of B could not be completed and
no eigenvalues or eigenvectors were computed.
But I have checked my matrix B and it is positive definite. I don't know what is wrong.
Below I show my code:
#include <cmath>
#include <cstdio>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include "library.h"
#include "mkl.h"
using namespace std;
double Superposition(const double ai, const double aj, const int m);
double Hamiltonian(const double ai, const double aj, const int m);
void print_matrix(double *A, int n) {
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
printf("%.7f ", A[i*n + j]);
}
cout << "\n";
}
}
void print_vector(double *vec, int n) {
for (int i = 0; i < n; i++) {
cout << vec[i] << " ";
}
cout << "\n";
}
double* interval(double min, double step) {
double *result;
result = (double *)mkl_malloc( N*sizeof( double ), 64 );
for (int i = 0; i < N; i++) {
result[i] = min + i*step;
}
return result;
}
int main() {
cout << Ry << "\n";
double *S, *H, *I, *eigenvalues;
double alpha, beta;
int i, j, info;
char* uplo = "U"; char* jobz = "V";
I = interval(0.01, 0.01);
alpha = 1.0; beta = 0.0;
S = (double *)mkl_malloc( N*N*sizeof( double ), 64 );
H = (double *)mkl_malloc( N*N*sizeof( double ), 64 );
eigenvalues = (double *)mkl_malloc( N*sizeof( double ), 64 );
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++) {
int index = i*N + j;
if (j < i) {
S[index] = 0.0;
H[index] = 0.0;
}
else {
S[index] = Superposition(I[i], I[j], m);
H[index] = Hamiltonian(I[i], I[j], m);
}
}
}
print_matrix(S, N); cout << "\n";
print_matrix(H, N); cout << "\n" << "\n";
info = LAPACKE_dsygv(LAPACK_ROW_MAJOR, 1, *jobz, *uplo, N,
H, N, S, N, eigenvalues);
//print_matrix(H, N); cout << "\n";
//for (i = 0; i < N; i++) {
// eigenvalues[i] /= Ry;
//}
cout << info << "\n" << "\n";
print_matrix(H, N); cout << "\n";
print_vector(eigenvalues, N);
mkl_free(S);
mkl_free(H);
mkl_free(I);
mkl_free(eigenvalues);
}
Edit: I used dsygvd as included in MKL, and the same error doesn't occur. However, I get very different results from the two functions with the same inputs.
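One thing that may help narrow this down: the overlap matrix can be handed to a plain Cholesky factorization, which succeeds only if the matrix is numerically positive definite and otherwise reports the first failing leading minor, i.e. the same diagnostic dsygv/dsygvd rely on internally. A minimal sketch (the helper name checkPositiveDefinite is mine; it assumes the same row-major, upper-triangle storage used above):

// Attempt a Cholesky factorization of a copy of S. info == 0 means S is
// positive definite to working precision; info == i > 0 names the first
// leading minor that is not.
#include <cstdio>
#include <cstring>
#include "mkl.h"

int checkPositiveDefinite(const double *S, int n) {
    double *copy = (double *)mkl_malloc(n * n * sizeof(double), 64);
    std::memcpy(copy, S, n * n * sizeof(double));
    int info = LAPACKE_dpotrf(LAPACK_ROW_MAJOR, 'U', n, copy, n);
    if (info > 0)
        std::printf("leading minor of order %d is not positive definite\n", info);
    mkl_free(copy);
    return info;
}

With generator parameters spaced as closely as 0.01, the basis functions become nearly linearly dependent as N grows, so S can easily lose positive definiteness in double precision even if it is positive definite analytically. Also worth keeping in mind when comparing dsygv and dsygvd in the same run: both routines overwrite B (here S) with its Cholesky factor and A (here H) with the eigenvectors, so the second call needs freshly assembled matrices or the results will differ for that reason alone.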
Below is my C++ code, which solves the 1-D Schrödinger equation with Numerov's method. But I have two problems:
When I increase the size of the box, the wave function for the ground state becomes zero; the problem is in the normalization, and I do not know why.
It is too slow, and I need to make the box sufficiently large, so any idea about how to speed it up would be great.
This is my code:
#include <iostream>
#include <math.h>
#include <fstream>
#include <string>
#include <iomanip>
#include <sstream>
using namespace std;
#define pi 3.1415926535897932384626433
class ID_TISE{
public:
ID_TISE(int N_graid, int x_min, int x_max, double dx, double e_tr, double de, double e_up, double m, double h_bar, double c);
~ID_TISE();
//functions
double Potential(char, int);
double PotentialIon(char, int);
void Setk2(double *k2, double E, int N, char lr);
void Numerov(double *psi, double *k2, int N);
double diff_slope(double e_tr, int N_L, int N_R);
string string_precision(int, int);
double x(char lr, int index);
void Numerov_Solver(double*, double*);
void Normalizer(double *psi, int N); // missing from the posted class body, but defined and used below
void id_tise();
int N_graid ;
double x_min;
double x_max;
double dx;
//set trial energy
double e_tr;
double de;
double e_up;
int N_E = 10000;
//constant
double m ;
double h_bar;
double c; // second point of the wavefunctions
//array
double *psi_l = NULL;
double *psi_r = NULL;
double *k2_l = NULL;
double *k2_r = NULL;
};
ID_TISE::ID_TISE(int N_graid, int x_min, int x_max, double dx, double e_tr, double de, double e_up, double m, double h_bar, double c){
this->N_graid = N_graid;
this->x_min = x_min;
this->x_max = x_max;
this->dx = dx;
this->e_tr =e_tr;
this->de = de;
this->e_up = e_up;
this->m = m;
this->h_bar=h_bar;
this->c=c;
psi_l = new double[N_graid];
psi_r = new double[N_graid];
k2_l = new double[N_graid];
k2_r = new double[N_graid];
}
ID_TISE::~ID_TISE(){
delete[] psi_l;
delete[] psi_r;
delete[] k2_l;
delete[] k2_r;
}
void ID_TISE::id_tise(){
ofstream fout;
int im = int(N_graid/2);
int N_R = N_graid - im + 2;
int N_L = im + 2;
//initialization left solution
psi_l[0] = 0.0;
psi_l[1] = c;
//initialization right solution
psi_r[0] = 0.0;
psi_r[1] = c;
int ii = 0, inode = 0, inodeUp = 1;
while(e_tr < e_up){
double ds1 = diff_slope(e_tr,N_L,N_R);
e_tr += de;
double ds2 = diff_slope(e_tr,N_L,N_R);
if(ds1*ds2 < 0.0){
if(e_tr < -9.0e-7 || e_tr > 9.0e-7){ // to prevent reaching to E = 0
cout <<"E_"<<ii++ <<" = "<< e_tr << endl;
Normalizer(psi_r,im);
Normalizer(psi_l,im);
fout.open("psi_"+string_precision(ii,0)+ ".dat");
if(e_tr < 0){
if(inode%2 != 0.){
for(int i = 0; i < im; i++)
fout << x('l',i) << "\t" << -psi_l[i] << endl;
}
else{
for(int i = 0; i < im; i++)
fout << x('l',i) << "\t" << psi_l[i] << endl;
}
for(int i = im-1; i > -1; i--)
fout << x('r',i) << "\t" << psi_r[i] << endl;
inode++;
}
else{
if(inodeUp%2 == 0.){
for(int i = 0; i < im; i++)
fout << x('l',i) << "\t" << -psi_l[i] << endl;
}
else{
for(int i = 0; i < im; i++)
fout << x('l',i) << "\t" << psi_l[i] << endl;
}
for(int i = im-1; i > -1; i--)
fout << x('r',i) << "\t" << psi_r[i] << endl;
inodeUp++;
}
fout.close();
}
}
else continue;
}
}
void ID_TISE::Normalizer(double *psi,int N){
double sum = 0.;
for(int i=0 ; i<N ; i++)
sum += psi[i]*psi[i]*dx;
sum=sqrt(sum);
for(int i=0 ; i<N ; i++){
psi[i]=psi[i]/sum;
}
}
void ID_TISE::Numerov(double *psi, double *k2 , int N){
double h12 = (dx*dx)/12.;
for(int i = 1; i < N; i++){
psi[i+1] = (2.*(1.-5.*h12*k2[i])*psi[i] - (1. + h12*k2[i-1])*psi[i-1])/(1. + h12*k2[i+1]);
}
}
void ID_TISE::Setk2(double *k2, double E, int N, char lr){
for(int i = 0; i < N; i++){
if(E > 0)
k2[i] = (2.*m/(h_bar*h_bar))*(E - PotentialIon(lr,i));
else
k2[i] = (2.*m/(h_bar*h_bar))*(E - Potential(lr,i));
}
}
double ID_TISE::Potential(char lr, int index){
return -1./sqrt(x(lr, index)*x(lr, index)+2.);
}
double ID_TISE::PotentialIon(char lr, int index){
return 0.;
}
double ID_TISE::x(char lr, int index){
if(lr == 'l')
return x_min + (index)*dx; // dx = h
else
return x_max - (index)*dx; // the right solution
}
double ID_TISE::diff_slope(double e_tr, int N_L, int N_R){
double dslope;
//initialization left solution
psi_l[0] = 0.0;
psi_l[1] = c;
//initialization right solution
psi_r[0] = 0.0;
psi_r[1] = c;
Setk2(k2_l, e_tr, N_L, 'l');
Setk2(k2_r, e_tr, N_R, 'r');
Numerov(psi_l, k2_l, N_L);
Numerov(psi_r, k2_r, N_R);
double y_m = (psi_l[N_L-1]+psi_r[N_R-1])/2.;
dslope = (2.*y_m - psi_l[N_L - 3] - psi_r[N_R - 3])/(dx*psi_r[N_R - 2]);
return dslope;
}
string ID_TISE::string_precision(int value, int n_digits){
if(n_digits < 6) n_digits = 6;
ostringstream out;
out << fixed<<setprecision(n_digits) << value;
return out.str();
}
int main(){
int N_graid = 500000;
double x_min = -500;
double x_max = -x_min;
double dx = (x_max - x_min)/(N_graid-1);
//set trial energy
double e_tr = -0.6;
double de = 0.000001;
double e_up = 1.0;
//constant
double m = 1.;
double h_bar = 1.;
double c = 0.00000001; // second point of the wavefunctions
ID_TISE &id_tise = *(new ID_TISE(N_graid,x_min,x_max,dx,e_tr,de,e_up,m,h_bar,c));
id_tise.id_tise();
}
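For reference, here is a self-contained sketch of the same three-term Numerov recurrence together with a normalization taken over the whole grid by a trapezoidal sum. The grid size, box, and trial energy are illustrative values of mine, not a drop-in fix; the potential and atomic units match the class above:

#include <cmath>
#include <cstdio>
#include <vector>

// Standalone Numerov propagation for one trial energy E, using the same
// soft-core potential V(x) = -1/sqrt(x^2 + 2) and m = hbar = 1 as above.
int main() {
    const int N = 100000;                      // illustrative grid size
    const double x_min = -50.0, x_max = 50.0;  // illustrative box
    const double dx = (x_max - x_min) / (N - 1);
    const double E = -0.5;                     // one trial energy
    const double h12 = dx * dx / 12.0;

    std::vector<double> psi(N), k2(N);
    for (int i = 0; i < N; ++i) {
        double x = x_min + i * dx;
        k2[i] = 2.0 * (E + 1.0 / std::sqrt(x * x + 2.0));  // 2m/hbar^2 = 2
    }
    psi[0] = 0.0;
    psi[1] = 1e-8;  // small second point, as in the class

    // Three-term Numerov recurrence, as in ID_TISE::Numerov.
    for (int i = 1; i + 1 < N; ++i)
        psi[i + 1] = (2.0 * (1.0 - 5.0 * h12 * k2[i]) * psi[i]
                      - (1.0 + h12 * k2[i - 1]) * psi[i - 1])
                     / (1.0 + h12 * k2[i + 1]);

    // Trapezoidal normalization over the entire grid.
    double norm = 0.0;
    for (int i = 0; i + 1 < N; ++i)
        norm += 0.5 * (psi[i] * psi[i] + psi[i + 1] * psi[i + 1]) * dx;
    norm = std::sqrt(norm);
    for (int i = 0; i < N; ++i) psi[i] /= norm;

    std::printf("psi at the box midpoint after normalization: %g\n", psi[N / 2]);
    return 0;
}

On the speed question: most of the cost above comes from re-running the propagation for every trial energy with the fixed step de = 1e-6. Since diff_slope already detects a sign change of the slope mismatch, a common speed-up is to scan with a much coarser de and then bisect between the two bracketing energies, which cuts the number of Numerov sweeps by orders of magnitude.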
I am using the following code to run the k-means algorithm on the Iris flower dataset: https://github.com/marcoscastro/kmeans/blob/master/kmeans.cpp
I have modified the above code to read its input from files. Below is my code:
#include <iostream>
#include <vector>
#include <math.h>
#include <stdlib.h>
#include <time.h>
#include <algorithm>
#include <fstream>
using namespace std;
class Point
{
private:
int id_point, id_cluster;
vector<double> values;
int total_values;
string name;
public:
Point(int id_point, vector<double>& values, string name = "")
{
this->id_point = id_point;
total_values = values.size();
for(int i = 0; i < total_values; i++)
this->values.push_back(values[i]);
this->name = name;
this->id_cluster = -1;
}
int getID()
{
return id_point;
}
void setCluster(int id_cluster)
{
this->id_cluster = id_cluster;
}
int getCluster()
{
return id_cluster;
}
double getValue(int index)
{
return values[index];
}
int getTotalValues()
{
return total_values;
}
void addValue(double value)
{
values.push_back(value);
}
string getName()
{
return name;
}
};
class Cluster
{
private:
int id_cluster;
vector<double> central_values;
vector<Point> points;
public:
Cluster(int id_cluster, Point point)
{
this->id_cluster = id_cluster;
int total_values = point.getTotalValues();
for(int i = 0; i < total_values; i++)
central_values.push_back(point.getValue(i));
points.push_back(point);
}
void addPoint(Point point)
{
points.push_back(point);
}
bool removePoint(int id_point)
{
int total_points = points.size();
for(int i = 0; i < total_points; i++)
{
if(points[i].getID() == id_point)
{
points.erase(points.begin() + i);
return true;
}
}
return false;
}
double getCentralValue(int index)
{
return central_values[index];
}
void setCentralValue(int index, double value)
{
central_values[index] = value;
}
Point getPoint(int index)
{
return points[index];
}
int getTotalPoints()
{
return points.size();
}
int getID()
{
return id_cluster;
}
};
class KMeans
{
private:
int K; // number of clusters
int total_values, total_points, max_iterations;
vector<Cluster> clusters;
// return ID of nearest center (uses euclidean distance)
int getIDNearestCenter(Point point)
{
double sum = 0.0, min_dist;
int id_cluster_center = 0;
for(int i = 0; i < total_values; i++)
{
sum += pow(clusters[0].getCentralValue(i) -
point.getValue(i), 2.0);
}
min_dist = sqrt(sum);
for(int i = 1; i < K; i++)
{
double dist;
sum = 0.0;
for(int j = 0; j < total_values; j++)
{
sum += pow(clusters[i].getCentralValue(j) -
point.getValue(j), 2.0);
}
dist = sqrt(sum);
if(dist < min_dist)
{
min_dist = dist;
id_cluster_center = i;
}
}
return id_cluster_center;
}
public:
KMeans(int K, int total_points, int total_values, int max_iterations)
{
this->K = K;
this->total_points = total_points;
this->total_values = total_values;
this->max_iterations = max_iterations;
}
void run(vector<Point> & points)
{
if(K > total_points)
return;
vector<int> prohibited_indexes;
printf("Inside run \n");
// choose K distinct values for the centers of the clusters
printf(" K distinct cluster\n");
for(int i = 0; i < K; i++)
{
while(true)
{
int index_point = rand() % total_points;
if(find(prohibited_indexes.begin(), prohibited_indexes.end(),
index_point) == prohibited_indexes.end())
{
printf("i= %d\n",i);
prohibited_indexes.push_back(index_point);
points[index_point].setCluster(i);
Cluster cluster(i, points[index_point]);
clusters.push_back(cluster);
break;
}
}
}
int iter = 1;
printf(" Each point to nearest cluster\n");
while(true)
{
bool done = true;
// associates each point to the nearest center
for(int i = 0; i < total_points; i++)
{
int id_old_cluster = points[i].getCluster();
int id_nearest_center = getIDNearestCenter(points[i]);
if(id_old_cluster != id_nearest_center)
{
if(id_old_cluster != -1)
clusters[id_old_cluster].removePoint(points[i].getID());
points[i].setCluster(id_nearest_center);
clusters[id_nearest_center].addPoint(points[i]);
done = false;
}
}
// recalculating the center of each cluster
for(int i = 0; i < K; i++)
{
for(int j = 0; j < total_values; j++)
{
int total_points_cluster = clusters[i].getTotalPoints();
double sum = 0.0;
if(total_points_cluster > 0)
{
for(int p = 0; p < total_points_cluster; p++)
sum += clusters[i].getPoint(p).getValue(j);
clusters[i].setCentralValue(j, sum / total_points_cluster);
}
}
}
if(done == true || iter >= max_iterations)
{
cout << "Break in iteration " << iter << "\n\n";
break;
}
iter++;
}
// shows elements of clusters
for(int i = 0; i < K; i++)
{
int total_points_cluster = clusters[i].getTotalPoints();
cout << "Cluster " << clusters[i].getID() + 1 << endl;
for(int j = 0; j < total_points_cluster; j++)
{
cout << "Point " << clusters[i].getPoint(j).getID() + 1 << ": ";
for(int p = 0; p < total_values; p++)
cout << clusters[i].getPoint(j).getValue(p) << " ";
string point_name = clusters[i].getPoint(j).getName();
if(point_name != "")
cout << "- " << point_name;
cout << endl;
}
cout << "Cluster values: ";
for(int j = 0; j < total_values; j++)
cout << clusters[i].getCentralValue(j) << " ";
cout << "\n\n";
}
}
};
int main(int argc, char *argv[])
{
srand(time(NULL));
int total_points, total_values, K, max_iterations, has_name;
ifstream inFile("datafile.txt");
if (!inFile) {
cerr << "Unable to open file datafile.txt";
exit(1); // call system to stop
}
inFile >> total_points >> total_values >> K >> max_iterations >> has_name;
cout << "Details- \n";
vector<Point> points;
string point_name,str;
int i=0;
while(inFile.eof())
{
string temp;
vector<double> values;
for(int j = 0; j < total_values; j++)
{
double value;
inFile >> value;
values.push_back(value);
}
if(has_name)
{
inFile >> point_name;
Point p(i, values, point_name);
points.push_back(p);
i++;
}
else
{
inFile >> temp;
Point p(i, values);
points.push_back(p);
i++;
}
}
inFile.close();
KMeans kmeans(K, total_points, total_values, max_iterations);
kmeans.run(points);
return 0;
}
The output of the code is:
Details-
15043100000Inside run
K distinct cluster i= 0
Segmentation fault
When I run it in gdb, the error shown is -
Program received signal SIGSEGV, Segmentation fault.
0x0000000000401db6 in Point::setCluster (this=0x540, id_cluster=0)
at kmeans.cpp:41
41 this->id_cluster = id_cluster;
I am stuck here, as I cannot find the cause of this segmentation fault.
My dataset file looks like this:
150 4 3 10000 1
5.1,3.5,1.4,0.2,Iris-setosa
4.9,3.0,1.4,0.2,Iris-setosa
4.7,3.2,1.3,0.2,Iris-setosa
. . .
7.0,3.2,4.7,1.4,Iris-versicolor
6.4,3.2,4.5,1.5,Iris-versicolor
6.9,3.1,4.9,1.5,Iris-versicolor
5.5,2.3,4.0,1.3,Iris-versicolor
6.5,2.8,4.6,1.5,Iris-versicolor
. . .
In KMeans::run(vector<Point>&) you call points[index_point].setCluster(i); without any guarantee that index_point is within bounds.
index_point is determined by int index_point = rand() % total_points;, and total_points is read from the input file "datafile.txt", so it could be anything. It certainly does not have to match points.size(), but it should. Make sure it does, or just use points.size() instead.
A bit off-topic, but using rand() with just a modulo is almost always wrong. If you use C++11 or newer, please consider using std::uniform_int_distribution instead; a sketch follows.
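A minimal sketch of that suggestion (the helper name pickDistinctIndex and the use of mt19937 are mine), drawing each starting index from [0, count) and rejecting repeats:

#include <cstddef>
#include <random>
#include <unordered_set>

// Pick an index in [0, count) that has not been used before.
// Assumes count > 0 and that fewer than count indexes are already taken.
std::size_t pickDistinctIndex(std::size_t count,
                              std::unordered_set<std::size_t>& taken,
                              std::mt19937& gen) {
    std::uniform_int_distribution<std::size_t> dist(0, count - 1);
    std::size_t idx;
    do {
        idx = dist(gen);
    } while (taken.count(idx) != 0);
    taken.insert(idx);
    return idx;
}

Called with points.size() rather than the total_points value from the file header, this cannot index outside the vector even if the header and the actual number of rows disagree.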
points[index_point].setCluster(i); could be accessing the vector out of bounds. The code you linked always puts total_points entries into the vector points before calling run, while your modified code just reads until end of file, with no guarantee that the number of total points passed to the KMeans constructor matches the number of entries in points. Either fix your file I/O or fix the bounds-checking logic; a sketch of the file-reading side is below.
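On the file I/O side, note that while(inFile.eof()) as written never even enters the loop, because eof() is still false right after the header is read, so points stays empty while total_points is 150. Here is a sketch of a reader that consumes exactly total_points comma-separated rows of the format shown below (the helper name readRows is mine, and it parses only; wiring it into main is left out):

#include <istream>
#include <sstream>
#include <string>
#include <vector>

// Read total_points rows of the form "5.1,3.5,1.4,0.2,Iris-setosa".
// One vector of doubles per row; trailing names (if present) go into names.
bool readRows(std::istream& in, int total_points, int total_values, bool has_name,
              std::vector<std::vector<double>>& rows,
              std::vector<std::string>& names) {
    std::string line, field;
    std::getline(in, line);  // discard the remainder of the header line
    for (int i = 0; i < total_points; ++i) {
        if (!std::getline(in, line)) return false;  // fewer rows than the header claims
        std::stringstream ss(line);
        std::vector<double> values;
        for (int j = 0; j < total_values; ++j) {
            if (!std::getline(ss, field, ',')) return false;
            values.push_back(std::stod(field));
        }
        rows.push_back(values);
        if (has_name && std::getline(ss, field, ','))
            names.push_back(field);
    }
    return true;
}

With this, rows.size() is guaranteed to equal total_points (or the function reports failure), so the number handed to the KMeans constructor and the number of Point objects actually built can no longer drift apart.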
Background Information:
Here is an outline of the algorithm known as forward Monte Carlo for pricing American options which is from the paper, "A Forward Monte Carlo Method for American Options Pricing" by Daniel Wei-Chung Miao and Yung-Hsin Lee.
Question:
My program works correctly when the number of time steps N = 100 or anything less, and when M = 100 or anything less. But when I increase N or M to, say, 1000, my program breaks and does not run, and I am not sure why.
Here is my code:
#include <iostream>
#include <cmath>
#include <math.h>
#include <limits>
#include <algorithm>
#include <Eigen/Dense>
#include <Eigen/Geometry>
#include <random>
#include <vector>
#include <time.h>
using namespace Eigen;
using namespace std;
void FM(double T, double r, double q, double sigma, double S0, double K, int M, int N);
MatrixXd generateGaussianNoise(int M, int N); // Generates Normally distributed random numbers
double Black_Scholes(double T, double K, double S0, double r, double sigma);
double phi(long double x);
VectorXd time_vector(double min, double max, int N);
MatrixXd call_payoff(MatrixXd S, double K);
int main(){
double r = 0.03; // Riskless interest rate
double q = 0.0; // Divident yield
double sigma = 0.15; // Volatility of stock
int T = 1; // Time (expiry)
int N = 100; // Number of time steps
double K = 100; // Strike price
double S0 = 102; // Initial stock price
int M = 1000; // Number of paths // Current issue
FM(T,r,q,sigma,S0,K,M,N);
return 0;
}
MatrixXd generateGaussianNoise(int M, int N){
MatrixXd Z(N,M);
random_device rd;
mt19937 e2(time(0));
normal_distribution<double> dist(0.0, 1.0);
for(int i = 0; i < M; i++){
for(int j = 0; j < N; j++){
Z(i,j) = dist(e2);
}
}
return Z;
}
double phi(double x){
return 0.5 * erfc(-x * M_SQRT1_2);
}
double Black_Scholes(double T, double K, double S0, double r, double sigma){
double d_1 = (log(S0/K) + (r+sigma*sigma/2.)*(T))/(sigma*sqrt(T));
double d_2 = (log(S0/K) + (r-sigma*sigma/2.)*(T))/(sigma*sqrt(T));
double C = S0*phi(d_1) - phi(d_2)*K*exp(-r*T);
return C;
}
VectorXd time_vector(double min, double max, int N){
VectorXd m(N + 1);
double delta = (max-min)/N;
for(int i = 0; i <= N; i++){
m(i) = min + i*delta;
}
return m;
}
MatrixXd call_payoff(MatrixXd S, double K){
MatrixXd result(S.rows(),S.cols());
for(int i = 0; i < S.rows(); i++){
for(int j = 0; j < S.cols(); j++){
if(S(i,j) - K > 0){
result(i,j) = S(i,j) - K;
}else{
result(i,j) = 0.0;
}
}
}
return result;
}
void FM(double T, double r, double q, double sigma, double S0, double K, int M, int N){
MatrixXd Z = generateGaussianNoise(M,N);
double dt = T/N;
VectorXd t = time_vector(0.0,T,N);
// Generate M paths of stock prices
MatrixXd S(M,N+1);
for(int i = 0; i < M; i++){
S(i,0) = S0;
for(int j = 1; j <= N; j++){
S(i,j) = S(i,j-1)*exp((double) (r - q - pow(sigma,2.0))*dt + sigma*sqrt(dt)*(double)Z(i,j-1));
}
}
//
// If path i is "alive" at time index j - 1 < N, generate the price for time index j, denoted as S = S_ij
// Case for call option:
// If j = N, the option is expired with value V = exp(-rT)(S-K)^+ and path i is finished
// If j < N, calculate S_c = f_C(S)
// If S > S_c, the option is exercised with value V_i = exp(-rT)(S-K)^+ and path i is stopped. Otherwise,
// the option is held and path continues to live to the next step j+1
//
// Case for put option:
// If j = N, the option is expired with value V = exp(-rT)(K-S)^+ and path i is finished
// If j < N, calculate S_p = f_p(S)
// if S < S_p, the option is exercised with value V_i and path i is stopped. Otherwise,
// the option is held and path continues to live to the next step j+1.
// Compute S_c parameters and S_p
double m = 2*r/(pow(sigma,2.0));
double n = 2*(r-q)/(pow(sigma,2.0));
VectorXd k(t.size());
for(int i = 0; i < k.size(); i++){
k(i) = 1.0 - exp((double) -r*(double)(T - t(i))); // Note the t vector (not sure if this is correct)
}
VectorXd Q_2(k.size());
VectorXd Q_1(k.size());
for(int i = 0; i < Q_2.size(); i++){
Q_1(i) = (-1*(n-1) + sqrt((double)(n-1)*(n-1) + (double)4*m/(double)(k(i))))/2.0; // Q_1 < 0
Q_2(i) = (-1*(n-1) + sqrt((double)(n-1)*(n-1) + (double)4*m/(double)(k(i))))/2.0; // Q_2 > 0
}
double d_1 = (log(S0/K) + (r+sigma*sigma/2.)*(T))/(sigma*sqrt(T));
double C_e = Black_Scholes(T, K, S0, r, sigma); // C_e(S) is the European call option price calculated by Black-Scholes
double Delta = exp(-q*T)*phi(d_1);
MatrixXd V(M,N+1);
VectorXd S_c(Q_2.size());
MatrixXd call_fun = call_payoff(S,K);
for(int j = 0; j < N + 1; j++){
for(int i = 0; i < M; i++){
if(j == N){
V(i,j) = exp(-r*T)*call_fun(i,j); //////////////
//cout << "The option is expired with value " << V(i) << " and path " << i << " is finished" << endl;
}
else if(j < N){
S_c(j) = Q_2(j)*(C_e + K)/(Q_2(j) - (1 - Delta));
}
else if (S(i,j) > S_c(j)){
V(i,j) = exp(-r*T)*call_fun(i,j); ///////////////
//cout << "The option is expired with value " << V(i) << " and path " << i << " is finished" << endl;
}
}
}
double Value = 0.0;
for(int i = 0; i < V.rows(); i++){
for(int j = 0; j < V.cols(); j++){
Value += V(i,j);
}
}
Value = 1.0/M * Value;
cout << C_e << endl;
cout << endl;
cout << Value << endl;
}
I am pretty new to C++, so I am not sure how to debug my program when this sort of problem arises. This happened with another algorithm I wrote as well, but after I restarted my computer it worked fine. Any suggestions are greatly appreciated.
From what the user Incomputable asked, I believe this is crashing because of low memory; here are the specifications of my computer:
Update:
Taking the advice from user Daniel Jour, I changed the FM function to void. Using the same example, where I set M = 1000 and leave N = 100, I get this crash message:
This application has requested the Runtime to terminate it in an unusual way.
Please contact the application's support team for more information.
Assertion failed!
Program: C:\Users\Morgan Weiss\workspace\Forward_Monte_Carlo\Debug\Forward_Monte_Carlo.exe
File: c:\mingw\include\c++\6.2.0\eigen\src/Core/DenseCoeffsBase.h, Line 365
Expression: row >= 0 && row < rows() && col >= 0 && col < cols()
Update 2:
I set N = 1000 and M = 1000 and it ran just fine with no issue, so I am not sure why the program crashes when N is not equal to M... Any ideas?
Looking at your code, the following seems quite strange:
MatrixXd call_payoff(MatrixXd S, double K){
MatrixXd result(S.rows(),S.cols()); <-- the result has exactly the same size as the input
....
}
Then:
VectorXd S_c(Q_2.size()); <---- Vector (one of the dimensions is 1)
MatrixXd call_fun = call_payoff(S,K); <--- Matrix 1xN (or Nx1, I am not sure)
And then:
for(int j = 0; j < N + 1; j++){
for(int i = 0; i < M; i++){
...
V(i,j) = exp(-r*T)*call_fun(i,j); <---- i and j may be significantly bigger than 1
...
}
}
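Whichever matrix it is, the failed expression in the question (row >= 0 && row < rows() && col >= 0 && col < cols()) is Eigen's own range check, so checking the sizes right before each indexed access usually pins it down quickly. One place that looks suspicious under that lens (an observation to verify, not a confirmed fix): generateGaussianNoise declares MatrixXd Z(N,M) but fills it as Z(i,j) with i < M and j < N, which only stays in range when M == N, matching the behaviour described in Update 2. A small sketch of the kind of check that catches this (the helper name at is mine):

#include <Eigen/Dense>
#include <cstdlib>
#include <iostream>

// Bounds-checked element access that reports which index is out of range
// before Eigen's internal assertion fires.
double at(const Eigen::MatrixXd& A, int i, int j, const char* name) {
    if (i < 0 || i >= A.rows() || j < 0 || j >= A.cols()) {
        std::cerr << name << "(" << i << "," << j << ") is out of range; "
                  << name << " is " << A.rows() << "x" << A.cols() << "\n";
        std::abort();
    }
    return A(i, j);
}

int main() {
    const int M = 1000, N = 100;
    Eigen::MatrixXd Z = Eigen::MatrixXd::Zero(N, M);  // N rows, M columns, as in the question
    std::cout << "Z is " << Z.rows() << "x" << Z.cols() << "\n";
    std::cout << at(Z, 0, 0, "Z") << "\n";            // fine
    // at(Z, M - 1, 0, "Z");                          // would report 999 >= rows() == 100
    return 0;
}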
So I was writing a simple perceptron model, and when I finished the code and saw that there were no errors I was pretty surprised. But it seems that my model doesn't converge (along with some other oddities).
Basically it keeps getting 25/100 samples right at every epoch, and when every epoch ends the weights always come back to 0.
Because the code is spread across multiple files, I put it on Google Drive. Here it is:
https://drive.google.com/folderview?id=0B_r3mf9HbUrLaDNlc1F6RXhNMnM&usp=sharing
It is a Visual Studio Community 2013 project. You can open and run it to get a better idea.
Here's a quick preview of the files, though.
main.cpp:
#include <iostream>
#include <vector>
#include <algorithm>
#include <fstream>
#include <string>
#include <math.h>
#include "LinearAlgebra.h"
#include "MachineLearning.h"
using namespace std;
using namespace LinearAlgebra;
using namespace MachineLearning;
void printVector(vector< vector<float> > X);
vector< vector<float> > getIrisX();
vector<float> getIrisy();
int main()
{
vector< vector<float> > X = getIrisX();
vector<float> y = getIrisy();
vector<float> test1;
test1.push_back(5.0);
test1.push_back(3.3);
test1.push_back(1.4);
test1.push_back(0.2);
vector<float> test2;
test2.push_back(6.0);
test2.push_back(2.2);
test2.push_back(5.0);
test2.push_back(1.5);
//printVector(X);
//for (int i = 0; i < y.size(); i++){ cout << y[i] << " "; }cout << endl;
perceptron clf(0.1, 10);
clf.fit(X, y);
cout << "Now Predicting: 5.0,3.3,1.4,0.2(CorrectClass=1,Iris-setosa) -> " << clf.predict(test1) << endl;
cout << "Now Predicting: 6.0,2.2,5.0,1.5(CorrectClass=-1,Iris-virginica) -> " << clf.predict(test2) << endl;
system("PAUSE");
return 0;
}
void printVector(vector< vector<float> > X)
{
for (int i = 0; i < X.size(); i++)
{
for (int j = 0; j < X[i].size(); j++)
{
cout << X[i][j] << " ";
}
cout << endl;
}
}
vector<float> getIrisy()
{
vector<float> y;
ifstream inFile;
inFile.open("y.data");
string sampleClass;
for (int i = 0; i < 100; i++)
{
inFile >> sampleClass;
if (sampleClass == "Iris-setosa")
{
y.push_back(1);
}
else
{
y.push_back(-1);
}
}
return y;
}
vector< vector<float> > getIrisX()
{
ifstream af;
ifstream bf;
ifstream cf;
ifstream df;
af.open("a.data");
bf.open("b.data");
cf.open("c.data");
df.open("d.data");
vector< vector<float> > X;
for (int i = 0; i < 100; i++)
{
char scrap;
int scrapN;
af >> scrapN;
bf >> scrapN;
cf >> scrapN;
df >> scrapN;
af >> scrap;
bf >> scrap;
cf >> scrap;
df >> scrap;
float a, b, c, d;
af >> a;
bf >> b;
cf >> c;
df >> d;
X.push_back(vector < float > {a, b, c, d});
}
af.close();
bf.close();
cf.close();
df.close();
return X;
}
MachineLearning.h:
#pragma once
#include<vector>
using namespace std;
namespace MachineLearning{
class perceptron
{
public:
perceptron(float eta,int epochs);
float netInput(vector<float> X);
int predict(vector<float> X);
void fit(vector< vector<float> > X, vector<float> y);
private:
float m_eta;
int m_epochs;
vector < float > m_w;
vector < float > m_errors;
};
}
MachineLearning.cpp
#include<vector>
#include <algorithm>
#include <iostream>
#include<fstream>
#include <math.h>
#include "MachineLearning.h"
using namespace std;
namespace MachineLearning{
perceptron::perceptron(float eta, int epochs)
{
m_epochs = epochs;
m_eta = eta;
}
void perceptron::fit(vector< vector<float> > X, vector<float> y)
{
for (int i = 0; i < X[0].size() + 1; i++) // X[0].size() + 1 -> I am using +1 to add the bias term
{
m_w.push_back(0);
}
for (int i = 0; i < m_epochs; i++)
{
int errors = 0;
for (int j = 0; j < X.size(); j++)
{
float update = m_eta * (y[j] - predict(X[j]));
m_w[0] = update;
for (int w = 1; w < m_w.size(); w++){ m_w[w] = update * X[j][w - 1]; }
errors += update != 0 ? 1 : 0;
}
m_errors.push_back(errors);
}
}
float perceptron::netInput(vector<float> X)
{
// Sum(Vector of weights * Input vector) + bias
float probabilities = m_w[0];
for (int i = 0; i < X.size(); i++)
{
probabilities += X[i] * m_w[i + 1];
}
return probabilities;
}
int perceptron::predict(vector<float> X)
{
return netInput(X) > 0 ? 1 : -1; //Step Function
}
}
Any kind of help is much appreciated.
Thanks in advance.
Panos P.
After hours of tedious debugging I finally found the mistake. There was a bug in my code when I updated the weights.
for (int j = 0; j < X.size(); j++)
{
float update = m_eta * (y[j] - predict(X[j]));
m_w[0] = update;
for (int w = 1; w < m_w.size(); w++){ m_w[w] = update * X[j][w - 1]; }
errors += update != 0 ? 1 : 0;
}
notice that:
m_w[w] = update * X[j][w - 1]
I am setting the weights equal to the update. It looks like I forgot a "+" sign. Now it works fine.
Here it is now:
m_w[w] += update * X[j][w - 1]
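To see why the '=' version cannot learn and why the weights keep collapsing to zero whenever a sample is classified correctly, here is a small self-contained toy that runs both update rules side by side; the data and variable names are mine, not from the linked project:

#include <cstddef>
#include <cstdio>
#include <vector>

// With '=', the weight vector only ever holds the single most recent update
// (and becomes all zeros whenever a sample is classified correctly, since the
// update is then 0). With '+=', the updates accumulate and the perceptron learns.
int main() {
    std::vector<std::vector<float>> X = {{1.f, 0.f}, {0.f, 1.f}, {1.f, 1.f}};
    std::vector<float> y = {1.f, -1.f, 1.f};
    const float eta = 0.1f;

    std::vector<float> w_assign(3, 0.f), w_accum(3, 0.f);  // bias + 2 weights

    auto predict = [](const std::vector<float>& w, const std::vector<float>& x) {
        float s = w[0] + w[1] * x[0] + w[2] * x[1];
        return s > 0.f ? 1.f : -1.f;
    };

    for (int epoch = 0; epoch < 10; ++epoch) {
        for (std::size_t j = 0; j < X.size(); ++j) {
            float upd = eta * (y[j] - predict(w_assign, X[j]));
            w_assign[0] = upd;                 // '=' : overwritten every sample
            w_assign[1] = upd * X[j][0];
            w_assign[2] = upd * X[j][1];

            upd = eta * (y[j] - predict(w_accum, X[j]));
            w_accum[0] += upd;                 // '+=' : bias accumulates too
            w_accum[1] += upd * X[j][0];
            w_accum[2] += upd * X[j][1];
        }
    }
    std::printf("with '=' : w = [%g, %g, %g]\n", w_assign[0], w_assign[1], w_assign[2]);
    std::printf("with '+=': w = [%g, %g, %g]\n", w_accum[0], w_accum[1], w_accum[2]);
    return 0;
}

Note that the bias term m_w[0] in the original fit() has the same problem, so it presumably needs the same += treatment.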
Sometimes the silliest mistakes can cause the most annoying of errors.
I hope that this might help anyone making the same mistake.