SURF comparison code giving issues - computer-vision

I am doing SURF comparison to identify objects in images by calculating the Euclidean distances between the descriptors, but the following code isn't working. IPoint is a SURF feature point. Any help appreciated.
List<IPoint> ipts = new List<IPoint>();
Dictionary<string, List<IPoint>> objs = new Dictionary<string, List<IPoint>>();
double distance(IPoint a, IPoint b)
{
double dis = 0;
for (int i = 0; i < 64; i++)
{
dis += Math.Sqrt(Math.Pow((a.descriptor[i] - b.descriptor[i]), 2));
}
return (dis);
}
bool matchpoint(IPoint a, List<IPoint> l, out string e)
{
e = "";
double smallest = double.MaxValue;
string s = string.Empty;
for (int i = 0; i < l.Count; i++)
{
var d = distance(a, l[i]);
if (d < smallest)
{
smallest = d;
s = i.ToString();
}
}
if (smallest < 0.5)
{
e = s;
return true;
}
else
{
return false;// null;
}
return false;
}
string match(out double per)
{
string h;
Dictionary<string, double> torn = new Dictionary<string, double>();
foreach (string s in objs.Keys.ToList())
{
int count = 0;
for (int i = 0; i < objs[s].Count; i++)
{
if (matchpoint(objs[s][i], ipts,out h))
{
count++;
}
}
torn[s] = count / objs[s].Count;
count = 0;
}
string smalln = "";
double smallest = double.MaxValue;
foreach (string s in torn.Keys.ToList())
{
if (torn[s] < smallest)
{
smallest = torn[s];
smalln = s;
}
}
per = smallest;
return smalln;
}
private void button1_Click(object sender, EventArgs e)
{
double d;
match(out d);
MessageBox.Show(match(out d) + " " + d.ToString());
}

Should be:
double distance(IPoint a, IPoint b)
{
double dis = 0;
for (int i = 0; i < 64; i++)
{
dis += Math.Pow((a.descriptor[i] - b.descriptor[i]), 2);
}
return Math.Sqrt(dis);
}
You are squaring and then taking the root of each difference, which is basically taking an absolute value. Remember the simple Pythagorean example: C = SQRT(A*A + B*B), not C = SQRT(A*A) + SQRT(B*B).
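As a quick numeric check: if two descriptors differ by 3 in one dimension and 4 in another, the original code gives sqrt(3*3) + sqrt(4*4) = 3 + 4 = 7 (effectively a sum of absolute differences), while the corrected code gives sqrt(3*3 + 4*4) = 5, the true Euclidean distance.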

Thanks for your help; taking the ratio of the two best distances also works perfectly. I'm posting working code here because you can't find the answer to this question anywhere else.
void getMatches(List<IPoint> ipts1, List<IPoint> ipts2,out List<IPoint> mats)
{
List<IPoint> matches = new List<IPoint>();
float dist, d1, d2;
IPoint match;
matches.Clear();
for(int i = 0; i < ipts1.Count; i++)
{
d1 = d2 = float.MaxValue;
for(int j = 0; j < ipts2.Count; j++)
{
dist = (float)distance(ipts1[i], ipts2[j]);//ipts1[i] - ipts2[j];
if(dist<d1) // if this feature matches better than current best
{
d2 = d1;
d1 = dist;
match = ipts2[j];
}
else if(dist<d2) // this feature matches better than second best
{
d2 = dist;
}
}
// If the best:second-best distance ratio d1:d2 is below the threshold (e.g. 0.65), the ipoints are a match
if(d1/d2 < Convert.ToSingle(textBox2.Text))
{
matches.Add(ipts1[i]);
}
}
mats = matches;
}


How can I shorten this code to avoid redundancy?

Here's my code snippet:
string trend()
{
double emaTrend0 = iMA (NULL,0,200,0,MODE_EMA,PRICE_CLOSE,0);
double emaTrend1 = iMA (NULL,0,200,0,MODE_EMA,PRICE_CLOSE,1);
double emaTrend2 = iMA (NULL,0,200,0,MODE_EMA,PRICE_CLOSE,2);
double emaTrend3 = iMA (NULL,0,200,0,MODE_EMA,PRICE_CLOSE,3);
double emaTrend4 = iMA (NULL,0,200,0,MODE_EMA,PRICE_CLOSE,4);
double emaTrend5 = iMA (NULL,0,200,0,MODE_EMA,PRICE_CLOSE,5);
double emaTrend6 = iMA (NULL,0,200,0,MODE_EMA,PRICE_CLOSE,6);
double emaTrend7 = iMA (NULL,0,200,0,MODE_EMA,PRICE_CLOSE,7);
double emaTrend8 = iMA (NULL,0,200,0,MODE_EMA,PRICE_CLOSE,8);
double emaTrend9 = iMA (NULL,0,200,0,MODE_EMA,PRICE_CLOSE,9);
string signal;
double HighCandle0 = High[0];
double HighCandle1 = High[1];
double HighCandle2 = High[2];
double HighCandle3 = High[3];
double HighCandle4 = High[4];
double HighCandle5 = High[5];
double HighCandle6 = High[6];
double HighCandle7 = High[7];
double HighCandle8 = High[8];
double HighCandle9 = High[9];
double LowCandle0 = Low[0];
double LowCandle1 = Low[1];
double LowCandle2 = Low[2];
double LowCandle3 = Low[3];
double LowCandle4 = Low[4];
double LowCandle5 = Low[5];
double LowCandle6 = Low[6];
double LowCandle7 = Low[7];
double LowCandle8 = Low[8];
double LowCandle9 = Low[9];
if (emaTrend0 > HighCandle0 && emaTrend1 > HighCandle1 && emaTrend2 > HighCandle2 &&emaTrend3 > HighCandle3 &&
emaTrend4 > HighCandle4 &&emaTrend5 > HighCandle5 &&emaTrend6 > HighCandle6 &&emaTrend7 > HighCandle7 &&
emaTrend8 > HighCandle8 && emaTrend9 > HighCandle9 )
signal = "downtrend";
else if (emaTrend0 < LowCandle0 && emaTrend1 < LowCandle1 && emaTrend2 < LowCandle2 &&emaTrend3 < LowCandle3 &&
emaTrend4 < LowCandle4 &&emaTrend5 < LowCandle5 &&emaTrend6 < LowCandle6 &&emaTrend7 < LowCandle7 &&
emaTrend8 < LowCandle8 && emaTrend9 < LowCandle9 )
signal = "uptrend";
return signal;
}
What I want is to loop over the bars, with every condition required to be true before producing a signal such as downtrend or uptrend.
The code below is my attempt, but it is still no good: it produces a downtrend or uptrend signal when just one of the conditions is true, whereas I need all of them to be true before producing a signal.
string trend() {
string signal = "";
for (int i = 0; i<=9 ; i++){
double emaTrend = iMA (NULL,0,200,0,MODE_EMA,PRICE_CLOSE,i);
double highCandle = High[i];
double lowCandle = Low[i];
if (emaTrend > highCandle){
signal = "downtrend";
}
else if (emaTrend < lowCandle){
signal = "uptrend";
}
}
return signal;
}
All replies would be deeply appreciated. Thank you!
Here's a possible solution: count the number of down and up trends and then compare those counts (not tested):
std::string trend() {
const std::size_t max_count = 10;
std::size_t count_downtrend = 0;
std::size_t count_uptrend = 0;
for (std::size_t i = 0; i != max_count; ++i) {
const double emaTrend = iMA (NULL, 0, 200, 0, MODE_EMA, PRICE_CLOSE, i);
const double highCandle = High[i];
const double lowCandle = Low[i];
if (emaTrend > highCandle) {
++count_downtrend;
} else if (emaTrend < lowCandle) {
++count_uptrend;
}
}
std::string signal;
if (count_downtrend == max_count) {
signal = "downtrend";
} else if (count_uptrend == max_count) {
signal = "uptrend";
}
return signal;
}
This is very similar to the regular "linear search loop", with an early break if the trend is broken:
enum Direction
{
Up, Down, None
};
Direction direction(double value, double low, double high)
{
return value < low ? Down : (value > high ? Up : None);
}
double emaClose(int i)
{
return iMA(NULL,0,200,0,MODE_EMA,PRICE_CLOSE,i);
}
string trend()
{
const Direction d = direction(emaClose(0), Low[0], High[0]);
if (d == None)
{
return "";
}
for (int i = 1; i < 10 ; i++)
{
if (direction(emaClose(i), Low[i], High[i]) != d)
{
return "";
}
}
return d == Up ? "uptrend" : "downtrend";
}

Fixing Neural Net vanishing gradients problem?

This is going to be a long one. I am still very new to coding (I started 3 months ago), so I know my code is not perfect; any criticism beyond the question is more than welcome. I have specifically avoided using pointers because I do not fully understand them. I can use them, but I don't trust that I will use them correctly in a program like this.
First things first: I have a version of this where there is only 1 hidden layer, and that net works perfectly. I started running into problems when I tried to expand the number of hidden layers.
Some info on the net:
-I am using softmax output activation as I have 3 output neurons.
-I am using tanh as my activation function on the rest of the net.
-The file being read for the input has a format of
"input: 0.56 0.76 0.23 0.67"
"output: 0.0 0.0 1.0" (this is the target)
-The weights connecting a layer 1 neuron to a layer 2 neuron are stored in the layer 1 neuron.
-The bias for each neuron is stored in that neuron.
-The target is 1.0 0.0 0.0 if the sum of the input numbers is below 1, 0.0 1.0 0.0 if the sum is between 1 and 2, and 0.0 0.0 1.0 if the sum is above 2 (see the worked example just after this list).
-using L1 regularization.
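Worked example: the sample input line above, 0.56 0.76 0.23 0.67, sums to 2.22, which is above 2, so its target is 0.0 0.0 1.0 (the third class), exactly the sample target line shown earlier.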
Those problems specifically being:
The softmax output values do not move out of a relatively equalised range. (Positions 1 and 2 in the target vector have a roughly 50/50 occurrence rate, while position 3 occurs less than 3% of the time.) By "relatively equalised" I mean the softmax output generally looks something like
"0.56... 0.48... 0.02..." even after 500 epochs.
The weights at the hidden layers closer to the input layer don't change much at all, which is what I think vanishing gradients are; I might be wrong on this. The weights at the hidden layer closest to the output end up between -50 and 50 (which I think is okay?).
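(For context, the usual chain-rule picture: each tanh layer multiplies the backpropagated error by tanh'(z) = 1 - tanh(z)^2, which is at most 1 and usually smaller, so over several layers the product of these factors can shrink toward zero and leave the earliest weights nearly frozen.)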
Things I have tried:
I have tried using ReLU, parametric ReLU, and exponential ReLU, but with all of these the softmax output value for neuron 3 keeps rising and the other 2 neurons' values keep falling. These values continue on that trajectory until either 500 epochs have been reached or they just turn into NaNs. (I think this is to do with the structure of my code rather than the ReLU function itself.)
If I set the number of hidden layers above 3 while using ReLU, it immediately spits out NaNs, within the first epoch.
The backprop function is pretty long, but this is specifically because I have deconstructed it many times over to try and figure out where I might be mismatching values or something. I do have it in a condensed version but I feel I have a higher chance of being completely off the mark there than I do if I have it deconstructed.
I have included the ReLU function code that I used. It is the first time I have used it, so I might be wrong on it as well, but I don't think so; I have double-checked multiple times. The ReLU in the code is specifically "ELU", or exponential ReLU.
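For reference, the exponential linear unit with scale a is ELU(x) = x for x > 0 and a*(exp(x) - 1) for x <= 0; its derivative for x <= 0 is a*exp(x) = ELU(x) + a, which is consistent with the Relu/Reluderiv pair in the code below when a = 0.01.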
Here is the code for the net:
#include <iostream>
#include <fstream>
#include <cmath>
#include <vector>
#include <sstream>
#include <random>
#include <string>
#include <iomanip>
double randomt(double x, double y)
{
std::random_device rd;
std::mt19937 mt(rd());
std::uniform_real_distribution<double> dist(x, y);
return dist(mt);
}
class InputN
{
public:
double val{};
std::vector <double> weights{};
};
class HiddenN
{
public:
double preactval{};
double actval{};
double actvalPD{};
double preactvalpd{};
std::vector <double> weights{};
double bias{};
};
class OutputN
{
public:
double preactval{};
double actval{};
double preactvalpd{};
double bias{};
};
class Net
{
public:
std::vector <InputN> inneurons{};
std::vector <std::vector <HiddenN>> hiddenneurons{};
std::vector <OutputN> outputneurons{};
double lambda{ 0.015 };
double alpha{ 0.02 };
};
double tanhderiv(double val)
{
return 1 - tanh(val) * tanh(val);
}
double Relu(double val)
{
if (val < 0) return 0.01 *(exp(val) - 1);
else return val;
}
double Reluderiv(double val)
{
if (val < 0) return Relu(val) + 0.01;
else return 1;
}
double regularizer(double weight)
{
double absval{};
if (weight < 0) absval = weight - weight - weight;
else if (weight > 0 || weight == 0) absval = weight;
else;
if (absval > 0) return 1;
else if (absval < 0) return -1;
else if (absval == 0) return 0;
else return 2;
}
void feedforward(Net& net)
{
double sum{};
int prevlayer{};
for (size_t Hsize = 0; Hsize < net.hiddenneurons.size(); Hsize++)
{
//std::cout << "in first loop" << '\n';
prevlayer = Hsize - 1;
for (size_t Hel = 0; Hel < net.hiddenneurons[Hsize].size(); Hel++)
{
//std::cout << "in second loop" << '\n';
if (Hsize == 0)
{
//std::cout << "in first if" << '\n';
for (size_t Isize = 0; Isize < net.inneurons.size(); Isize++)
{
//std::cout << "in fourth loop" << '\n';
sum += (net.inneurons[Isize].val * net.inneurons[Isize].weights[Hel]);
}
net.hiddenneurons[Hsize][Hel].preactval = net.hiddenneurons[Hsize][Hel].bias + sum;
net.hiddenneurons[Hsize][Hel].actval = tanh(sum);
sum = 0;
//std::cout << "first if done" << '\n';
}
else
{
//std::cout << "in else" << '\n';
for (size_t prs = 0; prs < net.hiddenneurons[prevlayer].size(); prs++)
{
//std::cout << "in fourth loop" << '\n';
sum += net.hiddenneurons[prevlayer][prs].actval * net.hiddenneurons[prevlayer][prs].weights[Hel];
}
//std::cout << "fourth loop done" << '\n';
net.hiddenneurons[Hsize][Hel].preactval = net.hiddenneurons[Hsize][Hel].bias + sum;
net.hiddenneurons[Hsize][Hel].actval = tanh(sum);
//std::cout << "else done" << '\n';
sum = 0;
}
}
}
//std::cout << "first loop done " << '\n';
int lasthid = net.hiddenneurons.size() - 1;
for (size_t Osize = 0; Osize < net.outputneurons.size(); Osize++)
{
for (size_t Hsize = 0; Hsize < net.hiddenneurons[lasthid].size(); Hsize++)
{
sum += (net.hiddenneurons[lasthid][Hsize].actval * net.hiddenneurons[lasthid][Hsize].weights[Osize]);
}
net.outputneurons[Osize].preactval = net.outputneurons[Osize].bias + sum;
}
}
void softmax(Net& net)
{
double sum{};
for (size_t Osize = 0; Osize < net.outputneurons.size(); Osize++)
{
sum += exp(net.outputneurons[Osize].preactval);
}
for (size_t Osize = 0; Osize < net.outputneurons.size(); Osize++)
{
net.outputneurons[Osize].actval = exp(net.outputneurons[Osize].preactval) / sum;
}
}
void lossfunc(Net& net, std::vector <double> target)
{
int pos{ -1 };
double val{};
for (size_t t = 0; t < target.size(); t++)
{
pos += 1;
if (target[t] > 0)
{
break;
}
}
for (size_t s = 0; net.outputneurons.size(); s++)
{
val = -log(net.outputneurons[pos].actval);
}
}
void backprop(Net& net, std::vector<double>& target)
{
for (size_t outI = 0; outI < net.outputneurons.size(); outI++)
{
double PD = target[outI] - net.outputneurons[outI].actval;
net.outputneurons[outI].preactvalpd = PD * -1;
}
size_t lasthid = net.hiddenneurons.size() - 1;
for (size_t LH = 0; LH < net.hiddenneurons[lasthid].size(); LH++)
{
for (size_t LHW = 0; LHW < net.hiddenneurons[lasthid][LH].weights.size(); LHW++)
{
double weight = net.hiddenneurons[lasthid][LH].weights[LHW];
double PD = net.outputneurons[LHW].preactvalpd * net.hiddenneurons[lasthid][LH].actval;
PD = PD * -1;
double delta = PD - (net.lambda * regularizer(weight));
weight = weight + (net.alpha * delta);
net.hiddenneurons[lasthid][LH].weights[LHW] = weight;
}
}
for (size_t OB = 0; OB < net.outputneurons.size(); OB++)
{
double bias = net.outputneurons[OB].bias;
double BPD = net.outputneurons[OB].preactvalpd;
BPD = BPD * -1;
double Delta = BPD;
bias = bias + (net.alpha * Delta);
}
for (size_t HPD = 0; HPD < net.hiddenneurons[lasthid].size(); HPD++)
{
double PD{};
for (size_t HW = 0; HW < net.outputneurons.size(); HW++)
{
PD += net.hiddenneurons[lasthid][HPD].weights[HW] * net.outputneurons[HW].preactvalpd;
}
net.hiddenneurons[lasthid][HPD].actvalPD = PD;
PD = 0;
}
for (size_t HPD = 0; HPD < net.hiddenneurons[lasthid].size(); HPD++)
{
net.hiddenneurons[lasthid][HPD].preactvalpd = net.hiddenneurons[lasthid][HPD].actvalPD * tanhderiv(net.hiddenneurons[lasthid][HPD].preactval);
}
for (size_t AllHid = net.hiddenneurons.size() - 2; AllHid > -1; AllHid--)
{
size_t uplayer = AllHid + 1;
for (size_t cl = 0; cl < net.hiddenneurons[AllHid].size(); cl++)
{
for (size_t clw = 0; clw < net.hiddenneurons[AllHid][cl].weights.size(); clw++)
{
double weight = net.hiddenneurons[AllHid][cl].weights[clw];
double PD = net.hiddenneurons[uplayer][clw].preactvalpd * net.hiddenneurons[AllHid][cl].actval;
PD = PD * -1;
double delta = PD - (net.lambda * regularizer(weight));
weight = weight + (net.alpha * delta);
net.hiddenneurons[AllHid][cl].weights[clw] = weight;
}
}
for (size_t up = 0; up < net.hiddenneurons[uplayer].size(); up++)
{
double bias = net.hiddenneurons[uplayer][up].bias;
double PD = net.hiddenneurons[uplayer][up].preactvalpd;
PD = PD * -1;
double delta = PD;
bias = bias + (net.alpha * delta);
}
for (size_t APD = 0; APD < net.hiddenneurons[AllHid].size(); APD++)
{
double PD{};
for (size_t APDW = 0; APDW < net.hiddenneurons[AllHid][APD].weights.size(); APDW++)
{
PD += net.hiddenneurons[AllHid][APD].weights[APDW] * net.hiddenneurons[uplayer][APDW].preactvalpd;
}
net.hiddenneurons[AllHid][APD].actvalPD = PD;
PD = 0;
}
for (size_t PPD = 0; PPD < net.hiddenneurons[AllHid].size(); PPD++)
{
double PD = net.hiddenneurons[AllHid][PPD].actvalPD * tanhderiv(net.hiddenneurons[AllHid][PPD].preactval);
net.hiddenneurons[AllHid][PPD].preactvalpd = PD;
}
}
for (size_t IN = 0; IN < net.inneurons.size(); IN++)
{
for (size_t INW = 0; INW < net.inneurons[IN].weights.size(); INW++)
{
double weight = net.inneurons[IN].weights[INW];
double PD = net.hiddenneurons[0][INW].preactvalpd * net.inneurons[IN].val;
PD = PD * -1;
double delta = PD - (net.lambda * regularizer(weight));
weight = weight + (net.alpha * delta);
net.inneurons[IN].weights[INW] = weight;
}
}
for (size_t hidB = 0; hidB < net.hiddenneurons[0].size(); hidB++)
{
double bias = net.hiddenneurons[0][hidB].bias;
double PD = net.hiddenneurons[0][hidB].preactvalpd;
PD = PD * -1;
double delta = PD;
bias = bias + (net.alpha * delta);
net.hiddenneurons[0][hidB].bias = bias;
}
}
int main()
{
std::vector <double> invals{ };
std::vector <double> target{ };
Net net;
InputN Ineuron;
HiddenN Hneuron;
OutputN Oneuron;
int IN = 4;
int HIDLAYERS = 4;
int HID = 8;
int OUT = 3;
for (int i = 0; i < IN; i++)
{
net.inneurons.push_back(Ineuron);
for (int m = 0; m < HID; m++)
{
net.inneurons.back().weights.push_back(randomt(0.0, 0.5));
}
}
//std::cout << "first loop done" << '\n';
for (int s = 0; s < HIDLAYERS; s++)
{
net.hiddenneurons.push_back(std::vector <HiddenN>());
if (s == HIDLAYERS - 1)
{
for (int i = 0; i < HID; i++)
{
net.hiddenneurons[s].push_back(Hneuron);
for (int m = 0; m < OUT; m++)
{
net.hiddenneurons[s].back().weights.push_back(randomt(0.0, 0.5));
}
net.hiddenneurons[s].back().bias = 1.0;
}
}
else
{
for (int i = 0; i < HID; i++)
{
net.hiddenneurons[s].push_back(Hneuron);
for (int m = 0; m < HID; m++)
{
net.hiddenneurons[s].back().weights.push_back(randomt(0.0, 0.5));
}
net.hiddenneurons[s].back().bias = 1.0;
}
}
}
//std::cout << "second loop done" << '\n';
for (int i = 0; i < OUT; i++)
{
net.outputneurons.push_back(Oneuron);
net.outputneurons.back().bias = randomt(0.0, 0.5);
}
//std::cout << "third loop done" << '\n';
int count{};
std::ifstream fileread("N.txt");
for (int epoch = 0; epoch < 500; epoch++)
{
count = 0;
if (epoch == 100 || epoch == 100 * 2 || epoch == 100 * 3 || epoch == 100 * 4 || epoch == 499)
{
printvals("no", net);
}
fileread.clear(); fileread.seekg(0, std::ios::beg);
while (fileread.is_open())
{
std::cout << '\n' << "epoch: " << epoch << '\n';
std::string fileline{};
fileread >> fileline;
if (fileline == "in:")
{
std::string input{};
double nums{};
std::getline(fileread, input);
std::stringstream ss(input);
while (ss >> nums)
{
invals.push_back(nums);
}
}
if (fileline == "out:")
{
std::string output{};
double num{};
std::getline(fileread, output);
std::stringstream ss(output);
while (ss >> num)
{
target.push_back(num);
}
}
count += 1;
if (count == 2)
{
for (size_t inv = 0; inv < invals.size(); inv++)
{
net.inneurons[inv].val = invals[inv];
}
//std::cout << "calling feedforward" << '\n';
feedforward(net);
//std::cout << "ff done" << '\n';
softmax(net);
printvals("output", net);
std::cout << "target: " << '\n';
for (auto element : target) std::cout << element << " / ";
std::cout << '\n';
backprop(net, target);
invals.clear();
target.clear();
count = 0;
}
if (fileread.eof()) break;
}
}
//std::cout << "fourth loop done" << '\n';
return 1;
}
Much appreciated to anyone who actually made it through all that! :)

Debugging a bad_alloc error c++

When I run my code everything seems to be working fine but after a certain number of timesteps (usually ~100, but a different number each time) I get the error:
"terminate called after throwing an instance of 'std::bad_alloc' "
Not really sure how to go about debugging this, as it doesn't happen at the same point each time the code runs. I will post my code, but it's quite long and admittedly a bit of a mess (this is my first real attempt at writing a program in C++), so I will try to explain the structure and where I would expect the most likely origin of the error to be.
The basic structure is that I have an array of "birds" (a class I define) that choose how to update themselves at every time step by some quite complicated calculation. In doing so it regularly calls the function getVisualState to update a linked list that every bird stores as its "visual state". I believe this is the only time I allocate any memory dynamically during the simulation, so I guess there's a pretty good chance this is the source of the error. The function Bird::resetVisualState() should clear the allocated memory after it's been used (but it doesn't seem like I am running out of memory, at least monitoring it in the task manager).
If anyone can see anything they think may be the source of the problem that would be fantastic, or if not just any suggestions for how I should actually debug this!
#include <iostream>
#include <cmath>
#include <gsl/gsl_rng.h>
#include <gsl/gsl_randist.h>
#include <ctime>
#include <vector>
#include <algorithm>
#include <fstream>
#include "birdClasses.h"
using namespace std;
/*
nBirds, nSteps, nF, v, dt, birdRad defined in "birdClasses.h"
*/
//define other parameters.
const int nSensors = 20;
const int nMoves = 3; //no. possible moves at each step.
double dTheta = 15*M_PI/180.0; //angle that birds can change their orientation by in a timestep.
double moves[nMoves] = {-dTheta, 0, dTheta}; //possible moves.
double noise = 0.0;
double initBoxX = 20, initBoxY = 20; //size of initial box particles are placed in.
double sensorFrac[nSensors];
double sensorRef[nSensors];
double sensorRange = 2*M_PI/((double)nSensors);
int counter = 0;
int nps = numStates(nMoves,nF);
int *possibleStates = new int[nps];
//variables to record positions and orientations.
double xPositions[nSteps][nBirds], yPositions[nSteps][nBirds], orientations[nSteps][nBirds];
//array to keep track of which collisions are possible.
int couldCollide[nF][nBirds][nBirds];
//function prototypes
bool checkCollision(int i, int nFut, Bird *birds, double xi, double yi);
unsigned long int getVisualState(Bird *birdList, int nFut, int i, double cX, double cY, double cAng);
void updateTree(double exploreX, double exploreY, double exploreO, Bird *bird, int bn, int nFut);
int main()
{
sensorRef[0] = sensorRange;
for(int u=1; u<nSensors; u++) sensorRef[u] = sensorRef[u-1] + sensorRange;
//set up GSL random number generator.
const gsl_rng_type * Tr;
gsl_rng * RNG;
gsl_rng_env_setup();
Tr = gsl_rng_default;
RNG = gsl_rng_alloc (Tr);
gsl_rng_set(RNG,time(NULL));
//set up output
ofstream output("output.txt");
//initialize birds in a box randomly, all with the same orientation.
Bird birdList[nBirds];
for(int i=0; i<nBirds; i++) {
birdList[i].set_position(gsl_ran_flat(RNG,0,initBoxX),gsl_ran_flat(RNG,0,initBoxY));
}
//ACTUAL CODE
int uniqueVisStates[nMoves];
double cX, cY, fX, fY, exploreX, exploreY, exploreO;
//main time step loop
for(int ts=0; ts<nSteps; ts++) {
//save current positions
for(int i=0; i<nBirds; i++) {
xPositions[ts][i] = birdList[i].get_xPos();
yPositions[ts][i] = birdList[i].get_yPos();
orientations[ts][i] = birdList[i].get_orientation();
birdList[i].updateFuture();
}
//update list of possible collisions.
for(int nFut=0; nFut<nF; nFut++) {
for(int i=0; i<nBirds; i++) {
cX = birdList[i].get_xPos(); cY = birdList[i].get_yPos();
counter = 0;
for(int j=0; j<nBirds; j++) {
if(i==j) {
continue;
} else {
fX = birdList[j].get_futureX(nFut); fY = birdList[j].get_futureY(nFut);
if((cX-fX)*(cX-fX)+(cY-fY)*(cY-fY) < ((nFut+1)*v*dt+2*birdRad)*((nFut+1)*v*dt+2*birdRad)) {
couldCollide[nFut][i][counter]=j;
counter++;
}
}
}
if(counter < nBirds) couldCollide[nFut][i][counter]=-1;
}
}
//loop over birds to choose how they update their orientation.
for(int bn=0; bn<nBirds; bn++) {
//loop over possible moves bird can make NOW.
for(int l=0; l<nMoves; l++) {
uniqueVisStates[l]=0;
}
for(int mn=0; mn<nMoves; mn++) {
for(int l=0; l<nps; l++) {
possibleStates[l]=0;
}
counter = 0;
exploreO = birdList[bn].get_orientation() + moves[mn];
exploreX = birdList[bn].get_xPos() + cos(exploreO)*v*dt;
exploreY = birdList[bn].get_yPos() + sin(exploreO)*v*dt;
updateTree(exploreX,exploreY,exploreO,&birdList[0],bn,0);
vector<int> visStates (possibleStates,possibleStates+counter);
vector<int>::iterator it;
sort (visStates.begin(),visStates.end());
it = unique(visStates.begin(),visStates.end());
uniqueVisStates[mn] = distance(visStates.begin(),it);
}
int maxInd = 0, maxVal = uniqueVisStates[0];
for(int h=1; h<nMoves; h++) {
if(uniqueVisStates[h] > maxVal) {
maxInd = h; maxVal = uniqueVisStates[h];
} else if(uniqueVisStates[h]==maxVal) {
if(abs(moves[h])<abs(moves[maxInd])) {
maxInd = h;
}
}
}
birdList[bn].update_Orientation(moves[maxInd]);
birdList[bn].update_Pos(birdList[bn].get_xPos()+cos(birdList[bn].get_orientation())*v*dt,birdList[bn].get_yPos()+sin(birdList[bn].get_orientation())*v*dt);
}
for(int bn=0; bn<nBirds; bn++) birdList[bn].finishUpdate();
cout << ts << "\n";
}
//OUTPUT DATA INTO A TEXT FILE.
for(int ts=0; ts<(nSteps-1); ts++) {
for(int bn=0; bn<nBirds; bn++) {
output << xPositions[ts][bn] << " " << yPositions[ts][bn] << " " << orientations[ts][bn] << "\n";
}
}
delete[] possibleStates;
return 0;
}
bool checkCollision(int i, int nFut, Bird *birds, double xi, double yi) {
int cond = 1; int index, counti=0;
while(cond) {
index = couldCollide[nFut][i][counti];
if(index==-1) break;
double xj = birds[index].get_futureX(nFut);
double yj = birds[index].get_futureY(nFut);
if((xi-xj)*(xi-xj)+(yi-yj)*(yi-yj) < 4*birdRad*birdRad) {
return 1;
}
counti++;
if(counti==nBirds) break;
}
return 0;
}
unsigned long int getVisualState(Bird *birdList, int nFut, int i, double cX, double cY, double cAng) {
//finds the visual state of bird i based on its current "exploring position" and the predicted positions of other birds at timestep nFut.
//visual state is defined by discretizing the bird's field of view into nSensors (relative to current orientation) and creating a vector of
//0s and 1s depending on whether each sensor is < half covered or not. This is then converted to an integer (as we are actually interested only
//in the number of unique visual states.
double relX, relY, relDist, dAng, s, dTheta, ang1, ang2;
//clear current visual state.
birdList[i].resetVisualState();
for(int j=0; j<nBirds; j++) {
if(i==j) continue;
relX = birdList[j].get_futureX(nFut)-cX;
relY = birdList[j].get_futureY(nFut)-cY;
relDist = sqrt(relX*relX+relY*relY);
dAng = acos((cos(cAng)*relX+sin(cAng)*relY)/relDist);
dTheta = atan(birdRad/relDist);
s = cos(cAng)*relY - sin(cAng)*relX;
if( s<0 ) dAng = 2*M_PI-dAng;
ang1 = dAng - dTheta; ang2 = dAng + dTheta;
if( ang1 < 0 ) {
birdList[i].addInterval(0,ang2);
birdList[i].addInterval(2*M_PI+ang1,2*M_PI);
} else if( ang2 > 2*M_PI ) {
birdList[i].addInterval(0,fmod(ang2,2*M_PI));
birdList[i].addInterval(ang1,2*M_PI);
} else {
birdList[i].addInterval(ang1,ang2);
}
}
Node *sI = birdList[i].get_visualState();
birdList[i].cleanUp(sI);
int ind1, ind2;
for(int k=0; k<nSensors; k++) sensorFrac[k]=0.0; //initialize.
while(sI->next->next != 0) {
ang1 = sI->value; ang2 = sI->next->value;
ind1 = floor(ang1/sensorRange); ind2 = floor(ang2/sensorRange);
if(ind2==nSensors) ind2--; //this happens if ang2 = 2pi (which can happen a lot).
if(ind1==ind2) {
sensorFrac[ind1] += (ang2-ang1)/sensorRange;
} else if(ind2-ind1==1) {
sensorFrac[ind1] += (sensorRef[ind1]-ang1)/sensorRange;
sensorFrac[ind2] += (ang2-sensorRef[ind1])/sensorRange;
} else {
sensorFrac[ind1] += (sensorRef[ind1]-ang1)/sensorRange;
sensorFrac[ind2] += (ang2-sensorRef[ind2-1])/sensorRange;
for(int y=ind1+1;y<ind2;y++) sensorFrac[y] = 1.0;
}
sI=sI->next->next;
}
//do final interval separately.
ang1 = sI->value; ang2 = sI->next->value;
ind1 = floor(ang1/sensorRange); ind2 = floor(ang2/sensorRange);
if(ind2==nSensors) ind2--; //this happens if ang2 = 2pi (which can happen a lot).
if(ind1==ind2) {
sensorFrac[ind1] += (ang2-ang1)/sensorRange;
} else if(ind2-ind1==1) {
sensorFrac[ind1] += (sensorRef[ind1]-ang1)/sensorRange;
sensorFrac[ind2] += (ang2-sensorRef[ind1])/sensorRange;
} else {
sensorFrac[ind1] += (sensorRef[ind1]-ang1)/sensorRange;
sensorFrac[ind2] += (ang2-sensorRef[ind2-1])/sensorRange;
for(int y=ind1+1;y<ind2;y++) sensorFrac[y] = 1.0;
}
int output = 0, multiplier = 1;
for(int y=0; y<nSensors; y++) {
if(sensorFrac[y]>0.5) output += multiplier;
multiplier *= 2;
}
return output;
}
void updateTree(double exploreX, double exploreY, double exploreO, Bird *bird, int bn, int nFut) {
double o,x,y;
if(checkCollision(bn,nFut,bird,exploreX,exploreY)) return;
int vs = getVisualState(bird,nFut,bn,exploreX,exploreY,exploreO);
possibleStates[counter] = vs;
counter++;
if(nFut < (nF-1)) {
for(int m=0; m<nMoves; m++) {
o = exploreO + moves[m];
x = exploreX + cos(o)*v*dt;
y = exploreY + sin(o)*v*dt;
updateTree(x,y,o,bird,bn,nFut+1);
}
} else {
return;
}
}
"birdClasses.h":
#ifndef BIRDCLASSES_H_INCLUDED
#define BIRDCLASSES_H_INCLUDED
#include <iostream>
#include <cmath>
using namespace std;
//DEFINE SOME GLOBAL PARAMETERS OF THE SIMULATION
const int nBirds = 50;
const int nF = 6; //number of future timesteps to consider.
const int nSteps = 200;
const double v = 20, dt = 0.1, birdRad = 0.2;
int numStates(int numMoves, int nFut) {
int num = 1; int multiplier = numMoves;
for(int i=1; i<nFut; i++) {
num += multiplier;
multiplier *= numMoves;
}
return num;
}
//Node class is just for a linked list (used in constructing the visual states),
class Node {
public:
int identifier; // 0 is left side of interval, 1 is right side
double value; //angular value.
Node *next; //pointer to the next interval.
void display(Node *start);
};
//printout linked list if necessary (mainly for debugging purposes).
void Node::display(Node *start) {
if(start != 0) {
double inter = start->value;
cout << inter << " ";
display(start->next);
}
}
//bird class.
class Bird {
double currX, currY;
double updatedX, updatedY;
double currOrientation;
double futureX[nF], futureY[nF];
Node *visualState;
public:
Bird() {
currOrientation=0.0; currX = 0.0; currY = 0.0;
visualState = new Node;
visualState->value = 0.0;
visualState->next = new Node;
visualState->next->value = 0.0;
visualState->next->next = 0;
}
Bird(double x, double y, double o) {
currX = x; currY = y; currOrientation = o;
visualState = new Node;
visualState->value = 0.0;
visualState->next = new Node;
visualState->next->value = 0.0;
visualState->next->next = 0;
}
void set_position(double x, double y) {
currX = x; currY = y;
}
double get_xPos() {
return currX;
}
double get_yPos() {
return currY;
}
double get_orientation() {
return currOrientation;
}
double get_futureX(int ts) {
return futureX[ts];
}
double get_futureY(int ts) {
return futureY[ts];
}
//return pointer to first node.
Node* get_visualState() {
return visualState;
}
void updateFuture() {
//use current orientation and position to update future positions.
for(int i=0; i<nF; i++) {
futureX[i] = currX + v*(i+1)*cos(currOrientation)*dt;
futureY[i] = currY + v*(i+1)*sin(currOrientation)*dt;
}
}
void update_Pos(double x, double y) {
updatedX = x;
updatedY = y;
}
//run this after all birds have updated positions:
void finishUpdate() {
currX = updatedX;
currY = updatedY;
}
void update_Orientation(double o) {
currOrientation += o;
}
//add the interval defined by [l r] to the visual state.
void addInterval(double l, double r) {
int placed = 0; double cL = 0.0; double cR = 0.0;
if(visualState->value==0.0 && visualState->next->value==0.0) { //then this is first interval to place.
visualState->value = l;
visualState->next->value = r;
placed = 1;
return;
}
Node *curr_L = visualState;
Node *prev_L = visualState;
while(placed==0) {
cL = curr_L->value;
cR = curr_L->next->value;
if(l<cL && r<cL) { //add new interval before this one.
Node *newRoot = new Node;
newRoot->value = l;
newRoot->identifier = 0;
newRoot->next = new Node;
newRoot->next->value = r;
newRoot->next->next = curr_L;
if(curr_L == visualState) {
visualState = newRoot;
} else {
prev_L->next->next = newRoot;
}
placed = 1;
} else if(l <= cL && r >= cR) {
curr_L->value = l;
curr_L->next->value = r;
placed = 1;
} else if(l <= cL && r <= cR) {
curr_L->value = l;
placed = 1;
} else if(l >= cL && r <= cR) {
placed = 1; //dont need to do anything.
} else if(l >= cL && l<=cR && r >= cR) {
curr_L->next->value = r;
placed = 1;
}
if(l > cR && r > cR) {
if(curr_L->next->next != 0) {
prev_L = curr_L;
curr_L = curr_L->next->next;
} else {
Node *newEndL = new Node;
newEndL->value = l;
newEndL->identifier = 0;
newEndL->next = new Node;
newEndL->next->value = r;
newEndL->next->identifier = 1;
newEndL->next->next = 0;
curr_L->next->next = newEndL;
placed = 1;
}
}
}
}
//remove any overlaps.
void cleanUp(Node *start) {
Node *NP, *NNP; NP = start->next->next;
if(NP==0) return;
NNP = start->next->next->next->next;
double cL = start->value, cR = start->next->value, nL = start->next->next->value, nR = start->next->next->next->value;
if(nL < cR) {
if(nR > cR) {
start->next->value = nR;
}
start->next->next = NNP;
}
if(NNP!=0) cleanUp(NP);
}
//reset the visual state.
void resetVisualState() {
Node *cNode = visualState;
Node *nNode = visualState->next;
while(nNode != 0) {
delete cNode;
cNode = nNode;
nNode = nNode->next;
}
delete cNode;
delete nNode;
visualState = new Node;
visualState->identifier = 0;
visualState->value = 0.0;
visualState->next = new Node;
visualState->next->identifier = 1;
visualState->next->value = 0.0;
visualState->next->next = 0;
return;
}
};
#endif // BIRDCLASSES_H_INCLUDED
"or if not just any suggestions for how I should actually debug this!"
You can try to set a catchpoint in gdb to catch the std::bad_alloc exception:
(gdb) catch throw bad_alloc
(See Setting Catchpoints)
If you are able to reproduce this bad_alloc in gdb, you can then look at the backtrace (bt) to see the possible reason for the exception.
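A rough sketch of such a session (the program name is just a placeholder; bt is issued once the catchpoint triggers):
gdb ./simulation
(gdb) catch throw bad_alloc
(gdb) run
(gdb) bt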
I think this is a logic bug and not necessarily memory related.
In void addInterval(double l, double r) you declare
Node *curr_L = visualState;
Node *prev_L = visualState;
These pointers will now point to whatever the member visualState is pointing to.
Later on you are changing visualState to point to a newly created Node:
Node *newRoot = new Node;
// ....
if(curr_L == visualState) {
visualState = newRoot;
but your pointers curr_L and prev_L will still point to whatever visualState was pointing to before. The only time you change those pointers is at
if(curr_L->next->next != 0) {
prev_L = curr_L;
curr_L = curr_L->next->next;
which is the same as
if(WHATEVER_VISUAL_STATE_USED_TO_POINT_TO->next->next != 0) {
prev_L = curr_L;
curr_L = curr_L->next->next;
Is this your intention? You can follow the assignment of curr_L by looking for *curr_L = * in your editor.
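A minimal standalone sketch of that pitfall (illustrative code only, not part of the original program):
#include <iostream>

int main() {
    int* visual = new int(1);
    int* curr = visual;          // curr copies the current value of visual
    visual = new int(2);         // visual now points to a new object...
    std::cout << *curr << '\n';  // ...but curr still points to the old one and prints 1
    delete curr;
    delete visual;
    return 0;
}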
I would suggest testing your code on a small data sample and making sure your code follows your intentions. Use a debugger or trace outputs. Use valgrind if you have access to it; I think you will appreciate valgrind.
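A typical invocation would look something like this (the binary name is just a placeholder):
valgrind --leak-check=full --track-origins=yes ./your_program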

Showing the result of Levenshtein Distance

Given two strings (s1, s2), Levenshtein Distance is the minimum number of operations needed to change s1 to s2 or vice versa.
I want to show the result of changing s1 to s2. For example, changing Sunday to Saturday needs 3 operations. I need to show S++u+day, where "+" stands for each operation needed.
Here is a JavaScript snippet that returns what you want. If you are familiar with the dynamic programming algorithm you should be able to follow this code. All the string manipulation of the return string r and the handling of min/currMin is what has changed from the original version.
function edits(t, s) {
var r = "";
if (s === t) {
return s;
}
var n = s.length, m = t.length;
if (n === 0 || m === 0) {
return "+".repeat(n + m);
}
var x, y, a, b, c, min = 0;
var p = new Array(n);
for (y = 0; y < n;)
p[y] = ++y;
for (x = 0; x < m; x++) {
var e = t.charCodeAt(x);
c = x;
b = x + 1;
var currMin = min;
min = n + 1;
for (y = 0; y < n; y++) {
a = p[y];
if (a < c || b < c) {
b = (a > b ? b + 1 : a + 1);
}
else {
if (e !== s.charCodeAt(y)) {
b = c + 1;
}
else {
b = c;
}
}
if (b < min) {
min = b;
}
p[y] = b;
c = a;
}
if (min > currMin) {
r += "+";
}
else {
r += t[x];
}
}
return r;
}
EDIT: The implementation above is a version optimized for speed and space, so it might be harder to read. The implementation below is a modified version of the JS implementation from Wikipedia and should be easier to follow.
function getEditDistance(a, b) {
if(a.length === 0) return "+".repeat(b.length);
if(b.length === 0) return "+".repeat(a.length);
var matrix = [];
// increment along the first column of each row
var i;
for(i = 0; i <= b.length; i++){
matrix[i] = [i];
}
// increment each column in the first row
var j;
for(j = 0; j <= a.length; j++){
matrix[0][j] = j;
}
var r = "", min = 0;
// Fill in the rest of the matrix
for(i = 1; i <= b.length; i++){
var currMin = min;
min = a.length + 1;
for(j = 1; j <= a.length; j++){
if(b.charAt(i-1) == a.charAt(j-1)){
matrix[i][j] = matrix[i-1][j-1];
} else {
matrix[i][j] = Math.min(matrix[i-1][j-1] + 1, // substitution
Math.min(matrix[i][j-1] + 1, // insertion
matrix[i-1][j] + 1)); // deletion
}
if (matrix[i][j] < min) {
min = matrix[i][j];
}
}
if (min > currMin) {
r += "+";
}
else {
r += b[i-1];
}
}
return r;
}
EDIT2: Added explanation of the algorithm and example output
Below is the Levenshtein matrix for the input strings "kitten" and "sitting". What I changed in the original algorithm is to check whether the current row's minimum value is larger than the previous row's minimum; if it is, there is an edit in the current row, so a "+" is added. If not, the edit cost for the current row is the same as for the previous one, no edit is necessary, and the current character is added to the result string instead. You can follow the algorithm row by row in the matrix (starting at row 1 and column 1).
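Reconstructed here as plain text (rows follow "sitting", columns "kitten"; the right-hand column shows each row's minimum and the character or "+" it contributes):
          k  i  t  t  e  n
       0  1  2  3  4  5  6
   s   1  1  2  3  4  5  6    row min 1 -> "+"
   i   2  2  1  2  3  4  5    row min 1 -> "i"
   t   3  3  2  1  2  3  4    row min 1 -> "t"
   t   4  4  3  2  1  2  3    row min 1 -> "t"
   i   5  5  4  3  2  2  3    row min 2 -> "+"
   n   6  6  5  4  3  3  2    row min 2 -> "n"
   g   7  7  6  5  4  4  3    row min 3 -> "+"
Reading down the right-hand column reproduces "+itt+n+", the first example output below.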
Examples:
> getEditDistance("kitten", "sitting");
'+itt+n+'
> getEditDistance("Sunday", "Saturday");
'S++u+day'

Passing Extra Data Into a Function

I am using the dlib optimization library for C++, specifically the following function:
template <
typename search_strategy_type,
typename stop_strategy_type,
typename funct,
typename funct_der,
typename T
>
double find_max (
search_strategy_type search_strategy,
stop_strategy_type stop_strategy,
const funct& f,
const funct_der& der,
T& x,
double max_f
);
Functions f and der are designed to take the vector of parameters being modified to obtain the maximum value of my function. However, my function being maximized has four parameters (one is my dataset and the other is fixed by me), and I can't pass these as inputs to my f and der functions because of the format they are supposed to have. How do I get this data into my functions? I am currently trying the code below (I hard-set the variable c, but for the vector xgrequ I read data from a file each time the function is evaluated).
//Function to be minimized
double mleGPD(const column_vector& p)
{
std::ifstream infile("Xm-EVT.csv");
long double swapRet;
std::string closeStr;
std::vector<double> histRet;
//Read in historical swap data file
if (infile.is_open())
{
while (!infile.eof())
{
infile >> swapRet;
histRet.push_back(swapRet);
}
}
sort(histRet.begin(), histRet.end());
std::vector<double> negRet;
//separate out losses
for (unsigned c = 0; c < histRet.size(); c++)
{
if (histRet[c] < 0)
{
negRet.push_back(histRet[c]);
}
}
std::vector<double> absValRet;
//make all losses positive to fit with EVT convention
for (unsigned s = 0; s < negRet.size(); s++)
{
absValRet.push_back(abs(negRet[s]));
}
std::vector<double> xminusu, xmu, xgrequ;
int count = absValRet.size();
double uPercent = .9;
int uValIndex = ceil((1 - uPercent)*count);
int countAbove = count - uValIndex;
double c = (double)absValRet[uValIndex - 1];
//looking at returns above u
for (unsigned o = 0; o < uValIndex; ++o)
{
xmu.push_back(absValRet[o] - c);
if (xmu[o] >= 0)
{
xgrequ.push_back(absValRet[o]);
xminusu.push_back(xmu[o]);
}
}
double nu = xgrequ.size();
double sum = 0.0;
double a = p(0);
double b = p(1);
for (unsigned h = 0; h < nu; ++h)
{
sum += log((1 / b)*pow(1 - a*((xgrequ[h] - c) / b), -1 + (1 / a)));
}
return sum;
}
//Derivative of function to be minimized
const column_vector mleGPDDer(const column_vector& p)
{
std::ifstream infile("Xm-EVT.csv");
long double swapRet;
std::string closeStr;
std::vector<double> histRet;
//Read in historical swap data file
if (infile.is_open())
{
while (!infile.eof())
{
infile >> swapRet;
histRet.push_back(swapRet);
}
}
sort(histRet.begin(), histRet.end());
std::vector<double> negRet;
//separate out losses
for (unsigned c = 0; c < histRet.size(); c++)
{
if (histRet[c] < 0)
{
negRet.push_back(histRet[c]);
}
}
std::vector<double> absValRet;
//make all losses positive to fit with EVT convention
for (unsigned s = 0; s < negRet.size(); s++)
{
absValRet.push_back(abs(negRet[s]));
}
std::vector<double> xminusu, xmu, xgrequ;
int count = absValRet.size();
double uPercent = .9;
int uValIndex = ceil((1 - uPercent)*count);
int countAbove = count - uValIndex;
double c = (double)absValRet[uValIndex - 1];
//looking at returns above u
for (unsigned o = 0; o < uValIndex; ++o)
{
xmu.push_back(absValRet[o] - c);
if (xmu[o] >= 0)
{
xgrequ.push_back(absValRet[o]);
xminusu.push_back(xmu[o]);
}
}
column_vector res(2);
const double a = p(0);
const double b = p(1);
double nu = xgrequ.size();
double sum1 = 0.0;
double sum2 = 0.0;
for (unsigned h = 0; h < nu; ++h)
{
sum1 += ((xgrequ[h]-c)/b)/(1-a*((xgrequ[h]-c)/b));
sum2 += log(1 - a*((xgrequ[h] - c) / b));
}
res(0) = sum1;//df/da
res(1) = sum2;//df/db
return res;
}
Here is what my actual function call looks like:
//Dlib max finding
column_vector start(2);
start = .1, .1; //starting point for a and b
find_max(bfgs_search_strategy(), objective_delta_stop_strategy(1e-6), mleGPD, mleGPDDer, start,100);
std::cout << "solution" << start << std::endl;
This kind of API is very common. It's almost always possible to pass any callable as f and der, not just free functions; that is, you can pass a custom class object with an operator() to it.
For example
struct MyF {
//int m_state;
// or other state variables, such as
std::vector<double> m_histRet;
// (default constructors will do)
double operator()(const column_vector& p) const {
return some_function_of(p, m_histRet); // some_function_of stands in for your computation
}
};
int main(){
. . .
MyF myf{42};
// or
MyF myf{someVectorContainingHistRet};
// then use myf as you would have used mleGPD
}
You need to initialize MyF and MyDer with the same state (std::vector<double> histRet, I presume), either as copies or as (const) references to the same state.
Edit: A fuller example:
struct MLGDPG_State {
std::vector<double> xgrequ;
// . . . and more you need in f or fder
};
MLGDPG_State makeMLGDPG_State(const std::string& filename){
std::ifstream infile(filename);
long double swapRet;
std::string closeStr;
std::vector<double> histRet;
//Read in historical swap data file
if (infile.is_open())
{
while (!infile.eof())
{
infile >> swapRet;
histRet.push_back(swapRet);
}
}
sort(histRet.begin(), histRet.end());
std::vector<double> negRet;
//separate out losses
for (unsigned c = 0; c < histRet.size(); c++)
{
if (histRet[c] < 0)
{
negRet.push_back(histRet[c]);
}
}
std::vector<double> absValRet;
//make all losses positive to fit with EVT convention
for (unsigned s = 0; s < negRet.size(); s++)
{
absValRet.push_back(abs(negRet[s]));
}
std::vector<double> xminusu, xmu, xgrequ;
int count = absValRet.size();
double uPercent = .9;
int uValIndex = ceil((1 - uPercent)*count);
int countAbove = count - uValIndex;
double c = (double)absValRet[uValIndex - 1];
//looking at returns above u
for (unsigned o = 0; o < uValIndex; ++o)
{
xmu.push_back(absValRet[o] - c);
if (xmu[o] >= 0)
{
xgrequ.push_back(absValRet[o]);
xminusu.push_back(xmu[o]);
}
}
return {std::move(xgrequ)};
// Or just 'return MLGDPG_State{xgrequ}' if you are scared of {} and move
}
//Functor class for the function to be minimized
struct MleGPD{
MLGDPG_State state;
double operator()(const column_vector& p) const {
const auto nu = state.xgrequ.size();
double sum = 0.0;
double a = p(0);
double b = p(1);
for (unsigned h = 0; h < nu; ++h)
{
sum += log((1 / b)*pow(1 - a*((state.xgrequ[h] - c) / b), -1 + (1 / a))); // c (the threshold u) would also need to be stored in MLGDPG_State
}
return sum;
}
};
Use the same pattern for a struct MleGPD_Derivative.
Usage:
const auto state = makeMLGDPG_State("Xm-EVT.csv");
const auto f = MleGPD{state};
const auto der = MleGPD_Derivative{state};
start = .1, .1; //starting point for a and b
find_max(bfgs_search_strategy(), objective_delta_stop_strategy(1e-6), f, der, start,100);
std::cout << "solution" << start << std::endl;
Note that for simple structs like these, it's often fine to use the default constructors, copy constructor, etc. Also see http://en.cppreference.com/w/cpp/language/aggregate_initialization
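As an alternative sketch (assuming your dlib version accepts arbitrary callables for funct and funct_der, which the templated signature suggests), the same shared state can be captured in lambdas instead of named functor structs:
const auto state = makeMLGDPG_State("Xm-EVT.csv");
// both lambdas capture the same state by reference and simply forward to the functors above
auto f = [&state](const column_vector& p) -> double { return MleGPD{state}(p); };
auto der = [&state](const column_vector& p) -> column_vector { return MleGPD_Derivative{state}(p); };
column_vector start(2);
start = .1, .1; // starting point for a and b
find_max(bfgs_search_strategy(), objective_delta_stop_strategy(1e-6), f, der, start, 100);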