Creating a Borland C++ Builder-compatible DLL in Visual C++
friends and comrades!
I have a program that was created in Borland C++ Builder. The program uses several DLLs that were also built with Borland C++ Builder. Now, however, we use Microsoft Visual Studio.
My task is to rewrite one of the existing DLLs: I need to add several new lines with math formulas. My boss gave me the .cpp file, originally written for Borland C++ Builder. I have added the formulas I need, but I'm not sure I got the preprocessor directives right. Do I build only the .cpp file, or are any other files needed?
Original .cpp file
#include <windows.h>
#include <stdio.h>
#include <math.h>
#pragma argsused
void FAR PASCAL _export userdll(float *x,char *c1,char *c2,char *c3,char *c4,char *c5)
{
x[501]=x[115]*9.81; // -- Mdv in N*m
x[502]=x[501]*x[122]/7023.5; // -- Ndv in hp
x[503]=x[502]*0.7355; // -- Ndv in kW
if(x[502]>1.0&&x[123]>4.0)x[510]=x[123]/x[502]; // -- Ce in kg/(hp*h)
else x[510]=0;
if(x[502]>1.0&&x[123]>4.0)x[511]=x[123]/x[503]; // -- Ce in kg/(kW*h)
else x[511]=0;
x[520]=x[34]*7.50064; // -- Pbar in mmHg
return;
}
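(For context: #pragma argsused and the _export keyword are Borland-specific. _export is what tells Borland's linker to export the function, and #pragma argsused merely suppresses unused-parameter warnings; MSVC recognizes neither, which is why the rewrite below switches to __declspec(dllexport).)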
Rewritten .cpp file
#define STRUCT
#include <windows.h>
#include <stdio.h>
#include <math.h>
#pragma argsused
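// Note: <windows.h> defines PASCAL as __stdcall, so a 32-bit MSVC build will export
// a decorated name (something like _userdll@24) unless the plain name is pinned down,
// e.g. with a .def file - see the build sketch after the error message below.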
__declspec(dllexport)
void FAR PASCAL userdll(float *x, char *c1, char *c2, char *c3, char *c4, char *c5)
{
x[501] = x[115] * 9.81; // -- Mdv in N*m
x[502] = x[501] * x[122] / 7023.5; // -- Ndv in hp
x[503] = x[502] * 0.7355; // -- Ndv in kW
if (x[502] > 1.0 && x[123] > 4.0)
    x[510] = x[123] / x[502]; // -- Ce in kg/(hp*h)
else x[510] = 0;
if (x[502] > 1.0 && x[123] > 4.0)
    x[511] = x[123] / x[503]; // -- Ce in kg/(kW*h)
else x[511] = 0;
x[520] = x[34] * 7.50064; // -- Pbar in mmHg
int chooseStr, chooseCol;
float temp; // temp - temperature from the sensor
float DAVLENIE; // pressure from the table in GOST R 52517-2005
float massiv[61][9] =
{
// --------------------------------------------------------------------------------------
// | Humidity, %                                            |                              |
// --------------------------------------------------------| Temperature, deg. Celsius    |
// | 100 | 90 | 80 | 70 | 60 | 50 | 40 | 30 | 20 |          |                              |
// --------------------------------------------------------------------------------------
{ 0.30, 0.27, 0.24, 0.21, 0.18, 0.15, 0.12, 0.09, 0.06 }, // -10
{ 0.33, 0.29, 0.26, 0.23, 0.20, 0.16, 0.13, 0.10, 0.07 }, // -9
{ 0.35, 0.32, 0.28, 0.25, 0.21, 0.18, 0.14, 0.11, 0.07 }, // -8
{ 0.38, 0.34, 0.30, 0.27, 0.23, 0.19, 0.15, 0.11, 0.08 }, // -7
{ 0.41, 0.36, 0.32, 0.28, 0.24, 0.20, 0.16, 0.12, 0.08 }, // -6
{ 0.43, 0.39, 0.35, 0.30, 0.26, 0.22, 0.17, 0.13, 0.09 }, // -5
{ 0.46, 0.41, 0.37, 0.32, 0.28, 0.23, 0.18, 0.14, 0.09 }, // -4
{ 0.49, 0.44, 0.39, 0.34, 0.30, 0.25, 0.20, 0.15, 0.10 }, // -3
{ 0.53, 0.47, 0.42, 0.37, 0.32, 0.26, 0.21, 0.16, 0.10 }, // -2
{ 0.56, 0.50, 0.45, 0.39, 0.34, 0.28, 0.22, 0.17, 0.11 }, // -1
{ 0.60, 0.54, 0.48, 0.42, 0.36, 0.30, 0.24, 0.18, 0.12 }, // 0
{ 0.64, 0.58, 0.51, 0.45, 0.39, 0.32, 0.26, 0.19, 0.13 }, // 1
{ 0.69, 0.62, 0.55, 0.48, 0.41, 0.34, 0.28, 0.21, 0.14 }, // 2
{ 0.74, 0.66, 0.59, 0.52, 0.44, 0.37, 0.30, 0.22, 0.15 }, // 3
{ 0.79, 0.71, 0.63, 0.55, 0.47, 0.40, 0.32, 0.24, 0.16 }, // 4
{ 0.85, 0.76, 0.68, 0.59, 0.51, 0.42, 0.34, 0.25, 0.17 }, // 5
{ 0.91, 0.82, 0.73, 0.64, 0.55, 0.46, 0.36, 0.27, 0.18 }, // 6
{ 0.98, 0.88, 0.78, 0.68, 0.59, 0.49, 0.39, 0.29, 0.20 }, // 7
{ 1.05, 0.94, 0.84, 0.73, 0.63, 0.52, 0.42, 0.31, 0.21 }, // 8
{ 1.12, 1.01, 0.90, 0.78, 0.67, 0.56, 0.45, 0.34, 0.22 }, // 9
{ 1.20, 1.08, 0.96, 0.84, 0.72, 0.60, 0.48, 0.36, 0.24 }, // 10
{ 1.28, 1.16, 1.03, 0.90, 0.77, 0.64, 0.51, 0.39, 0.26 }, // 11
{ 1.37, 1.24, 1.10, 0.96, 0.82, 0.69, 0.55, 0.41, 0.27 }, // 12
{ 1.47, 1.32, 1.17, 1.03, 0.88, 0.73, 0.59, 0.44, 0.29 }, // 13
{ 1.57, 1.41, 1.25, 1.10, 0.94, 0.78, 0.63, 0.47, 0.31 }, // 14
{ 1.67, 1.51, 1.34, 1.17, 1.00, 0.84, 0.67, 0.50, 0.33 }, // 15
{ 1.79, 1.61, 1.43, 1.25, 1.07, 0.89, 0.71, 0.54, 0.36 }, // 16
{ 1.90, 1.71, 1.52, 1.33, 1.14, 0.95, 0.76, 0.57, 0.38 }, // 17
{ 2.03, 1.83, 1.62, 1.42, 1.22, 1.01, 0.81, 0.61, 0.41 }, // 18
{ 2.16, 1.94, 1.73, 1.51, 1.30, 1.08, 0.86, 0.65, 0.43 }, // 19
{ 2.30, 2.07, 1.84, 1.61, 1.38, 1.15, 0.92, 0.69, 0.46 }, // 20
{ 2.45, 2.20, 1.96, 1.71, 1.47, 1.22, 0.98, 0.73, 0.49 }, // 21
{ 2.60, 2.34, 2.08, 1.82, 1.56, 1.30, 1.04, 0.78, 0.52 }, // 22
{ 2.77, 2.49, 2.21, 1.94, 1.66, 1.38, 1.11, 0.83, 0.55 }, // 23
{ 2.94, 2.65, 2.35, 2.06, 1.76, 1.47, 1.18, 0.88, 0.59 }, // 24
{ 3.12, 2.81, 2.50, 2.19, 1.87, 1.56, 1.25, 0.94, 0.62 }, // 25
{ 3.32, 2.98, 2.65, 2.32, 1.99, 1.66, 1.33, 0.99, 0.66 }, // 26
{ 3.52, 3.17, 2.82, 2.46, 2.11, 1.76, 1.41, 1.06, 0.70 }, // 27
{ 3.73, 3.36, 2.99, 2.61, 2.24, 1.87, 1.49, 1.12, 0.75 }, // 28
{ 3.96, 3.56, 3.17, 2.77, 2.38, 1.98, 1.58, 1.19, 0.79 }, // 29
{ 4.20, 3.78, 3.36, 2.94, 2.52, 2.10, 1.68, 1.26, 0.84 }, // 30
{ 4.45, 4.01, 3.56, 3.12, 2.67, 2.23, 1.78, 1.34, 0.89 }, // 31
{ 4.72, 4.25, 3.78, 3.30, 2.83, 2.36, 1.89, 1.42, 0.94 }, // 32
{ 5.00, 4.50, 4.00, 3.50, 3.00, 2.50, 2.00, 1.50, 1.00 }, // 33
{ 5.29, 4.76, 4.24, 3.71, 3.18, 2.65, 2.12, 1.59, 1.06 }, // 34
{ 5.60, 5.04, 4.48, 3.92, 3.36, 2.80, 2.24, 1.68, 1.12 }, // 35
{ 5.93, 5.34, 4.74, 4.15, 3.56, 2.97, 2.37, 1.78, 1.19 }, // 36
{ 6.27, 5.64, 5.02, 4.39, 3.76, 3.14, 2.51, 1.88, 1.25 }, // 37
{ 6.63, 5.97, 5.30, 4.64, 3.98, 3.32, 2.65, 1.99, 1.33 }, // 38
{ 7.01, 6.31, 5.61, 4.90, 4.20, 3.50, 2.80, 2.10, 1.40 }, // 39
{ 7.40, 6.66, 5.92, 5.18, 4.44, 3.70, 2.96, 2.22, 1.48 }, // 40
{ 7.81, 7.03, 6.25, 5.47, 4.69, 3.91, 3.12, 2.34, 1.56 }, // 41
{ 8.24, 7.42, 6.59, 5.77, 4.94, 4.12, 3.30, 2.47, 1.65 }, // 42
{ 8.69, 7.82, 6.95, 6.08, 5.21, 4.34, 3.47, 2.61, 1.74 }, // 43
{ 9.15, 8.24, 7.32, 6.41, 5.49, 4.58, 3.66, 2.75, 1.83 }, // 44
{ 9.63, 8.67, 7.71, 6.74, 5.78, 4.82, 3.85, 2.89, 1.93 }, // 45
{ 10.13, 9.12, 8.11, 7.09, 6.08, 5.07, 4.05, 3.04, 2.03 }, // 46
{ 10.65, 9.58, 8.52, 7.45, 6.39, 5.33, 4.26, 3.20, 2.13 }, // 47
{ 11.18, 10.07, 8.95, 7.83, 6.71, 5.59, 4.47, 3.36, 2.24 }, // 48
{ 11.73, 10.56, 9.39, 8.21, 7.04, 5.87, 4.69, 3.52, 2.35 }, // 49
{ 12.30, 11.07, 9.84, 8.61, 7.38, 6.15, 4.92, 3.69, 2.46 } // 50
};
temp = x[33];
chooseStr = roundf(temp) + 10; // round to get the row index into massiv
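// Note: massiv covers rounded temperatures from -10 to +50 deg C (61 rows), so this
// indexing is only valid while roundf(temp) stays in that range; anything outside it
// would read past the array bounds.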
if (x[32] <= 100 && x[32] > 95)
chooseCol = 0;
if (x[32] <= 95 && x[32] > 85)
chooseCol = 1;
if (x[32] <= 85 && x[32] > 75)
chooseCol = 2;
if (x[32] <= 75 && x[32] > 65)
chooseCol = 3;
if (x[32] <= 65 && x[32] > 55)
chooseCol = 4;
if (x[32] <= 55 && x[32] > 45)
chooseCol = 5;
if (x[32] <= 45 && x[32] > 35)
chooseCol = 6;
if (x[32] <= 35 && x[32] > 25)
chooseCol = 7;
if (x[32] <= 25)
chooseCol = 8;
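// Note: if x[32] is above 100, none of the branches above executes and chooseCol is
// used uninitialized below. A compact alternative (sketch, assuming x[32] holds the
// relative humidity in percent) would be:
//   chooseCol = (int)((100.0f - x[32] + 5.0f) / 10.0f);  // then clamp to [0, 8]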
DAVLENIE = massiv[chooseStr][chooseCol]; // pressure from the GOST R 52517-2005 table
x[530] = ((x[34] - DAVLENIE) / 100.26)*sqrt(288 / (273 + x[33])); // indicated power coefficient K (formula 3)
x[531] = 1.15*x[530] - 0.15; // correction factor Ai (formula 2)
x[535] = x[502] / x[531]; // corrected power, hp (formula 1)
x[536] = x[503] / x[531]; // corrected power, kW (formula 1)
x[533] = x[501] / x[531]; // corrected torque, N*m (formula 4)
x[537] = x[123] / x[530]; // corrected fuel consumption, kg/h (formula 5)
x[539] = (x[123] * 1000 * x[531]) / (x[503] * x[530]); // corrected specific fuel consumption Cei, g/(kW*h) (formula 6)
return;
}
If possible, please check this code. I got several errors after copying the DLL into the program folder and running the program. The program (written in Borland C++) can't use my DLL (built in Visual Studio).
One of the errors is something like this:
"...Missing MSVCR120D.dll file..."
Thank you in advance, friends!
Related
Backpropagation giving strange values in C++ neural network
I am trying to solve the iris data set with a neural network I wrote in C++ from scratch. The data set has 150 rows divided into 3 different flowers, with 4 feature columns and a fifth for the flower type, which I converted to 0, 1, or 2.

Problem: Whenever I run the network it goes through a test set of 90 rows, split into 3 different flowers (30, 30, 30). Every time I run an epoch it shows output values that are all very high, like (0.99, 0.99, 0.98). It does that for a few epochs and then eventually settles down to more sensible values. But in the later epochs, when I'm doing say 50 epochs, the values for the correct flower get closer and closer to 1.00 for one flower, then do the same for the next flower and the flower after that, and then start that process over - instead of starting close to 1.0, which would indicate that the network had learned and the weights were properly adjusted.

Console output for running epoch() (which runs forward_prop(), back_prop() and then update_weights()); after each epoch it prints out the output values for the network. Printing at the end of the epoch means that the actual values are always {0, 0, 1}. When I ran the network for 1000 epochs, the output values never changed after epoch 15. Why is it doing this?

File parsed, weights and bias randomized
Epoch 1  0.97 0.97 0.99
Epoch 2  0.93 0.94 0.99
Epoch 3  0.64 0.70 0.99
Epoch 4  0.27 0.36 0.99
Epoch 5  0.22 0.31 0.99
Epoch 6  0.21 0.30 0.99
Epoch 7  0.21 0.30 0.98
Epoch 8  0.21 0.30 0.98
Epoch 9  0.21 0.30 0.96
Epoch 10 0.21 0.30 0.88
Epoch 11 0.21 0.30 0.66
Epoch 12 0.21 0.30 0.56
Epoch 13 0.21 0.30 0.54
Epoch 14 0.21 0.30 0.53
Epoch 15 0.21 0.30 0.53
completed successfully

End console output. Example of epoch 9, per-row output values (5 rows per line):

0.21 0.30 0.98   0.21 0.30 0.98   0.22 0.29 0.98   0.23 0.29 0.98   0.24 0.28 0.98
0.25 0.28 0.98   0.25 0.27 0.98   0.26 0.27 0.98   0.27 0.27 0.98   0.28 0.26 0.98
0.29 0.26 0.98   0.30 0.26 0.98   0.31 0.26 0.98   0.32 0.25 0.98   0.34 0.25 0.98
0.35 0.24 0.98   0.36 0.24 0.98   0.37 0.24 0.98   0.38 0.24 0.98   0.40 0.23 0.98
0.41 0.23 0.98   0.42 0.23 0.98   0.43 0.23 0.98   0.44 0.22 0.98   0.45 0.22 0.98
0.46 0.22 0.98   0.48 0.22 0.98   0.49 0.22 0.98   0.50 0.21 0.98   0.51 0.21 0.98
0.53 0.20 0.98   0.52 0.21 0.98   0.50 0.22 0.98   0.49 0.23 0.98   0.48 0.24 0.98
0.47 0.24 0.98   0.46 0.25 0.98   0.45 0.26 0.98   0.44 0.27 0.98   0.43 0.28 0.98
0.42 0.29 0.98   0.42 0.30 0.98   0.41 0.32 0.98   0.40 0.33 0.98   0.39 0.34 0.98
0.38 0.35 0.98   0.38 0.36 0.98   0.37 0.37 0.98   0.36 0.38 0.98   0.35 0.40 0.98
0.35 0.41 0.98   0.34 0.42 0.98   0.34 0.43 0.98   0.33 0.44 0.98   0.32 0.46 0.98
0.32 0.47 0.98   0.31 0.48 0.98   0.31 0.49 0.98   0.30 0.50 0.98   0.30 0.51 0.97
0.30 0.52 0.98   0.29 0.51 0.98   0.29 0.50 0.98   0.28 0.49 0.98   0.28 0.48 0.98
0.27 0.47 0.98   0.27 0.46 0.97   0.27 0.45 0.98   0.26 0.44 0.98   0.26 0.43 0.98
0.26 0.42 0.98   0.25 0.41 0.98   0.25 0.40 0.98   0.25 0.40 0.98   0.24 0.39 0.98
0.24 0.38 0.98   0.24 0.37 0.98   0.24 0.37 0.98   0.23 0.36 0.98   0.23 0.35 0.98
0.23 0.35 0.98   0.23 0.34 0.98   0.22 0.33 0.98   0.22 0.33 0.98   0.22 0.32 0.98
0.22 0.32 0.98   0.21 0.31 0.98   0.21 0.31 0.98   0.21 0.30 0.98   0.21 0.30 0.98

So with epoch 9, the first 30 rows have an actual value of {1, 0, 0}, the next 30 have {0, 1, 0}, and the last 30 have {0, 0, 1}. Notice how the output inches closer and closer for each row of data, yet the last column stays the same, while not staying the same across all epochs. This is strange and I am not sure why it is happening.
The basic structure of the program: main() declares and initializes a Neural_Network class with an input, hidden, and output layer. Calling train() then executes epoch() in a loop the number of times specified in the call to train(). epoch() itself runs forward_prop(), back_prop(), and finally update_network(); there are also a few variables such as arrays for the expected and actual output values. The vectors bias, values, weights, and errors each hold the values for the network separately, which I found better for readability. The first layer (position [0]) of the weights vector is empty; the input values use the weights in the hidden layer, and the hidden layer uses the weights in the output layer. Each weight is a vector of weights equal to the number of nodes in the previous layer, and position [0] of that vector is used for the node at position [0] in the previous layer.

#include <iostream>
#include <cstdlib>
#include <iomanip>
#include <cmath>
#include <fstream>
#include <sstream>
#include <vector>
#include <array>
#include <string>
#include <numeric>

class Neural_Network
{
private:
    std::vector<std::array<double, 4>> training_set; // 30 setosa -> 30 versicolor -> 30 virginica
    std::vector<std::vector<double>> values, bias, errors;
    std::vector<std::vector<std::vector<double>>> weights;
    size_t net_size = 0;

    double dot_val(std::vector<double> val, std::vector<double> weights);
    double sigmoid(const double num);
    double random_number();
    double transfer_derivitive(double num);
    void initialize(std::vector<size_t> layers);
    void forward_prop(std::vector<double>& expected);
    void back_prop(std::vector<double> expected);
    void update_network(double l_rate);

public:
    Neural_Network(const std::vector<std::array<double, 4>>& data);
    ~Neural_Network() = default;
    void train(size_t epochs = 1);
    void display();
};

Neural_Network::Neural_Network(const std::vector<std::array<double, 4>>& data)
    : training_set{ data }
{
    initialize({ 4, 6, 3 });
}

double Neural_Network::dot_val(std::vector<double> val, std::vector<double> weights)
{
    return std::inner_product(val.begin(), val.end(), weights.begin(), 0.0);
}

double Neural_Network::sigmoid(const double num)
{
    return (1 / (1 + exp(-num)));
}

double Neural_Network::random_number()
{
    return (double)rand() / (double)RAND_MAX;
}

double Neural_Network::transfer_derivitive(double num)
{
    return num * (1 - num);
}

void Neural_Network::display()
{
    std::cout << std::fixed << std::setprecision(2) << "values:\n";
    for (size_t i = 0; i < values.size(); ++i)
    {
        std::cout << "layer " << i << "\n[ ";
        for (size_t j = 0; j < values[i].size(); ++j)
            std::cout << values.at(i).at(j) << " ";
        std::cout << " ]\n";
    }
}

void Neural_Network::initialize(std::vector<size_t> layers)
{
    for (size_t i = 0; i < layers.size(); ++i)
    {
        std::vector<double> v{}, b{}, e{};
        std::vector<std::vector<double>> w{};
        // initializing the nodes in the layers
        for (size_t j = 0; j < layers.at(i); ++j)
        {
            v.push_back(0);
            b.push_back(random_number());
            e.push_back(1);
            std::vector<double> inner_w{};
            if (i != 0) // checking if the current layer is the input
                for (size_t k = 0; k < layers.at(i - 1); ++k) // adding weights to the current layer to the amount of nodes in the next layer
                    inner_w.push_back(random_number()); // adding a weight to the current layer for a node in the next layer
            w.push_back(inner_w);
        }
        values.push_back(v);
        bias.push_back(b);
        errors.push_back(e);
        weights.push_back(w);
        ++net_size;
    }
    std::cout << "initialize network success" << std::endl;
}

void Neural_Network::train(size_t epoch_count)
{
    const size_t count = epoch_count;
    while (epoch_count > 0)
    {
        std::cout << "\nEpoch " << 1 + (count - epoch_count) << std::endl;
        for (size_t i = 0; i < 90; ++i)
        {
            std::vector<double> expected{ 0, 0, 0 };
            if (i < 30)
                expected[0] = 1;
            else if (i < 60)
                expected[1] = 1;
            else if (i < 90)
                expected[2] = 1;
            for (size_t j = 0; j < values[0].size(); ++j) // Initialize input layer values
                values.at(0).at(j) = training_set.at(i).at(j); // value[0] is the input layer, j is the node
            forward_prop(expected);
            back_prop(expected);
            update_network(0.05);
        }
        display();
        --epoch_count;
    }
}

void Neural_Network::forward_prop(std::vector<double>& expected)
{
    for (size_t i = 1; i < net_size - 1; ++i) // looping through every layer except the first and last
        for (size_t j = 0; j < values.at(i).size(); ++j) // looping through every node in the current non input/output layer
            values.at(i).at(j) = sigmoid(dot_val(values.at(i - 1), weights.at(i).at(j)) + bias.at(i).at(j)); // assigning node j of layer i a sigmoided val that is the dotval + the associated bias
    for (size_t i = 0; i < values.at(net_size - 1).size(); ++i) // looping through the output layer
        values.at(net_size - 1).at(i) = sigmoid(dot_val(values.at(net_size - 2), weights.at(net_size - 1).at(i)) + bias.at(net_size - 1).at(i));
}

void Neural_Network::back_prop(std::vector<double> expected) // work backwards from the output layer
{
    std::vector<double> output_errors{};
    for (size_t i = 0; i < errors.at(net_size - 1).size(); ++i) // looping through the output layer
    {
        output_errors.push_back(expected.at(i) - values.at(net_size - 1).at(i));
        errors.at(net_size - 1).at(i) = output_errors.at(i) * transfer_derivitive(values.at(net_size - 1).at(i));
    }
    // output layer finished
    for (size_t i = net_size - 2; i > 0; i--) // looping through the non output layers backwards
    {
        std::vector<double> layer_errors{};
        for (size_t j = 0; j < errors.at(i).size(); ++j) // looping through the current layer's nodes
        {
            double error = 0;
            for (size_t k = 0; k < weights.at(i + 1).size(); ++k) // looping through the current set of weights
                error += errors.at(i).at(j) * weights.at(i + 1).at(k).at(j);
            layer_errors.push_back(error);
        }
        for (size_t j = 0; j < layer_errors.size(); ++j)
            errors.at(i).at(j) = layer_errors.at(j) * transfer_derivitive(values.at(i).at(j));
    }
}

void Neural_Network::update_network(double l_rate)
{
    for (size_t i = 1; i < net_size; ++i)
    {
        for (size_t j = 0; j < weights.at(i).size(); ++j)
        {
            for (size_t k = 0; k < weights.at(i).at(j).size(); ++k)
                weights.at(i).at(j).at(k) += l_rate * errors.at(i).at(j) * values.at(i - 1).at(j);
            bias.at(i).at(j) += l_rate * errors.at(i).at(j);
        }
    }
}

int main()
{
    std::vector<std::array<double, 4>> data = {
        {5.1, 3.5, 1.4, 0.2}, {4.9, 3, 1.4, 0.2}, {4.7, 3.2, 1.3, 0.2}, {4.6, 3.1, 1.5, 0.2}, {5, 3.6, 1.4, 0.2},
        {5.4, 3.9, 1.7, 0.4}, {4.6, 3.4, 1.4, 0.3}, {5, 3.4, 1.5, 0.2}, {4.4, 2.9, 1.4, 0.2}, {4.9, 3.1, 1.5, 0.1},
        {5.4, 3.7, 1.5, 0.2}, {4.8, 3.4, 1.6, 0.2}, {4.8, 3, 1.4, 0.1}, {4.3, 3, 1.1, 0.1}, {5.8, 4, 1.2, 0.2},
        {5.7, 4.4, 1.5, 0.4}, {5.4, 3.9, 1.3, 0.4}, {5.1, 3.5, 1.4, 0.3}, {5.7, 3.8, 1.7, 0.3}, {5.1, 3.8, 1.5, 0.3},
        {5.4, 3.4, 1.7, 0.2}, {5.1, 3.7, 1.5, 0.4}, {4.6, 3.6, 1, 0.2}, {5.1, 3.3, 1.7, 0.5}, {4.8, 3.4, 1.9, 0.2},
        {5, 3, 1.6, 0.2}, {5, 3.4, 1.6, 0.4}, {5.2, 3.5, 1.5, 0.2}, {5.2, 3.4, 1.4, 0.2}, {4.7, 3.2, 1.6, 0.2},
        {7, 3.2, 4.7, 1.4}, {6.4, 3.2, 4.5, 1.5}, {6.9, 3.1, 4.9, 1.5}, {5.5, 2.3, 4, 1.3}, {6.5, 2.8, 4.6, 1.5},
        {5.7, 2.8, 4.5, 1.3}, {6.3, 3.3, 4.7, 1.6}, {4.9, 2.4, 3.3, 1}, {6.6, 2.9, 4.6, 1.3}, {5.2, 2.7, 3.9, 1.4},
        {5, 2, 3.5, 1}, {5.9, 3, 4.2, 1.5}, {6, 2.2, 4, 1}, {6.1, 2.9, 4.7, 1.4}, {5.6, 2.9, 3.6, 1.3},
        {6.7, 3.1, 4.4, 1.4}, {5.6, 3, 4.5, 1.5}, {5.8, 2.7, 4.1, 1}, {6.2, 2.2, 4.5, 1.5}, {5.6, 2.5, 3.9, 1.1},
        {5.9, 3.2, 4.8, 1.8}, {6.1, 2.8, 4, 1.3}, {6.3, 2.5, 4.9, 1.5}, {6.1, 2.8, 4.7, 1.2}, {6.4, 2.9, 4.3, 1.3},
        {6.6, 3, 4.4, 1.4}, {6.8, 2.8, 4.8, 1.4}, {6.7, 3, 5, 1.7}, {6, 2.9, 4.5, 1.5}, {5.7, 2.6, 3.5, 1},
        {6.3, 3.3, 6, 2.5}, {5.8, 2.7, 5.1, 1.9}, {7.1, 3, 5.9, 2.1}, {6.3, 2.9, 5.6, 1.8}, {6.5, 3, 5.8, 2.2},
        {7.6, 3, 6.6, 2.1}, {4.9, 2.5, 4.5, 1.7}, {7.3, 2.9, 6.3, 1.8}, {6.7, 2.5, 5.8, 1.8}, {7.2, 3.6, 6.1, 2.5},
        {6.5, 3.2, 5.1, 2}, {6.4, 2.7, 5.3, 1.9}, {6.8, 3, 5.5, 2.1}, {5.7, 2.5, 5, 2}, {5.8, 2.8, 5.1, 2.4},
        {6.4, 3.2, 5.3, 2.3}, {6.5, 3, 5.5, 1.8}, {7.7, 3.8, 6.7, 2.2}, {7.7, 2.6, 6.9, 2.3}, {6, 2.2, 5, 1.5},
        {6.9, 3.2, 5.7, 2.3}, {5.6, 2.8, 4.9, 2}, {7.7, 2.8, 6.7, 2}, {6.3, 2.7, 4.9, 1.8}, {6.7, 3.3, 5.7, 2.1},
        {7.2, 3.2, 6, 1.8}, {6.2, 2.8, 4.8, 1.8}, {6.1, 3, 4.9, 1.8}, {6.4, 2.8, 5.6, 2.1}, {7.2, 3, 5.8, 1.6}
    };
    Neural_Network network{ data };
    network.train(1);
    return 0;
}

Edit: changed the program to use .at() instead of [] for accessing std::vector.
I hope I made everything clear; if not, let me know. Note: I had this question on Stack Overflow, was told to move it to codereview.stackexchange, and then was told to move it back to Stack Overflow while reframing it with more detail. Please don't tell me to move this question a third time. If there is something wrong with the way I am asking, please give me a chance to change it or add information so I can get some help. Please and thank you.
One obvious mistake is in dot_val:

double Neural_Network::dot_val(std::vector<double> val, std::vector<double> weights)
{
    double output;  // <-- This is uninitialized
    for (size_t i = 0; i < weights.size(); ++i)
        output += val[i] * weights[i];
    return output;  // <-- Who knows what this will be
}

You are using an uninitialized variable. Either initialize output to 0, or you can use std::inner_product:

#include <numeric>
//...
double Neural_Network::dot_val(std::vector<double> val, std::vector<double> weights)
{
    return std::inner_product(val.begin(), val.end(), weights.begin(), 0.0);
}
Capture undefined number of items in Regex
I want to capture the following data:

[TREND, JOHN, DATA1, 0.17, 33.34, 26, 33.33, 0.25, 33.33, DATA2, 0.26, 20.0, 261, 20.0, 0.234, 20.0, 0.1, 20.0, 5, 20.0, FINAL, 245]

From the following text:

Market #TREND
Person: JOHN
DATA1
1) 0.17 (33.34%)
2) 26 (33.33%)
3) 0.25 (33.33%)
* random text here
DATA2
1) 0.26 (20.0%)
2) 261 (20.0%)
3) 0.234 (20.0%)
4) 0.1 (20.0%)
5) 5 (20.0%)
* qsdfdsf random dsfg text random here
FINAL
245
Signature

I have written the following regex, which works properly in this precise example:

#(TREND)\n+\w*:\s*(JOHN)\n+(DATA1)\n\d\S\s(\d+.?\d*)\s\((\d+.?\d*)%\)\s*\n\d\S\s(\d+.?\d*)\s\((\d+.?\d*)%\)\s*\n\d\S\s(\d+.?\d*)\s\((\d+.?\d*)%\)\s*\n.*\n*(DATA2)\n\d\S\s(\d+.?\d*)\s\((\d+.?\d*)%\)\s*\n\d\S\s(\d+.?\d*)\s\((\d+.?\d*)%\)\s*\n\d\S\s(\d+.?\d*)\s\((\d+.?\d*)%\)\s*\n\d\S\s(\d+.?\d*)\s\((\d+.?\d*)%\)\s*\n\d\S\s(\d+.?\d*)\s\((\d+.?\d*)%\)\s*\n.*\n*(FINAL)\n(\d+)

I would like to make it extensible to a variable number of items (from 1 to 10) for DATA1 and DATA2:

Market #TREND
Person: JOHN
DATA1
1) 0.17 (33.34%)
2) 26 (33.33%)
3) 0.25 (33.33%)
4) 0.11 (40.40%)
5) 0.222 (50.50%)
* random text here
DATA2
1) 0.26 (20.0%)
2) 261 (20.0%)
3) 0.234 (20.0%)
* qsdfdsf random dsfg text random here
FINAL
245
Signature
Calculating paystring for loan data
Example:
Input: cpi = 100.0, payments = [100.0, 94.0, 90.0, 100.0, 200.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0]
Output: paystring = [0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]

Explanation: Since the first payment was 100.0, which is greater than or equal to cpi - 5.0 (95.0), the first element in the output is 0. The next element is 1 since 94.0 is less than cpi - 5.0 (95.0), i.e. a missed payment. The next element, 90.0, is also less than 95.0, another missed payment, so now we are at 2 (or 2 total missed payments). The next element is 100, which counts as one payment made: it covers the payment that was due, but still not the two missed payments from the prior months, so we are still at 2. The process continues from there.

I have this so far:

double cpi = 100.0;
std::vector<double> payments = { 100.0, 94.0, 90.0, 100.0, 200.0, 300.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0 };
std::vector<int> paystring(payments.size(), 0);
int count = 0;
for (int i = 0; i < payments.size(); ++i) {
    if (payments[i] <= cpi - 5.0) {
        paystring[i] = ++count;
    }
    else {
        paystring[i] = count;
    }
}
for (auto it : paystring)
    std::cout << it << " ";

This is not correct, though, since it fails to update count when I make, say, the full payment or more than the due amount (cpi). I just want to know what I need to change in my logic to make this work. Let me know if the example provided is unclear. For example, say I have:

Input: cpi = 100.0, payments = [100.0, 94.0, 90.0, 100.0, 200.0, 100.0, 300.0, 100.0, 100.0, 100.0, 100.0, 100.0]
Output: [0, 1, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0]

But I get [0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]. Here are a few more examples that are correct:
You have a condition that if a payment is too low, a penalty is added: the customer is one month behind. Add another condition: if an overpayment is made, for example $200, you want to give the customer credit, which puts the customer one month ahead. Then add a condition that the customer cannot be ahead by a negative count.

std::vector<double> payments = {
    100, 94, 90, 100, 200, 100, 300, 100, 100, 100, 100, 100
    // 0   1   1    0   -1    0   -2    0    0    0    0    0  <- penalty
    // 0   1   2    2    1    1    0    0    0    0    0    0  <- penalty sum
};
double cpi = 100.0;
// count and paystring as declared in the question
for (int i = 0; i < payments.size(); ++i)
{
    double payment = payments[i];
    if (payment <= (cpi - 5.0))
    {
        // one month behind on payment
        count++;
    }
    while ((payment > cpi) && count)
    {
        // customer made a large payment: allow count to decrease,
        // but count cannot go below zero
        count--;
        payment -= cpi;
    }
    paystring[i] = count;
}

Output for 100, 94, 90, 100, 200, 100, 100, 100, 100, 100, 100, 100:
0 1 2 2 2 2 2 2 2 2 2 2  // expected output
0 1 2 2 1 1 1 1 1 1 1 1  // result

Output for 100, 94, 90, 100, 200, 100, 300, 100, 100, 100, 100, 100:
0 1 2 2 1 0 0 0 0 0 0 0  // expected output
0 1 2 2 1 1 0 0 0 0 0 0  // result

My output is not the same; maybe the expected output is incorrect, or you left something out. Note the 6th payment is 100, so there shouldn't be any change for that index.
Python Pandas: Create Groups by Range using map
I have a large data set where I am looking to create groups based on the cumulative sum percent of the total. I have gotten this to work by using the map function, see the code below. Is there a better way to do this, say, if I wanted to make my groups even more granular? For example, I am now looking at 5% increments; what if I want to look at 1% increments? I am wondering if there is another way where I don't have to explicitly enter them into my codethem function.

def codethem(dl):
    if dl < .05: return '5'
    elif .05 < dl <= .1: return '10'
    elif .1 < dl <= .15: return '15'
    elif .15 < dl <= .2: return '20'
    elif .2 < dl <= .25: return '25'
    elif .25 < dl <= .3: return '30'
    elif .3 < dl <= .35: return '35'
    elif .35 < dl <= .4: return '40'
    elif .4 < dl <= .45: return '45'
    elif .45 < dl <= .5: return '50'
    elif .5 < dl <= .55: return '55'
    elif .55 < dl <= .6: return '60'
    elif .6 < dl <= .65: return '65'
    elif .65 < dl <= .7: return '70'
    elif .7 < dl <= .75: return '75'
    elif .75 < dl <= .8: return '80'
    elif .8 < dl <= .85: return '85'
    elif .85 < dl <= .9: return '90'
    elif .9 < dl <= .95: return '95'
    elif .95 < dl <= 1: return '100'
    else: return 'None'

my_df['code'] = my_df['sales_csum_aspercent'].map(codethem)

Thank you!
There is a special method for that: pd.cut()

Demo: create a random DF:

In [393]: df = pd.DataFrame({'a': np.random.rand(10)})

In [394]: df
Out[394]:
          a
0  0.860256
1  0.399267
2  0.209185
3  0.773647
4  0.294845
5  0.883161
6  0.985758
7  0.559730
8  0.723033
9  0.126226

We should specify bins when calling pd.cut():

In [404]: np.linspace(0, 1, 11)
Out[404]: array([ 0. ,  0.1,  0.2,  0.3,  0.4,  0.5,  0.6,  0.7,  0.8,  0.9,  1. ])

In [395]: pd.cut(df.a, bins=np.linspace(0, 1, 11))
Out[395]:
0    (0.8, 0.9]
1    (0.3, 0.4]
2    (0.2, 0.3]
3    (0.7, 0.8]
4    (0.2, 0.3]
5    (0.8, 0.9]
6      (0.9, 1]
7    (0.5, 0.6]
8    (0.7, 0.8]
9    (0.1, 0.2]
Name: a, dtype: category
Categories (10, object): [(0, 0.1] < (0.1, 0.2] < (0.2, 0.3] < (0.3, 0.4] ... (0.6, 0.7] < (0.7, 0.8] < (0.8, 0.9] < (0.9, 1]]

If we want custom labels, we should specify them explicitly:

In [401]: bins = np.linspace(0, 1, 11)

NOTE: bin labels must be one fewer than the number of bin edges

In [402]: labels = (bins[1:]*100).astype(int)

In [412]: labels
Out[412]: array([ 10,  20,  30,  40,  50,  60,  70,  80,  90, 100])

In [403]: pd.cut(df.a, bins=bins, labels=labels)
Out[403]:
0     90
1     40
2     30
3     80
4     30
5     90
6    100
7     60
8     80
9     20
Name: a, dtype: category
Categories (10, int64): [10 < 20 < 30 < 40 ... 70 < 80 < 90 < 100]

Let's do it with the 5% step:

In [419]: bins = np.linspace(0, 1, 21)

In [420]: bins
Out[420]: array([ 0.  ,  0.05,  0.1 ,  0.15,  0.2 ,  0.25,  0.3 ,  0.35,  0.4 ,  0.45,  0.5 ,  0.55,  0.6 ,  0.65,  0.7 ,  0.75,  0.8 ,  0.85,  0.9 ,  0.95,  1.  ])

In [421]: labels = (bins[1:]*100).astype(int)

In [422]: labels
Out[422]: array([  5,  10,  15,  20,  25,  30,  35,  40,  45,  50,  55,  60,  65,  70,  75,  80,  85,  90,  95, 100])

In [423]: pd.cut(df.a, bins=bins, labels=labels)
Out[423]:
0     90
1     40
2     25
3     80
4     30
5     90
6    100
7     60
8     75
9     15
Name: a, dtype: category
Categories (20, int64): [5 < 10 < 15 < 20 ... 85 < 90 < 95 < 100]
Trying to get my program to read an input file correctly 2d array [duplicate]
This question already has an answer here:
I can't get my program to read the values from my input file correctly (2D array) (1 answer)
Closed 9 years ago.

I am trying to make my program read my input file correctly. The following is my input file; the first column is what I want the user to input, and then the values need to be taken from the corresponding row. The values being read are wrong, though.

14, 14, 8, 0.4, 16, 2.0, 1.7, 7, 4.7, 0.23, 0.44, 290, 350
16, 16, 10, 0.5, 17, 2.2, 1.8, 8, 5.4, 0.27, 0.5, 310, 370
18, 18, 11, 0.5, 18, 2.2, 2.0, 9, 6.0, 0.30, 0.56, 320, 380
20, 20, 12, 0.5, 19, 2.3, 2.2, 9.5, 6.4, 0.32, 0.59, 330, 390
22, 22, 13, 0.5, 20, 2.4, 2.4, 10, 6.7, 0.33, 0.63, 340, 410
24, 24, 14, 0.5, 21, 2.5, 2.5, 11, 7.4, 0.37, 0.69, 350, 420
27, 27, 16, 0.6, 22, 2.6, 2.8, 11.5, 7.7, 0.38, 0.72, 370, 450
30, 30, 18, 0.6, 23, 2.7, 3.0, 12, 8.0, 0.40, 0.75, 380, 460
35, 35, 21, 0.6, 25, 2.8, 3.4, 13, 8.7, 0.43, 0.81, 400, 480
40, 40, 24, 0.6, 26, 2.9, 3.8, 14, 9.4, 0.47, 0.88, 420, 500
45, 45, 27, 0.6, 27, 3.1, 3.8, 15, 10.0, 0.50, 0.94, 440, 520
50, 50, 30, 0.6, 29, 3.2, 3.8, 16, 10.7, 0.53, 1.00, 460, 550

ifstream soft;
soft.open("Softwood.txt"); // Opens the softwood text file which holds the values required for calculations
cout << "Please enter the strength class of the timber, excluding the letter." << endl;
cin >> type;
float a[12][13]; // begins the array so the input file can be read
int i, j;
for (i = 0; i < 12; i++)
{
    for (int j = 0; j < 13; j++)
        soft >> a[i][j];
}
int m = 0;
while (a[m][0] != type && m < 12)
{
    m++;
}
bendingStrength = a[m][1];
tensionParallel = a[m][2];
tensionPerpindicular = a[m][3];
compressionParallel = a[m][4];
compressionPerpindicular = a[m][5];
shearStrength = a[m][6];
elasticityParallel = a[m][7];
elasticityParallelFive = a[m][8];
elasticityPerpindicular = a[m][9];
shearModulus = a[m][10];
density = a[m][11];
meanDensity = a[m][12];
You should ignore the commas while reading the file. It could be accomplished like this:

for (i = 0; i < 12; i++)
{
    for (int j = 0; j < 13; j++)
    {
        soft >> a[i][j];
        char ch;
        soft.get(ch);
        if (ch != ',')
            soft.unget();
    }
}
while (a[m][0] != type && m < 12)
{
    m++;
}

If m == 11 and no row matched, your program increments m (now m == 12) and then tries to assign the values of a[12][i] (for i between 1 and 12) to your variables. But arrays in C++ are indexed from 0, so the largest valid element in your array is a[11][12]; your program tries to read objects that don't exist. Note also that the loop condition evaluates a[m][0] before checking m < 12, so the out-of-bounds read happens in the condition itself.
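As a minimal sketch of a bounds-safe version of that lookup (the not-found handling shown is illustrative, not from the original code):

int m = 0;
while (m < 12 && a[m][0] != type)  // test the bound first so a[12][0] is never read
    m++;
if (m == 12)
{
    cout << "Strength class not found." << endl;  // hypothetical error handling
    return 1;
}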