TensorFlow C++: use array for feed_dict

I have C++ TensorFlow code, shown below, which multiplies matrices that are fed in through placeholders:
#include <stdio.h>
#include <stdlib.h>
#include <ctime>
#include <iostream>
#include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor.h"
int main(int argc, char const *argv[]) {
    using namespace tensorflow;
    using namespace tensorflow::ops;
    Scope root = Scope::NewRootScope();
    auto alpha = Const(root, 2.0, {1, 1});
    auto beta = Const(root, 3.0, {1, 1});
    auto A = Placeholder(root, DT_FLOAT);
    auto B = Placeholder(root, DT_FLOAT);
    auto C = Placeholder(root, DT_FLOAT);
    auto temp1 = MatMul(root, A, B);
    auto temp2 = Mul(root, alpha, temp1);
    auto temp3 = Mul(root, beta, C);
    auto D = Add(root.WithOpName("D"), temp1, temp3);
    std::vector<Tensor> outputs;
    ClientSession session(root);
    int num_size = 2;
    for (int step = 1; step < num_size; step++) {
        /* Allocate the arrays */
        int array_size = pow(10, step);
        float **a, **b, **c;
        a = (float **)malloc(sizeof(float *) * array_size);
        b = (float **)malloc(sizeof(float *) * array_size);
        c = (float **)malloc(sizeof(float *) * array_size);
        for (int i = 0; i < array_size; i++) {
            a[i] = (float *)malloc(sizeof(float) * array_size);
            b[i] = (float *)malloc(sizeof(float) * array_size);
            c[i] = (float *)malloc(sizeof(float) * array_size);
        }
        srand((unsigned)time(0));
        for (int i = 0; i < array_size; i++) {
            for (int j = 0; j < array_size; j++) {
                a[i][j] = (rand() % 100) + 1;
                b[i][j] = (rand() % 200) + 1;
                c[i][j] = (rand() % 300) + 1;
            }
        }
        for (int num = 0; num < 10; num++) {
            // This is the part that does not compile: a float** is not a valid feed value.
            Status s = session.Run({{A, a}, {B, b}, {C, c}}, {D}, &outputs);
            if (s.ok())
                c = outputs[0];  // likewise, a Tensor cannot be assigned to a float**
            else
                printf("Error\n");
        }
    }
    return 0;
}
However, the format for sending values to placeholders in C++ is shown in this link, and the FeedType used in C++ is given here.
I'm confused as to how I can convert my 2D arrays into that feed format so that I can supply them to session.Run().
Thank you.
Edit 1
A minimal representation of the question is as follows:
Consider the following snippet of code:
Scope root = Scope::NewRootScope();
auto a = Placeholder(root, DT_INT32);
// [3 3; 3 3]
auto b = Const(root, 3, {2, 2});
auto c = Add(root, a, b);
ClientSession session(root);
std::vector<Tensor> outputs;
// Feed a <- [1 2; 3 4]
int feed_a[2][2] = {{1, 2}, {3, 4}};
session.Run({ {a, feed_a} }, {c}, &outputs);
// The working code is - session.Run({ {a, { {1, 2}, {3, 4} } } }, {c}, &outputs);
// outputs[0] == [4 5; 6 7]
How can I make this code work when the feed_a array is received from a separate function and needs to be used to set the value of the placeholder a?

You need to create a contiguous C array and place the data there, instead of using a jagged array (an array of row pointers).
#include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor.h"
int main() {
    using namespace tensorflow;
    using namespace tensorflow::ops;
    Scope root = Scope::NewRootScope();
    // [3 3; 3 3]
    auto b = Const(root, {{3.f, 3.f}, {3.f, 3.f}});
    ClientSession session(root);
    std::vector<Tensor> outputs;
    // just print b
    TF_CHECK_OK(session.Run({}, {b}, &outputs));
    LOG(INFO) << "b = ";
    LOG(INFO) << outputs[0].matrix<float>();
    // just print c = a + b
    float *a_data = new float[4];
    for (int i = 0; i < 4; ++i)
        a_data[i] = 1.f;
    auto a_shape = TensorShape({2, 2});
    // Initializer(value, shape) builds a {2, 2} tensor filled with the scalar *a_data (1.f here)
    auto a_init = Input::Initializer(*a_data, a_shape);
    auto a_plhdr = Placeholder(root, DT_FLOAT);
    auto c = Add(root, a_plhdr, b);
    TF_CHECK_OK(session.Run({{a_plhdr, a_init}}, {c}, &outputs));
    LOG(INFO) << "a + b";
    LOG(INFO) << outputs[0].matrix<float>();
    return 0;
}
gives me
2018-02-14 22:45:47.469766: I tensorflow/cc/example/example.cc:20] b =
2018-02-14 22:45:47.469800: I tensorflow/cc/example/example.cc:21] 3 3
3 3
2018-02-14 22:45:47.473519: I tensorflow/cc/example/example.cc:36] a + b
2018-02-14 22:45:47.473543: I tensorflow/cc/example/example.cc:37] 4 4
4 4
Note, for some reason
int32 *a_data = new int32[4];
for (int i = 0; i < 4; ++i)
a_data[i] = 1;
auto a_shape = TensorShape({2, 2});
auto a_init = Input::Initializer(*a_data, a_shape);
auto a_plhdr = Placeholder(root, DT_INT32);
produces a failure (no output):
Check failed: dtype() == expected_dtype (1 vs. 3)
which could not be fixed by adding a cast:
auto a_casted = Cast(root, a_plhdr, DT_FLOAT);
auto c = Add(root, a_casted, b);
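If the matrix data already exists in an array coming from another function (as in Edit 1), one option is to copy it into a tensorflow::Tensor of the right shape and dtype and feed that tensor for the placeholder; Input::Initializer can be constructed from a Tensor, so it can be passed straight to ClientSession::Run. A minimal sketch reusing root, session, b, a_plhdr, c and outputs from the float example above (the array contents are just an assumption for illustration):
// Data received from elsewhere (assumed 2x2, row-major).
float feed_a[2][2] = {{1.f, 2.f}, {3.f, 4.f}};
// Copy it into a tensor with matching dtype and shape.
Tensor a_tensor(DT_FLOAT, TensorShape({2, 2}));
auto a_mapped = a_tensor.matrix<float>();  // Eigen view over the tensor's buffer
for (int i = 0; i < 2; ++i)
    for (int j = 0; j < 2; ++j)
        a_mapped(i, j) = feed_a[i][j];
// Feed the tensor for the placeholder and fetch c = a + b.
TF_CHECK_OK(session.Run({{a_plhdr, a_tensor}}, {c}, &outputs));
LOG(INFO) << outputs[0].matrix<float>();  // with b = [3 3; 3 3] this should print 4 5 / 6 7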

Related

How to use thrust remove_if's results

I am trying to use thrust::remove_if and I have some questions. First, however, the example in the documentation is not working as it should.
Here is the code (which also contains the documentation example):
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
//#include <thrust/copy.h>
#include <thrust/remove.h>
#include <iostream>
template<typename T>
struct is_zero {
    __host__ __device__
    auto operator()(T x) const -> bool {
        return x == 0;
    }
};
struct is_even
{
    __host__ __device__
    bool operator()(const int x)
    {
        return (x % 2) == 0;
    }
};
int main(void) {
    int h_data[6] = {1, 0, 2, 0, 1, 3};
    const int N = 6;
    int A[N] = {1, 4, 2, 8, 5, 7};
    int *new_end = thrust::remove_if(A, A + N, is_even());
    int *d_data;
    cudaMalloc((void**)&d_data, 6 * sizeof(int));
    cudaMemcpy(d_data, h_data, 6 * sizeof(int), cudaMemcpyHostToDevice);
    thrust::device_ptr<int> dev_ptr(d_data);
    thrust::device_vector<int> output;
    thrust::remove_if(dev_ptr, dev_ptr + 6, is_zero<int>());
    //thrust::remove_if(d_data, d_data+6, is_zero<int>()); //--> segmentation fault
    cudaMemcpy(h_data, d_data, 6 * sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < 6; i++)
        std::cout << "H[" << i << "] = " << h_data[i] << std::endl;
    for (int i = 0; i < 6; i++)
        std::cout << "new_end[" << i << "] = " << new_end[i] << std::endl;
}
I ran this and got:
H[0] = 1
H[1] = 2
H[2] = 1
H[3] = 3
H[4] = 1
H[5] = 3
new_end[0] = 8
new_end[1] = 5
new_end[2] = 7
new_end[3] = -491667200
new_end[4] = 541501445
new_end[5] = 2019959568
The documentation says:
// The first three values of A are now {1, 5, 7}
// Values beyond new_end are unspecified
The results of the part I programmed myself (is_zero) seem to be correct, but the results of the documentation example are not what it describes.
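For reference, remove_if communicates its result through the returned iterator: the kept elements are the range [A, new_end), and anything from new_end onward is unspecified, so printing new_end[0..5] reads exactly that unspecified region. A minimal sketch of how the result is usually consumed (illustrative, not part of the original code):
// Only [A, new_end) holds the surviving (odd) values after remove_if.
int kept = new_end - A;  // number of elements kept
for (int i = 0; i < kept; i++)
    std::cout << "A[" << i << "] = " << A[i] << std::endl;  // prints 1, 5, 7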

Could not get the expected output

Problem:
I don't get the result that I get from the Matlab implementation. I am not sure whether I have converted pow_pos(norm(x(:, 1) - start'), 2) correctly; here is the converted code:
Variable::t x_0 = Var::vstack(x->index(0, 0), x->index(1, 0), x->index(2, 0));
M->constraint(Expr::vstack(t_tmp->index(0), Expr::sub(x_0, start_)), Domain::inQCone());
M->constraint(Expr::hstack(t->index(0), 1, t_tmp->index(0)), Domain::inPPowerCone(1.0/2));
Output
Here the black dots represent what I get from Matlab and the green dots depict what I get from C++.
Here is the original code I wrote in Matlab, which gives the expected output:
clear all
close all
clc
number_of_steps = 15;
lambda3_val = 1000;
lambda1_val = 1000;
lambda2_val = 0.1;
dim_ = 3;
Ab = [-0.470233 0.882543 0 3.21407
0.470233 -0.882543 -0 0.785929
-0.807883 -0.430453 0.402535 4.81961
0.807883 0.430453 -0.402535 -1.40824
-0.355254 -0.189285 -0.915405 0.878975
0.355254 0.189285 0.915405 1.12103];
A = Ab(:,1:dim_);
b = Ab(:,dim_+1);
start = [-4 , 0, 2];
goal = [-1, -1, 1];
nPolytopes = 1;
free_space = polytope(A, b);
cvx_solver mosek
cvx_begin
variable x(3, number_of_steps)
binary variable c(nPolytopes, number_of_steps);
cost = 0;
for i = 1:(number_of_steps-1)
cost = cost + pow_pos(norm(x(:, i) - x(:, i+1), 1),2)*lambda2_val;
end
cost = cost + pow_pos(norm(x(:, 1) - start'), 2)*lambda1_val;
cost = cost + pow_pos(norm(x(:, number_of_steps) - goal'),2)*lambda3_val;
minimize(cost)
subject to
for i = 1:number_of_steps
A*x(:, i) <= b;
end
cvx_end
Here is the conversion of the above into C++ using Mosek:
#include <iostream>
#include "fusion.h"
using namespace mosek::fusion;
using namespace monty;
int main(int argc, char ** argv)
{
int number_of_steps = 15;
int lambda1_val = 1000;
int lambda2_val = 1000;
int lambda3_val = 0.1;
int dim_space = 3;
auto A = new_array_ptr<double, 2>({{-0.4702, 0.8825, 0},
{0.4702, -0.8825, 0},
{-0.8079, -0.4305, 0.4025},
{0.8079, 0.4305, -0.4025},
{-0.3553, -0.1893, -0.9154},
{0.3553, 0.1893, 0.9154}});
auto b = new_array_ptr<double, 1>({3.2141,
0.7859,
4.8196,
-1.4082,
0.8790,
1.1210});
auto end_ = new_array_ptr<double, 1>({-1, -1, -1});
auto start_ = new_array_ptr<double, 1>({-4, 0, 2});
Model::t M = new Model();
auto x = M->variable(new_array_ptr<int,1>({dim_space, number_of_steps}), Domain::unbounded()) ;
auto t = M->variable(number_of_steps, Domain::unbounded());
auto t_tmp = M->variable(number_of_steps, Domain::unbounded());
auto lambda_1 = M->parameter("lambda_1");
auto lambda_2 = M->parameter("lambda_2");
auto lambda_3 = M->parameter("lambda_3");
Variable::t x_0 = Var::vstack(x->index(0, 0), x->index(1, 0), x->index(2, 0));
M->constraint(Expr::vstack(t_tmp->index(0), Expr::sub(x_0, start_)), Domain::inQCone());
M->constraint(Expr::hstack(t->index(0), 1, t_tmp->index(0)), Domain::inPPowerCone(1.0/2));
for(int i=1; i<number_of_steps-1; i++){
Variable::t x_i = Var::vstack(x->index(0,i), x->index(1,i), x->index(2,i));
Variable::t x_i1 = Var::vstack(x->index(0,i+1), x->index(1,i+1), x->index(2,i+1));
M->constraint(Expr::vstack(t_tmp->index(i), Expr::sub(x_i1, x_i)), Domain::inQCone());
M->constraint(Expr::hstack(t->index(i), 1, t_tmp->index(i)), Domain::inPPowerCone(1.0/2));
}
Variable::t x_n = Var::vstack(x->index(0, number_of_steps-1), x->index(1, number_of_steps-1), x->index(2, number_of_steps-1));
M->constraint(Expr::vstack(t_tmp->index(number_of_steps-1), Expr::sub(x_n, end_)), Domain::inQCone());
M->constraint(Expr::hstack(t->index(number_of_steps-1), 1, t_tmp->index(number_of_steps-1)), Domain::inPPowerCone(1.0/2));
for (int i = 0; i < number_of_steps; i++)
{
auto x_i = Var::vstack(x->index(0,i), x->index(1, i), x->index(2, i));
M->constraint(Expr::mul(A, x_i), Domain::lessThan(b));
}
M->setLogHandler([](const std::string& msg){std::cout<< msg << std::flush;});
auto lambda1 = M->getParameter("lambda_1");
auto lambda2 = M->getParameter("lambda_2");
auto lambda3 = M->getParameter("lambda_3");
lambda1->setValue(lambda1_val);
lambda2->setValue(lambda2_val);
lambda3->setValue(lambda3_val);
auto objs = new_array_ptr<Expression::t, 1>(number_of_steps);
(*objs)[0] = (Expr::mul(lambda1, t->index(0)));
for(int i=1; i<number_of_steps-1; i++){
(*objs)[i] = Expr::mul(lambda2, t->index(i));
}
(*objs)[number_of_steps-1] = Expr::mul(lambda3, t->index(number_of_steps-1));
M->objective(ObjectiveSense::Minimize, Expr::add(objs));
M->solve();
auto sol = *(x->level());
std::cout<< "solution "<< sol << std::endl;
}
With the help of @michal-adamaszek and the answer given in the Mosek Google Group (https://mail.google.com/mail/u/0/#inbox/FMfcgzGkXmgmHqFBVJFSNRWmjQfQSPlg), here is the working solution to the above problem. It models t >= ||y||^2 directly with a rotated quadratic cone: (t, 0.5, y) lying in that cone means 2 * t * 0.5 >= ||y||^2, which is exactly what pow_pos(norm(...), 2) expresses in the CVX code.
#include <iostream>
#include "fusion.h"
using namespace mosek::fusion;
using namespace monty;
#define nint1(a) new_array_ptr<int>({(a)})
#define nint(a,b) new_array_ptr<int>({(a),(b)})
int main(int argc, char ** argv)
{
int number_of_steps = 15;
double lambda1_val = 1000;
double lambda2_val = 0.1;
double lambda3_val = 1000;
int dim_space = 3;
auto A = new_array_ptr<double, 2>({{-0.470233, 0.882543, 0},
{0.470233, -0.882543, 0},
{-0.807883, -0.430453, 0.402535},
{0.807883, 0.430453, -0.402535},
{-0.355254, -0.189285, -0.915405},
{0.355254, 0.189285, 0.915405}});
auto b = new_array_ptr<double, 1>({3.21407,
0.785929,
4.81961,
-1.40824,
0.878975,
1.12103});
auto end_ = new_array_ptr<double, 1>({-1, -1, 1});
auto start_ = new_array_ptr<double, 1>({-4, 0, 2});
Model::t M = new Model();
auto x = M->variable("x", new_array_ptr<int,1>({dim_space, number_of_steps}), Domain::unbounded()) ;
auto t = M->variable("t", number_of_steps-1, Domain::unbounded());
auto tstart = M->variable("ts",Domain::unbounded());
auto tend = M->variable("te",Domain::unbounded());
M->constraint(Expr::vstack(tstart, 0.5, Expr::sub(x->slice(nint(0,0), nint(3,1))->reshape(nint1(3)),
start_)),
Domain::inRotatedQCone());
M->constraint(Expr::hstack(t,
Expr::constTerm(number_of_steps-1, 0.5),
Expr::transpose(Expr::sub(x->slice(nint(0,0), nint(3,number_of_steps-1)),
x->slice(nint(0,1), nint(3,number_of_steps))))),
Domain::inRotatedQCone());
M->constraint(Expr::vstack(tend, 0.5, Expr::sub(x->slice(nint(0,number_of_steps-1), nint(3,number_of_steps))->reshape(nint1(3)),
end_)),
Domain::inRotatedQCone());
for (int i = 0; i < number_of_steps; i++)
M->constraint(Expr::mul(A, x->slice(nint(0,i), nint(3,i+1))->reshape(nint1(3))), Domain::lessThan(b));
M->setLogHandler([](const std::string& msg){std::cout<< msg << std::flush;});
auto lambda1 = M->parameter("lambda_1");
auto lambda2 = M->parameter("lambda_2");
auto lambda3 = M->parameter("lambda_3");
lambda1->setValue(lambda1_val);
lambda2->setValue(lambda2_val);
lambda3->setValue(lambda3_val);
M->objective(ObjectiveSense::Minimize,
Expr::add( Expr::sum(Expr::mul(lambda2, t)),
Expr::add(Expr::mul(lambda1, tstart), Expr::mul(lambda3, tend))));
M->writeTask("a.ptf");
M->solve();
auto sol = *(x->level());
std::cout<< "solution "<< sol << std::endl;
}

My C++ code returns 3 (after throwing an instance of bad_alloc)

My code returns 3 even though everything seems to be right. It previously returned 255, and after I rearranged it a bit it now returns 3. It also says "terminate called after throwing an instance of 'std::bad_alloc'".
I looked it up on Google but I can't find any solution. Any idea why this happens?
#include <iostream>
#include <vector>
using namespace std;
std::vector<int> calculeazaPunctul(double pct[]) {
    double pctfinal[2];
    double xv = pct[0];
    double yv = pct[1];
    double zv = pct[2];
    double res = .02;
    double xk = 1;
    double yk = 1;
    double zk = -2;
    cout << "h" << endl;
    double zp = 0;
    double xp = xv - ((xv - xk) * (zv - zp));
    xp = xp / (((zv - zk != 0) ? (zv - zk) : 0.0001));
    double yp = yv - ((yv - yk) * (zv - zp));
    yp = yp / (((zv - zk != 0) ? (zv - zk) : 0.0001));
    return {(int)((res * 2 * (xp * 50 + 100))), (int)((res * (yp * 50 + 100)))};
}
int main()
{
    double puncte[8][3] = {{1, 0, -1},
                           {1, 0, 1},
                           {-1, 0, 1},
                           {-1, 0, -1},
                           {1, 1, -1},
                           {1, 1, 1},
                           {-1, 1, 1},
                           {-1, 1, -1}};
    std::vector<std::vector<int> > pcteFinal;
    pcteFinal.resize(8);
    for (int i = 0; i < 8; i++) {
        pcteFinal[i] = calculeazaPunctul(puncte[i]);
    }
    std::vector<std::vector<char> > image;
    image.resize(10);
    for (int y = 0; y < 10; y++) {
        std::vector<char> row;
        image[y] = row;
        for (int x = 0; x < 20; x++) {
            image[y].push_back('.');
        }
    }
    for (int i = 0; i < 8; i++) {
        if (pcteFinal[i][0] < 20 && pcteFinal[i][0] >= 0)
        {
            if (pcteFinal[i][1] < 10 && pcteFinal[i][0] >= 0)
            {
                image[pcteFinal[i][0]][pcteFinal[i][1]] = '#';
            }
        }
    }
    for (int y = 0; y < 10; y++) {
        for (int x = 0; x < 20; x++) {
            cout << image[y][x];
        }
        cout << endl;
    }
    return 0;
}

Use C++ armadillo Expectation Maximization for Gaussian mixture model

I am trying to use Armadillo's expectation maximization gmm_diag class, but when I try to compile it I get "error: gmm_diag was not declared in this scope".
My code is as follows:
#include <stdio.h>
#include <iostream>
#include <vector>
#include <armadillo>
#include <omp.h>
using namespace std;
using namespace arma;
int main()
{
    // create synthetic data with 2 Gaussians
    uword N = 10000;
    uword d = 5;
    mat data(d, N, fill::zeros);
    vec mean0 = linspace<vec>(1, d, d);
    vec mean1 = mean0 + 2;
    uword i = 0;
    while (i < N)
    {
        if (i < N) { data.col(i) = mean0 + randn<vec>(d); ++i; }
        if (i < N) { data.col(i) = mean0 + randn<vec>(d); ++i; }
        if (i < N) { data.col(i) = mean1 + randn<vec>(d); ++i; }
    }
    gmm_diag model;
    model.learn(data, 2, maha_dist, random_subset, 10, 5, 1e-10, true);
    model.means.print("means:");
    double scalar_likelihood = model.log_p(data.col(0));
    rowvec set_likelihood = model.log_p(data.cols(0, 9));
    double overall_likelihood = model.avg_log_p(data);
    uword gaus_id = model.assign(data.col(0), eucl_dist);
    urowvec gaus_ids = model.assign(data.cols(0, 9), prob_dist);
    urowvec hist1 = model.raw_hist(data, prob_dist);
    rowvec hist2 = model.norm_hist(data, eucl_dist);
    model.save("my_model.gmm");
    // the table is now initialized
}

Wrong shortest distance BFS algorithm?

I am working on this problem: http://community.topcoder.com/stat?c=problem_statement&pm=2915&rd=5853, but my program gives the wrong output. I have tried several variations and it still does not work properly. I do not understand why, because other people do it the same way and their solutions are fine. Can you please check whether I have implemented the BFS properly? Thanks in advance.
#include <vector>
#include <queue>
#include <algorithm>
#include <iostream>
#include <cstring>
using namespace std;
#define P push
#define PP pop();
#define T front();
int mo[][2] = { {-2, -1}, {-2, 1}, {2, -1}, {2, 1}, {-1, -2}, {1, -2}, {-1, 2}, {1, 2} };
int m[8][8];
int BFS(int sy, int sx, int fy, int fx)
{
    queue<int> s;
    m[sy][sx] = 1;
    s.P(sy);
    s.P(sx);
    s.P(0);
    while (!s.empty())
    {
        int d = s.T s.PP
        int x = s.T s.PP
        int y = s.T s.PP
        for (int i = 0; i < 8; i++)
        {
            int yy = y + mo[i][0];
            int xx = x + mo[i][1];
            if (yy < 0 || yy > 7 || xx < 0 || xx > 7) continue;
            if (m[yy][xx] != -1) continue;
            if (yy == fy && xx == fx) return d + 1;
            m[yy][xx] = 0;
            s.P(yy);
            s.P(xx);
            s.P(d + 1);
        }
    }
    return -1;
}
class CaptureThemAll {
public:
    int fastKnight(string knight, string rook, string queen) {
        vector<int> p{knight[0] - 'a', knight[1] - '1', rook[0] - 'a', rook[1] - '1', queen[0] - 'a', queen[1] - '1'};
        memset(m, -1, sizeof(m));
        int a = BFS(p[1], p[0], p[3], p[2]);
        memset(m, -1, sizeof(m));
        int b = BFS(p[1], p[0], p[5], p[4]);
        memset(m, -1, sizeof(m));
        int c = BFS(p[3], p[2], p[5], p[4]);
        return min(a, b) + c;
    }
};
I think the problem might be that you push y, x, d, so your queue will be:
front = y, middle = x, end = d
But when you pop the front element, you place it (y) into a variable called d.
It may work better if you change:
int d = s.T s.PP
int x = s.T s.PP
int y = s.T s.PP
to
int y = s.T s.PP
int x = s.T s.PP
int d = s.T s.PP
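A way to avoid this kind of ordering mistake altogether is to keep the whole state in a single queue element instead of three separate ints pushed one after another; a minimal sketch (illustrative, requires C++17 and #include <array>):
// Store (y, x, d) together so the pop order cannot get mixed up.
queue<std::array<int, 3>> q;
q.push({sy, sx, 0});
while (!q.empty()) {
    auto [y, x, d] = q.front();  // structured bindings unpack the state
    q.pop();
    // ... generate the 8 knight moves as before, pushing {yy, xx, d + 1}
}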