Pyomo KeyError: "Error accessing indexed component: Index '0' is not valid for array component 'li_f_inv'" - pyomo

I was trying to run the following code in Pyomo. It is a simple transmission expansion problem using a concrete model.
Thanks jsiirola. I am actually just learning Pyomo, that's why I might be making silly mistakes. I implemented everything you suggested, as shown below:
from pyutilib.misc import import_file
from pyomo.environ import *
import networkx as nx
model = ConcreteModel()
model.name = "DTEPM_trial_concrete"
#Sets
#Epoch
model.E = Set(initialize = [0, 1, 2, 3])
model.E_n = Set(model.E, initialize = {0:[1,2,3,4,5], 1:[6,7,8,9,10], 2:[11,12,13,14,15], 3:[16,17,18,19,20]})
#System nodes
model.N = ['N1', 'N2', 'N3']
model.n_name= Param(model.N, within = Integers)
#T = Set()
model.G = ['G1', 'G2', 'G3']
model.LI = ['L1', 'L2', 'L3']
#Scalar Parameters
model.int_rate = 0.05
model.vll = 3000
model.tau_period = 8760
def R_discount_inv_init(model, i):
    return sum(1 / (1 + model.int_rate)**(i - 1) for i in model.E)
model.cum_disc_inv_cost = Param(model.E, initialize = R_discount_inv_init)
def R_discount_op_init(model, i):
    for index in model.E_n:
        return sum(1 / (1 + model.int_rate)**(i - 1) for i in model.E_n[index])
model.cum_disc_op_cost = Param(model.E, initialize = R_discount_op_init)
#Demand Periods
model.t_demand = {'N1': 1.05, 'N2': 2.10, 'N3': 7.35}
model.demand_curtailed = Var(model.E, model.N, within = NonNegativeReals)
#Generation Units
model.ge_max = {'G1': 2.00, 'G2': 2.00, 'G3': 7.35}
model.ge_marginal_cost = {'G1': 30, 'G2': 35, 'G3': 40}
model.B = {('N1','G1'): 1, ('N1','G2'): 0, ('N1','G3'): 0, ('N2','G1'): 0, ('N2','G2'): 1, ('N2','G3'): 0, ('N3','G1'): 0, ('N3','G2'): 0, ('N3','G3'): 1,}
#Transmission lines
model.li_x = {'L1': 0.2, 'L2': 0.2, 'L3': 0.2}
model.li_max_f = 1.50
model.li_f = {'L1': 1.00, 'L2': 1.00, 'L3': 1.00}
model.li_sending_bus = {'L1': 'N1', 'L2': 'N1', 'L3': 'N2'}
model.li_receiving_bus = {'L1': 'N2', 'L2': 'N3', 'L3': 'N3'}
model.li_length = {'L1': 100, 'L2': 100, 'L3': 100}
#Expansion Options
model.inv_cost_var = 4000000
nodes = ['N1', 'N2', 'N3']
edges = [['N1', 'N2'], ['N1', 'N3'], ['N2', 'N3']]
I = nx.DiGraph()
I.add_nodes_from(nodes)
I.add_edges_from(edges)
model.I = -nx.incidence_matrix(I, oriented=True) # this returns a scipy sparse matrix
#Variables
#Transmission line power flow limits
def fl_inv(model, i, l):
    return (0, model.li_max_f)
model.li_f_inv = Var(model.E, model.LI, bounds = fl_inv)
#Transmission line investment and operation constraints
model.f = Var(model.LI, model.E, initialize=0)
def fl_rule(model, l, j, i):
    if i:
        return model.f[l,j] >= -(model.li_f_inv[j,l] + model.li_f[l])
    else:
        return model.f[l,j] <= (model.li_f_inv[j,l] + model.li_f[l])
model.bound_f = Constraint(model.LI, model.E, [0,1], rule=fl_rule)
#generation limit
def fg(model, i, g):
    return (0, model.ge_max[g])
model.ge_output = Var(model.G, model.E, initialize = 0, bounds = fg)
#phase angles for the nodes
model.theta = Var(model.E, model.N, within = NonNegativeReals)
def line_equation(model, e, l):
    return model.bound_f[l] == (1/model.li_x(l) for l in model.LI) *(sum(model.theta[n] for n in model.N if model.n_name[n] == model.li_sending_bus[l]) - sum(model.theta[n] for n in model.N if model.n_name[n] == model.li_receiving_bus[l]))
model.line_equation = Constraint(model.LI, model.E, rule = line_equation)
def system_balance(model, e, n):
    return sum(model.b[n, g] * model.ge_output[g] for g in model.G) \
        + sum(model.I[n, l] * model.f[l] for l in model.LI) \
        - sum(model.t_demand[n] - model.demand_curtailed[n]) == 0
model.SystemBalance = Constraint(model.E, model.N, rule=system_balance)
#OBJECTIVE FUNCTION
def objective_mincost(model):
    return sum( model.cum_disc_inv_cost[e] for e in model.E * sum (model.li_f_inv[l] * model.inv_cost_var[l] * model.li_length[l]) + model.cum_disc_op_cost[e] * (model.tau_period * (sum(model.ge_max[g] * (model.ge_marginal_cost[g])) + sum(model.demand_curtailed[n] * model.vll))))
model.objective = Objective(rule = objective_mincost, sense = minimize)
opt = SolverFactory('gurobi')
results = opt.solve(model) # solves and updates instance
model.display()
But I received the following new error message:
ERROR: Rule failed when generating expression for constraint line_equation with index ('L3', 0):
    KeyError: "Error accessing indexed component: Index 'L3' is not valid for array component 'bound_f'"
ERROR: Constructing component 'line_equation' from data=None failed:
    KeyError: "Error accessing indexed component: Index 'L3' is not valid for array component 'bound_f'"
What do you think might be wrong?
Thank you

As the error indicates, you are not passing valid indices to model.li_f_inv in the rule for model.f. Your Var model.li_f_inv is declared as:
model.li_f_inv = Var(model.E, model.LI, bounds = fl_inv)
Your current rule for model.f is only passing a single index to model.li_f_inv, which is invalid. That means your rule for model.f needs to be updated to pass the correct indices:
#Transmission line investment and operation constraints
model.f = Var(model.LI, model.E, initialize=0)
def fl_rule(model, l, j, i):
    if i:
        return model.f[l,j] >= -(model.li_f_inv[j,l] + model.li_f[l])
    else:
        return model.f[l,j] <= (model.li_f_inv[j,l] + model.li_f[l])
model.bound_f = Constraint(model.LI, model.E, [0,1], rule=fl_rule)
Also note that you are transposing the indexing sets between the definition of model.f and the sets implied by the fl_rule function.
EDIT: I failed to notice that in your original post, you were attempting to use variables (li_f_inv) in the bounds of another variable (f). This isn't a valid math program (not to mention, not valid Pyomo). You need to express the variable bounds as a Constraint. Also, while Pyomo allows you to express range constraints (lb <= body <= ub), both lb and ub must not be potentially variable. Since that is not the case here, you must express the two bounds constraints separately.
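For illustration, here is a minimal sketch of those same bounds written as two separate one-sided Constraint components instead of the extra [0,1] index set (the names f_lower and f_upper are hypothetical):
def f_lower_rule(model, l, j):
    # the lower bound involves the variable li_f_inv, so it must be a Constraint
    return model.f[l, j] >= -(model.li_f_inv[j, l] + model.li_f[l])
def f_upper_rule(model, l, j):
    # the upper bound uses the same variable expression with the opposite sign
    return model.f[l, j] <= model.li_f_inv[j, l] + model.li_f[l]
model.f_lower = Constraint(model.LI, model.E, rule=f_lower_rule)
model.f_upper = Constraint(model.LI, model.E, rule=f_upper_rule)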

Related

TensorFlow train function with multiple layers

I am new to TensorFlow and trying to understand it. I managed to create a one-layer model, but I would now like to add two more layers. How can I make my train function work? I would like to train it with hundreds of X and Y values. I have created all the values I need (the weight and bias of each layer), but I don't understand how to use them in my train function, and, once the model is trained, how to use it, as I do in the last part of the code.
import numpy as np
import tensorflow as tf
print("TensorFlow version: {}".format(tf.__version__))
print("Eager execution: {}".format(tf.executing_eagerly()))
x = np.array([
    [10, 10, 30, 20],
])
y = np.array([[10, 1, 1, 1]])
class Model(object):
    def __init__(self, x, y):
        # get random values.
        self.W = tf.Variable(tf.random.normal((len(x), len(x[0]))))
        self.b = tf.Variable(tf.random.normal((len(y),)))
        self.W1 = tf.Variable(tf.random.normal((len(x), len(x[0]))))
        self.b1 = tf.Variable(tf.random.normal((len(y),)))
        self.W2 = tf.Variable(tf.random.normal((len(x), len(x[0]))))
        self.b2 = tf.Variable(tf.random.normal((len(y),)))

    def __call__(self, x):
        out1 = tf.multiply(x, self.W) + self.b
        out2 = tf.multiply(out1, self.W1) + self.b1
        last_layer = tf.multiply(out2, self.W2) + self.b2
        # Input_Layer = self.W * x + self.b
        return last_layer
def loss(predicted_y, desired_y):
    return tf.reduce_sum(tf.square(predicted_y - desired_y))
optimizer = tf.optimizers.Adam(0.1)
def train(model, inputs, outputs):
    with tf.GradientTape() as t:
        current_loss = loss(model(inputs), outputs)
    grads = t.gradient(current_loss, [model.W, model.b])
    optimizer.apply_gradients(zip(grads, [model.W, model.b]))
    print(current_loss)
model = Model(x, y)
for i in range(10000):
    train(model, x, y)
for i in range(3):
    InputX = np.array([
        [input(), input(), input(), input()],
    ])
    returning = tf.math.multiply(
        InputX, model.W, name=None
    )
    print("I think that output can be:", returning)
Just add new variables to the list:
grads = t.gradient(current_loss, [model.W, model.b, model.W1, model.b1, model.W2, model.b2])
optimizer.apply_gradients(zip(grads, [model.W, model.b, model.W1, model.b1, model.W2, model.b2]))
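Putting the fix in context, the whole train function would then read like this (a sketch based on the code above; the variable list is the only change):
def train(model, inputs, outputs):
    # track every trainable variable from all three layers
    variables = [model.W, model.b, model.W1, model.b1, model.W2, model.b2]
    with tf.GradientTape() as t:
        current_loss = loss(model(inputs), outputs)
    grads = t.gradient(current_loss, variables)
    optimizer.apply_gradients(zip(grads, variables))
    print(current_loss)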

Custom Loss Function becomes zero when backpropagated

I am trying to write my own custom loss function based on the false positive and false negative rates. I made a dummy example so you can check the first two definitions, and I added the rest so you can see how it is implemented. However, somewhere the gradient still turns out to be zero. At which step does the gradient become zero, and how can I check this? I would like to know how I can fix it :).
I tried to provide enough information so you can play around with it as well, but if anything is missing, please let me know!
requires_grad stays True during every step. However, during training the loss is not updated, so the NN does not train.
import torch
import torch.nn as nn
from torch.autograd import Variable

y = Variable(torch.tensor((0, 0, 0, 1, 1, 1), dtype=torch.float), requires_grad = True)
y_pred = Variable(torch.tensor((0.333, 0.2, 0.01, 0.99, 0.49, 0.51), dtype=torch.float), requires_grad = True)
x = Variable(torch.tensor((0, 0, 0, 1, 1,1), dtype=torch.float), requires_grad = True)
x_pred = Variable(torch.tensor((0.55, 0.25, 0.01, 0.99, 0.65, 0.51), dtype=torch.float), requires_grad = True)
def binary_y_pred(y_pred):
    y_pred.register_hook(lambda grad: print(grad))
    y_pred = y_pred+torch.tensor(0.5, requires_grad=True, dtype=torch.float)
    y_pred = y_pred.pow(5) # this is my way of working around using torch.where()
    y_pred = y_pred.pow(10)
    y_pred = y_pred.pow(15)
    m = nn.Sigmoid()
    y_pred = m(y_pred)
    y_pred = y_pred-torch.tensor(0.5, requires_grad=True, dtype=torch.float)
    y_pred = y_pred*2
    y_pred.register_hook(lambda grad: print(grad))
    return y_pred
def confusion_matrix(y_pred, y):
    TP = torch.sum(y*y_pred)
    TN = torch.sum((1-y)*(1-y_pred))
    FP = torch.sum((1-y)*y_pred)
    FN = torch.sum(y*(1-y_pred))
    k_eps = torch.tensor(1e-12, requires_grad=True, dtype=torch.float)
    FN_rate = FN/(TP + FN + k_eps)
    FP_rate = FP/(TN + FP + k_eps)
    return FN_rate, FP_rate
def dif_rate(FN_rate_y, FN_rate_x):
    dif = (FN_rate_y - FN_rate_x).pow(2)
    return dif
def custom_loss_function(y_pred, y, x_pred, x):
    y_pred = binary_y_pred(y_pred)
    FN_rate_y, FP_rate_y = confusion_matrix(y_pred, y)
    x_pred = binary_y_pred(x_pred)
    FN_rate_x, FP_rate_x = confusion_matrix(x_pred, x)
    FN_dif = dif_rate(FN_rate_y, FN_rate_x)
    FP_dif = dif_rate(FP_rate_y, FP_rate_x)
    cost = FN_dif+FP_dif
    return cost
# I added the rest so you can see how it is implemented, but this piece does not fully run as-is! If you want this part to run as well, I can add more code.
class FeedforwardNeuralNetModel(nn.Module):
    def __init__(self, input_dim, hidden_dim, output_dim):
        super(FeedforwardNeuralNetModel, self).__init__()
        self.fc1 = nn.Linear(input_dim, hidden_dim)
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Linear(hidden_dim, output_dim)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu1(out)
        out = self.fc2(out)
        out = self.sigmoid(out)
        return out
model = FeedforwardNeuralNetModel(input_dim, hidden_dim, output_dim)
optimizer = torch.optim.Adam(model.parameters(), lr=0.0001, betas=[0.9, 0.99], amsgrad=True)
criterion = torch.nn.BCELoss(weight=None, size_average=None, reduce=None, reduction='mean')
for epoch in range(num_epochs):
    train_err = 0
    for i, (samples, truths) in enumerate(train_loader):
        samples = Variable(samples)
        truths = Variable(truths)
        optimizer.zero_grad() # Reset gradients
        outputs = model(samples) # Do the forward pass
        loss2 = criterion(outputs, truths) # Calculate loss
        samples_y = Variable(samples_y)
        samples_x = Variable(samples_x)
        y_pred = model(samples_y)
        y = Variable(y, requires_grad=True)
        x_pred = model(samples_x)
        x = Variable(x, requires_grad=True)
        cost = custom_loss_function(y_pred, y, x_pred, x)
        loss = loss2*0+cost # checking only if cost works.
        loss.backward()
        optimizer.step()
        train_err += loss.item()
    train_loss.append(train_err)
I expect the model to update during training. There is no error message.
With your definitions: TP+FN=y and TN+FP=1-y. Then you'll get FN_rate=1-y_pred and FP_rate=y_pred. Your cost is then FN_rate+FP_rate=1, the gradient of which is 0.
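Spelled out, treating y and y_pred as single scalars (the same algebra the SymPy check below performs):
FN_rate = y*(1-y_pred) / (y*y_pred + y*(1-y_pred)) = y*(1-y_pred)/y = 1 - y_pred
FP_rate = (1-y)*y_pred / ((1-y)*(1-y_pred) + (1-y)*y_pred) = (1-y)*y_pred/(1-y) = y_pred
cost = FN_rate + FP_rate = (1 - y_pred) + y_pred = 1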
You can check this by hand or using a library for symbolic mathematics (e.g., SymPy):
from sympy import symbols
y, y_pred = symbols("y y_pred")
TP = y * y_pred
TN = (1-y)*(1-y_pred)
FP = (1-y)*y_pred
FN = y*(1-y_pred)
# let's ignore the eps for now
FN_rate = FN/(TP + FN)
FP_rate = FP/(TN + FP)
cost = FN_rate + FP_rate
from sympy import simplify
print(simplify(cost))
# output: 1

TypeError: '_SumExpression' object is not iterable

I am new to Pyomo and have tried to run the following code:
from pyutilib.misc import import_file
from pyomo.environ import *
model = ConcreteModel()
model.name = "Transmission Investment planning problem_"
#Sets
#Epoch
model.E = RangeSet(0,3) #No. of Epochs
#System nodes
model.N = ['N1', 'N2', 'N3'] #Names of Bus nodes
#model.n_name= Param(model.N)
model.G = ['G1', 'G2', 'G3'] #Names of generators
model.LI = ['L1', 'L2', 'L3'] #Names of transmission lines
#Scalar Parameters
model.int_rate = 0.05 #interest rate
model.vll = 3000 #value of loss load(£/MWh)
model.tau_period = 8760 #Time duration of demand period (hours)
model.base = 100 #MVA base
model.ref = {'N3'} #reference node
model.vadegree = 0 #phase angle of reference node
#Discount factors
L = 5
Y= len(model.E)*L
irate = range(0,(Y-1))
def disc_factor(i):
    disc = [1/((1 + model.int_rate)**i) for i in irate]
    return disc
model.cum_disc_inv_cost = [sum(disc_factor(i)[(i*L):]) for i in model.E] #investment discount factor
model.cum_disc_op_cost = [sum(disc_factor(i)[(i*L):((i+1)*L)]) for i in model.E] #operation discount factor
#Demand Periods
model.t_demand = {'N1': 105, 'N2': 210, 'N3': 735} #demand at nodes (MW)
model.demand_curtailed = Var(model.E, model.N, initialize = 0) #curtailed demand (MW)
#Generation Units
model.ge_max = {'G1': 200, 'G2': 200, 'G3': 1000} #maximum stable power generation(MW)
model.ge_marginal_cost = {'G1': 30, 'G2': 35, 'G3': 40} #marginal cost of generation units (£/MWh)
#Bus to generation incidence matrix
model.B = {('N1','G1'): 1, ('N1','G2'): 0, ('N1','G3'): 0, ('N2','G1'): 0, ('N2','G2'): 1, ('N2','G3'): 0, ('N3','G1'): 0, ('N3','G2'): 0, ('N3','G3'): 1,}
#Transmission lines
model.li_x = {'L1': 0.2, 'L2': 0.2, 'L3': 0.2} #reactance of transmission line(p.u)
model.li_max_f = 150 #maximum capacity provided for line expansion (MW)
model.li_f = {'L1': 100, 'L2': 100, 'L3': 100} #initial capacity for line l (MW)
model.li_sending_bus = {'L1': 'N1', 'L2': 'N1', 'L3': 'N2'} #sending bus for line l
model.li_receiving_bus = {'L1': 'N2', 'L2': 'N3', 'L3': 'N3'} #receiving bus for line l
model.li_length = {'L1': 100, 'L2': 100, 'L3': 100} #length of line l (km)
#Expansion Options
model.inv_cost_var = 4000000 #Annuitized variable investment cost for line l (£/MW.km.yr)
#Bus to line incidence matrix
model.I = {('N1','L1'): 1, ('N1','L2'): 1, ('N1','L3'): 0, ('N2','L1'): -1, ('N2','L2'): 0, ('N2','L3'): 1, ('N3','L1'): 0, ('N3','L2'): -1, ('N3','L3'): -1,}
#Variables
#Transmission line power flow limits
def fl_inv(model, i, l):
    return (0, model.li_max_f)
model.li_f_inv = Var(model.E, model.LI, bounds = fl_inv) #transmission capacity to be built for line l (MW)
#Transmission line investment and operation constraints
model.f = Var(model.LI, model.E, initialize=0)
def fl_rule(model, l, j, i):
    if i:
        return model.f[l,j] >= -(model.li_f_inv[j,l] + model.li_f[l])
    else:
        return model.f[l,j] <= (model.li_f_inv[j,l] + model.li_f[l])
model.bound_f = Constraint(model.LI, model.E, [0,1], rule=fl_rule)
##generation limit
def fg(model, i, g):
    return (0, model.ge_max[g])
model.ge_output = Var(model.E, model.G, initialize = 0, bounds = fg)
#phase angles for the nodes
def theta(model, e, n):
    for n in model.N:
        if n == model.ref:
            model.theta[e, n].fixed = True
            return model.vadegree
        else: return 0
model.theta = Var(model.E, model.N, initialize = theta)
def line_equation(model, l, e):
    return model.f[l, e] == model.base/model.li_x[l] *(sum(model.theta[e, n] for n in model.N if n == model.li_sending_bus[l]) - sum(model.theta[e, n] for n in model.N if n == model.li_receiving_bus[l]))
model.line_equation = Constraint(model.LI, model.E, rule = line_equation)
def system_balance(model, e, n):
    return sum(model.B[n, g] * model.ge_output[e, g] for g in model.G) \
        + sum(model.I[n, l] * model.f[l, e] for l in model.LI) \
        - model.t_demand[n] + model.demand_curtailed[e, n] == 0
model.SystemBalance = Constraint(model.E, model.N, rule=system_balance)
#OBJECTIVE FUNCTION
def objective_mincost(model):
    for i in model.E:
        return sum(model.cum_disc_inv_cost[i] * sum(model.li_f_inv[i, l] * model.inv_cost_var * model.li_length[l] for l in model.LI) + model.cum_disc_op_cost[i] * (model.tau_period * (sum(model.ge_max[g] * (model.ge_marginal_cost[g]) for g in model.G) + sum(model.demand_curtailed[i, n] for n in model.N * model.vll))))
model.objective = Objective(rule = objective_mincost, sense = minimize)
opt = SolverFactory('gurobi')
results = opt.solve(model) # solves and updates instance
model.display()
I got the following error messages:
ERROR: Rule failed when generating expression for objective objective:
    TypeError: '_SumExpression' object is not iterable
ERROR: Constructing component 'objective' from data=None failed:
    TypeError: '_SumExpression' object is not iterable
Do you have any suggestions on where the problem might be, and a possible solution?
Thank you.
You have two places where model.E is multiplied by something that is not a set. You probably meant to parenthesize your sums differently; i.e., this is mostly a problem with how your sums are organized.
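For example, one plausible reorganization, guessing at the intended grouping (each for clause has to sit inside the sum() it belongs to, and inv_cost_var is a scalar here, so it is not subscripted):
def objective_mincost(model):
    return sum(
        model.cum_disc_inv_cost[i]
        * sum(model.li_f_inv[i, l] * model.inv_cost_var * model.li_length[l]
              for l in model.LI)
        + model.cum_disc_op_cost[i] * model.tau_period
        * (sum(model.ge_max[g] * model.ge_marginal_cost[g] for g in model.G)
           + sum(model.demand_curtailed[i, n] * model.vll for n in model.N))
        for i in model.E)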

TypeError( "Cannot convert object of type '%s' (value = %s) to a numeric value."

I am new to Pyomo and using it to practice some optimisation problems in Transmission Expansion Planning. I tried solving the model below:
from pyutilib.misc import import_file
from pyomo.environ import *
import networkx as nx
model = ConcreteModel()
model.name = "DTEPM_trial_concrete"
#Sets
#Epoch
model.E = Set(initialize = [0, 1, 2, 3])
model.E_n = Set(model.E, initialize = {0:[1,2,3,4,5], 1:[6,7,8,9,10], 2:[11,12,13,14,15], 3:[16,17,18,19,20]})
#System nodes
model.N = ['N1', 'N2', 'N3']
#model.n_name= Param(model.N)
#T = Set()
model.G = ['G1', 'G2', 'G3']
model.LI = ['L1', 'L2', 'L3']
#Scalar Parameters
model.int_rate = 0.05
model.vll = 3000
model.tau_period = 8760
model.base = 100
model.ref = ['N3']
model.vadegree = 0
def R_discount_inv_init(model, i):
    return sum(1 / (1 + model.int_rate)**(i - 1) for i in model.E)
model.cum_disc_inv_cost = Param(model.E, initialize = R_discount_inv_init)
def R_discount_op_init(model, i):
    for index in model.E_n:
        return sum(1 / (1 + model.int_rate)**(i - 1) for i in model.E_n[index])
model.cum_disc_op_cost = Param(model.E, initialize = R_discount_op_init)
#Demand Periods
model.t_demand = {'N1': 105, 'N2': 210, 'N3': 735}
model.demand_curtailed = Var(model.E, model.N, within = NonNegativeReals)
#Generation Units
model.ge_max = {'G1': 200, 'G2': 200, 'G3': 1000}
model.ge_marginal_cost = {'G1': 30, 'G2': 35, 'G3': 40}
model.B = {('N1','G1'): 1, ('N1','G2'): 0, ('N1','G3'): 0, ('N2','G1'): 0, ('N2','G2'): 1, ('N2','G3'): 0, ('N3','G1'): 0, ('N3','G2'): 0, ('N3','G3'): 1,}
#Transmission lines
model.li_x = {'L1': 0.2, 'L2': 0.2, 'L3': 0.2}
model.li_max_f = 150
model.li_f = {'L1': 100, 'L2': 100, 'L3': 100}
model.li_sending_bus = {'L1': 'N1', 'L2': 'N1', 'L3': 'N2'}
model.li_receiving_bus = {'L1': 'N2', 'L2': 'N3', 'L3': 'N3'}
model.li_length = {'L1': 100, 'L2': 100, 'L3': 100}
#Expansion Options
model.inv_cost_var = 4000000
nodes = ['N1', 'N2', 'N3']
edges = [['N1', 'N2'], ['N1', 'N3'], ['N2', 'N3']]
I = nx.DiGraph()
I.add_nodes_from(nodes)
I.add_edges_from(edges)
model.I = -nx.incidence_matrix(I, oriented=True) # this returns a scipy sparse matrix
#Variables
#Transmission line power flow limits
def fl_inv(model, i, l):
    return (0, model.li_max_f)
model.li_f_inv = Var(model.E, model.LI, bounds = fl_inv)
#Transmission line investment and operation constraints
model.f = Var(model.LI, model.E, initialize=0)
def fl_rule(model, l, j, i):
    if i:
        return model.f[l,j] >= -(model.li_f_inv[j,l] + model.li_f[l])
    else:
        return model.f[l,j] <= (model.li_f_inv[j,l] + model.li_f[l])
model.bound_f = Constraint(model.LI, model.E, [0,1], rule=fl_rule)
##generation limit
def fg(model, i, g):
    return (0, model.ge_max[g])
model.ge_output = Var(model.E, model.G, initialize = 0, bounds = fg)
#phase angles for the nodes
def theta(model, e, n):
    for n in model.N:
        if n == model.ref:
            model.theta[e, n].fixed = True
            return model.vadegree
        else: return 0
model.theta = Var(model.E, model.N, initialize = theta)
def line_equation(model, l, e):
    return model.f[l, e] == model.base/model.li_x[l] *(sum(model.theta[e, n] for n in model.N if n == model.li_sending_bus[l]) - sum(model.theta[e, n] for n in model.N if n == model.li_receiving_bus[l]))
model.line_equation = Constraint(model.LI, model.E, rule = line_equation)
def system_balance(model, e, n):
    return sum(model.b[n, g] * model.ge_output[g] for g in model.G) \
        + sum(model.I[n, l] * model.f[l, e] for l in model.LI) \
        - sum(model.t_demand[n] - model.demand_curtailed[n]) == 0
model.SystemBalance = Constraint(model.E, model.N, rule=system_balance)
#OBJECTIVE FUNCTION
def objective_mincost(model):
    return sum( model.cum_disc_inv_cost[e] for e in model.E * sum (model.li_f_inv[e, l] * model.inv_cost_var[l] * model.li_length[l]) + model.cum_disc_op_cost[e] * (model.tau_period * (sum(model.ge_max[g] * (model.ge_marginal_cost[g])) + sum(model.demand_curtailed[n] * model.vll))))
model.objective = Objective(rule = objective_mincost, sense = minimize)
opt = SolverFactory('gurobi')
results = opt.solve(model) # solves and updates instance
model.display()
I got the following error messages from running the code:
ERROR: Rule failed when generating expression for constraint line_equation with index ('L2', 0):
    TypeError: Cannot convert object of type 'generator' (value = <generator object line_equation.<locals>.<genexpr> at 0x000001B6F840E360>) to a numeric value.
ERROR: Constructing component 'line_equation' from data=None failed:
    TypeError: Cannot convert object of type 'generator' (value = <generator object line_equation.<locals>.<genexpr> at 0x000001B6F840E360>) to a numeric value.
How do you suggest I solve it?
Thank you.
It looks like you are missing a sum() in your line equation definition.
'(model.base/model.li_x(l) for l in model.LI)' is a generator, not a numeric value; that is the syntax mistake here.
You should also check your DC power flow equations carefully.
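Concretely, a sketch of the corrected line equation: index the per-line reactance with square brackets and drop the stray generator, so the coefficient is a plain number for each line:
def line_equation(model, l, e):
    # base/li_x[l] is a scalar; the sums pick out the sending- and receiving-bus angles
    return model.f[l, e] == model.base / model.li_x[l] * (
        sum(model.theta[e, n] for n in model.N if n == model.li_sending_bus[l])
        - sum(model.theta[e, n] for n in model.N if n == model.li_receiving_bus[l]))
model.line_equation = Constraint(model.LI, model.E, rule=line_equation)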

Why I'm getting "TypeError: Failed to convert object of type <type 'dict'> to Tensor."?

I'm new to TF and ML.
Details about the data: features (x) are a (70 x 70 x 70) tensor for each sample; y is a float for each sample.
TFRecords created with the following code:
import json
import numpy as np
import tensorflow as tf

def convert_to_tf_records():
    def _bytes_feature(value):
        return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
    def _float64_feature(value):
        return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
    tfrecords_filename = 'A-100-h2-h2o.tfrecords'
    writer = tf.python_io.TFRecordWriter(tfrecords_filename)
    # Get data from db for now.
    db = connect('results-60-70.db')
    data = db.select(selection='Ti')
    i = 0
    for row in data:
        desc = np.array(json.loads(row.descriptor), dtype=np.float32)
        print(desc.shape)
        be = float(row.binding_energy) * 23 # Convert to Kcal/mol ?
        desc = desc.flatten()
        desc = desc.tostring()
        example = tf.train.Example(features=tf.train.Features(feature={'voxel_grid': _bytes_feature(desc), 'binding_energy': _float64_feature(be)}))
        writer.write(example.SerializeToString())
        i += 1
        if i >= 10:
            break
Input function:
def my_input_function(fname, perform_shuffle=False, repeat_count=None):
    def _parse_elements(example):
        features = tf.parse_single_example(example, features={'voxel_grid': tf.FixedLenFeature([], tf.string), 'binding_energy': tf.FixedLenFeature([], tf.float32)})
        vg = tf.decode_raw(features['voxel_grid'], tf.float32)
        vg = tf.reshape(vg, [70, 70, 70])
        vg = tf.convert_to_tensor(vg, dtype=tf.float32)
        vg = {'voxel_grid': vg}
        e = tf.cast(features['binding_energy'], tf.float32)
        return vg, e
    def input_function():
        dataset = tf.data.TFRecordDataset(fname).map(_parse_elements)
        dataset = dataset.repeat(repeat_count)
        dataset = dataset.batch(5)
        dataset = dataset.prefetch(1)
        if perform_shuffle:
            dataset.shuffle(20)
        iterator = dataset.make_one_shot_iterator()
        batch_features, batch_labels = iterator.get_next()
        return batch_features, batch_labels
    return input_function
Model function:
def my_model_function(features, labels, mode):
    if mode == tf.estimator.ModeKeys.PREDICT:
        tf.logging.info("my_model_fn: PREDICT, {}".format(mode))
    elif mode == tf.estimator.ModeKeys.EVAL:
        tf.logging.info("my_model_fn: EVAL, {}".format(mode))
    elif mode == tf.estimator.ModeKeys.TRAIN:
        tf.logging.info("my_model_fn: TRAIN, {}".format(mode))
    feature_columns = [tf.feature_column.numeric_column('voxel_grid', shape=(70, 70, 70), dtype=tf.float32)]
    # Create the layer of input
    input_layer = tf.feature_column.input_layer(features, feature_columns)
    input_layer = tf.reshape(input_layer, [-1, 70, 70, 70, 1])
    # Convolution layers
    conv1 = tf.layers.conv3d(inputs=input_layer, strides=(2, 2, 2), filters=32, kernel_size=(7, 7, 7))
    conv2 = tf.layers.conv3d(inputs=conv1, strides=(2, 2, 2), filters=32, kernel_size=(7, 7, 7))
    pool3 = tf.layers.max_pooling3d(inputs=conv2, pool_size=[2, 2, 2], strides=2)
    flat = tf.layers.flatten(pool3)
    dense1 = tf.layers.dense(inputs=flat, units=10, activation=tf.nn.relu)
    dense2 = tf.layers.dense(inputs=dense1, units=10, activation=tf.nn.relu)
    output = tf.layers.dense(inputs=dense2, units=1)
    predictions = {'binding_energy': output}
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
    # Calculate loss
    loss = tf.losses.mean_squared_error(labels=labels, predictions=predictions)
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
        train_op = optimizer.minimize(loss=loss, global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
    # Add evaluation metrics
    eval_metric_ops = {"mse": tf.metrics.mean_squared_error(labels=labels, predictions=predictions['binding_energy'])}
    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
When calling model.train using
model = tf.estimator.Estimator(model_fn=my_model_function, model_dir='./model_dir')
model.train(input_fn=my_input_function('A-100-h2-h2o.tfrecords'), steps=100)
I get the following error.
TypeError: Failed to convert object of type <type 'dict'> to Tensor.
Found it!
changing
# Calculate loss
loss = tf.losses.mean_squared_error(labels=labels, predictions=predictions)
to
# Calculate loss
loss = tf.losses.mean_squared_error(labels=labels, predictions=predictions['binding_energy'])
solves the issue. predictions is a dict ({'binding_energy': output}), while tf.losses.mean_squared_error expects a tensor, so TensorFlow failed when it tried to convert the dict to a Tensor.