I'm encountering some issues trying to translate a Concrete Model into an Abstract one.
from pyomo.environ import *

Months = RangeSet(6)
RequiredHours = {1: 8000, 2: 9000, 3: 9800, 4: 9900, 5: 10050, 6: 10500}
Notifications = {1: 2, 2: 0, 3: 2, 4: 0, 5: 1, 6: 0}
Costs = {'Attendants': 5100, 'Trainees': 3600}
Hours = {'Attendants': 150, 'Trainees': 25}
TraineeMax = 5
model = ConcreteModel()
model.TraineesNb = Var(Months,domain=NonNegativeIntegers,bounds=(0,TraineeMax))
AvailableAttendants = {}
AvailableTrainees = {}
for m in Months:
    if m == 1:
        AvailableAttendants[m] = 62
        AvailableTrainees[m] = model.TraineesNb[m]
    if m == 2:
        AvailableAttendants[m] = AvailableAttendants[m-1] - Notifications[m-1]
        AvailableTrainees[m] = model.TraineesNb[m] + model.TraineesNb[m-1]
    if m > 2:
        AvailableAttendants[m] = AvailableAttendants[m-1] - Notifications[m-1] + model.TraineesNb[m-2]
        AvailableTrainees[m] = model.TraineesNb[m] + model.TraineesNb[m-1]
# Objective
model.Costs = Objective(
    expr=sum(AvailableAttendants[m] for m in Months)*Costs["Attendants"] +
         sum(AvailableTrainees[m] for m in Months)*Costs["Trainees"],
    sense=minimize)
# declare constraints
model.NeededHours = ConstraintList()
for m in Months:
    model.NeededHours.add(expr=AvailableAttendants[m]*Hours["Attendants"] +
                          AvailableTrainees[m]*Hours["Trainees"] >= RequiredHours[m])
The concrete model produces the expected results (perhaps not the most elegant code, but it works). In the Abstract version of this model, an error occurs at the loop [for m in model.Months].
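For context, this is the kind of structure I am aiming for in the abstract version, where the data dictionaries become Params and the loop becomes a rule (a rough, untested sketch that reuses the data above; the closed-form expressions are my attempt to replace the recursive bookkeeping):

from pyomo.environ import *

abstract = AbstractModel()
abstract.Months = RangeSet(6)
abstract.RequiredHours = Param(abstract.Months)
abstract.Notifications = Param(abstract.Months)
abstract.TraineesNb = Var(abstract.Months, domain=NonNegativeIntegers, bounds=(0, TraineeMax))

# closed-form equivalents of the recursive bookkeeping in the concrete model
def available_attendants(mod, m):
    return (62
            - sum(mod.Notifications[k] for k in range(1, m))
            + sum(mod.TraineesNb[k] for k in range(1, m - 1)))

def available_trainees(mod, m):
    return sum(mod.TraineesNb[k] for k in range(max(1, m - 1), m + 1))

# in an abstract model the constraint is declared through a rule instead of a
# Python loop, because abstract.Months has no members until create_instance()
def needed_hours_rule(mod, m):
    return (available_attendants(mod, m) * Hours["Attendants"]
            + available_trainees(mod, m) * Hours["Trainees"]) >= mod.RequiredHours[m]

abstract.NeededHours = Constraint(abstract.Months, rule=needed_hours_rule)

def cost_rule(mod):
    return (sum(available_attendants(mod, m) for m in mod.Months) * Costs["Attendants"]
            + sum(available_trainees(mod, m) for m in mod.Months) * Costs["Trainees"])

abstract.TotalCost = Objective(rule=cost_rule, sense=minimize)

instance = abstract.create_instance(
    data={None: {'RequiredHours': RequiredHours, 'Notifications': Notifications}})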
Thanks for your help and comments
Naji
I am new to Pyomo. I would like to ask if there is a way to achieve the following requirement.
I want my assets to be assigned to 5 different bins (years). Each bin has a max capacity; for example, y1 has max 50, y2 has max 20, and so on.
Some of my assets can only go to certain bins. For example, A can only go to y1 or y2, and B can go to y4 or y5.
I want to minimise the number of bins used.
My current code is shown below, and every year ends up with at least 1 asset. But I would like only 2 or 3 of the years to be used (minimize the number of bins), and I would also like assets to be placed from the smallest year to the highest; a sketch of one possible formulation follows the code below.
import pyomo.environ as pyo
from pyomo.opt import SolverFactory
value_asset = {'J': 2, 'B': 4, 'D': 18, 'C': 34, 'A': 20, 'E': 31}
bins = {'y1': 50, 'y2': 20, 'y3': 30, 'y4': 70, 'y5': 40}
Assets = {'A': ['y1', 'y2'], 'J': ['y1', 'y2'], 'E': ["y4", "y5"], 'B': ["y4", "y5"],
          'D': ['y5', "y4", "y3"],
          'C': ["y1", "y2", 'y3', 'y4', 'y5']}
model = pyo.ConcreteModel()
model.Assets = pyo.Set(initialize=Assets.keys())
model.budget = pyo.Set(initialize=bins.keys())
model.x = pyo.Var(model.Assets, model.budget, within=pyo.Integers, bounds=(0, None))
model.less_budget = pyo.ConstraintList()
# make sure that all the total are always less than or equal to the budget
for b in model.budget:
    model.less_budget.add(expr=sum(model.x[asset, b]*value_asset[asset] for asset in model.Assets) <= bins[b])
# we want to exclude certain year that some assets cannot do
model.excluded = pyo.ConstraintList()
for asset in model.Assets:
    inc = Assets[asset]
    exc = list(bins.keys() - inc)
    for t in exc:
        model.excluded.add(expr=model.x[asset, t] == 0)
# each item can only go to 1 bin
model.one_bins = pyo.ConstraintList()
for asset in model.Assets:
    model.one_bins.add(expr=sum(model.x[asset, b] for b in model.budget) <= 1)
model.obj = pyo.Objective(expr=sum(model.x[asset, b] for asset in model.Assets for b in model.budget),sense=pyo.maximize)
solver = pyo.SolverFactory('cbc', executable=r'C:\Users\cc\Downloads\Cbc-2.10-win64-msvc15-md\bin\cbc.exe')
solver.solve(model)
model.x.display()
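One common way to express "use as few bins as possible" is to add a binary used indicator per bin and minimize its sum. The sketch below is only an illustration of that idea (it reuses the value_asset, bins and Assets dictionaries above, assumes a cbc solver on the PATH or the executable path shown earlier, and the component names assign, used, place_once and capacity are mine):

import pyomo.environ as pyo

mdl = pyo.ConcreteModel()
mdl.Assets = pyo.Set(initialize=Assets.keys())
mdl.Bins = pyo.Set(initialize=bins.keys())
mdl.assign = pyo.Var(mdl.Assets, mdl.Bins, within=pyo.Binary)  # 1 if an asset is placed in a bin
mdl.used = pyo.Var(mdl.Bins, within=pyo.Binary)                # 1 if a bin holds anything at all

# disallowed (asset, bin) pairs are simply fixed to zero
for a in Assets:
    for b in set(bins) - set(Assets[a]):
        mdl.assign[a, b].fix(0)

# every asset must be placed in exactly one of its allowed bins
def place_once_rule(mdl, a):
    return sum(mdl.assign[a, b] for b in Assets[a]) == 1
mdl.place_once = pyo.Constraint(mdl.Assets, rule=place_once_rule)

# capacity, tied to the used indicator so that an unused bin must stay empty
def capacity_rule(mdl, b):
    return sum(value_asset[a] * mdl.assign[a, b] for a in mdl.Assets) <= bins[b] * mdl.used[b]
mdl.capacity = pyo.Constraint(mdl.Bins, rule=capacity_rule)

# objective: use as few bins as possible
mdl.obj = pyo.Objective(expr=sum(mdl.used[b] for b in mdl.Bins), sense=pyo.minimize)

pyo.SolverFactory('cbc').solve(mdl)
mdl.assign.display()

To prefer the smaller years, a small secondary penalty (for example, a tiny weight times the year index on each used variable) could be added to the objective.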
I'm new to TF and ML.
Details about the data: the features (x) are a (70 x 70 x 70) tensor for each sample, and y is a single float for each sample.
TFRecords created with the following code:
def convert_to_tf_records():
    def _bytes_feature(value):
        return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

    def _float64_feature(value):
        return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))

    tfrecords_filename = 'A-100-h2-h2o.tfrecords'
    writer = tf.python_io.TFRecordWriter(tfrecords_filename)

    # Get data from db for now.
    db = connect('results-60-70.db')
    data = db.select(selection='Ti')

    i = 0
    for row in data:
        desc = np.array(json.loads(row.descriptor), dtype=np.float32)
        print(desc.shape)
        be = float(row.binding_energy) * 23  # Convert to Kcal/mol ?
        desc = desc.flatten()
        desc = desc.tostring()
        example = tf.train.Example(features=tf.train.Features(feature={'voxel_grid': _bytes_feature(desc), 'binding_energy': _float64_feature(be)}))
        writer.write(example.SerializeToString())
        i += 1
        if i >= 10:
            break
    writer.close()  # flush the records to disk
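As a quick sanity check on the file produced above (a small snippet of my own, not part of the original code), the serialized examples can be counted with the record iterator; given the break at i >= 10 it should print 10:

import tensorflow as tf

count = sum(1 for _ in tf.python_io.tf_record_iterator('A-100-h2-h2o.tfrecords'))
print(count)  # expected: 10, because the loop above stops after 10 rows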
Input function:
def my_input_function(fname, perform_shuffle=False, repeat_count=None):
    def _parse_elements(example):
        features = tf.parse_single_example(example, features={'voxel_grid': tf.FixedLenFeature([], tf.string), 'binding_energy': tf.FixedLenFeature([], tf.float32)})
        vg = tf.decode_raw(features['voxel_grid'], tf.float32)
        vg = tf.reshape(vg, [70, 70, 70])
        vg = tf.convert_to_tensor(vg, dtype=tf.float32)
        vg = {'voxel_grid': vg}
        e = tf.cast(features['binding_energy'], tf.float32)
        return vg, e

    def input_function():
        dataset = tf.data.TFRecordDataset(fname).map(_parse_elements)
        dataset = dataset.repeat(repeat_count)
        dataset = dataset.batch(5)
        dataset = dataset.prefetch(1)
        if perform_shuffle:
            dataset = dataset.shuffle(20)  # reassign, otherwise the shuffle is dropped
        iterator = dataset.make_one_shot_iterator()
        batch_features, batch_labels = iterator.get_next()
        return batch_features, batch_labels

    return input_function
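For reference, a quick way to see exactly what this input function hands to the estimator is to run it once in a plain session (a small debugging sketch of my own; it assumes the TFRecord file written above and the batch size of 5 used in the pipeline):

import tensorflow as tf

input_fn = my_input_function('A-100-h2-h2o.tfrecords', repeat_count=1)
batch_features, batch_labels = input_fn()
with tf.Session() as sess:
    f, l = sess.run([batch_features, batch_labels])
    print(f['voxel_grid'].shape, l.shape)  # expected: (5, 70, 70, 70) and (5,)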
Model function:
def my_model_function(features, labels, mode):
    if mode == tf.estimator.ModeKeys.PREDICT:
        tf.logging.info("my_model_fn: PREDICT, {}".format(mode))
    elif mode == tf.estimator.ModeKeys.EVAL:
        tf.logging.info("my_model_fn: EVAL, {}".format(mode))
    elif mode == tf.estimator.ModeKeys.TRAIN:
        tf.logging.info("my_model_fn: TRAIN, {}".format(mode))

    feature_columns = [tf.feature_column.numeric_column('voxel_grid', shape=(70, 70, 70), dtype=tf.float32)]

    # Create the layer of input
    input_layer = tf.feature_column.input_layer(features, feature_columns)
    input_layer = tf.reshape(input_layer, [-1, 70, 70, 70, 1])

    # Convolution layers
    conv1 = tf.layers.conv3d(inputs=input_layer, strides=(2, 2, 2), filters=32, kernel_size=(7, 7, 7))
    conv2 = tf.layers.conv3d(inputs=conv1, strides=(2, 2, 2), filters=32, kernel_size=(7, 7, 7))
    pool3 = tf.layers.max_pooling3d(inputs=conv2, pool_size=[2, 2, 2], strides=2)
    flat = tf.layers.flatten(pool3)
    dense1 = tf.layers.dense(inputs=flat, units=10, activation=tf.nn.relu)
    dense2 = tf.layers.dense(inputs=dense1, units=10, activation=tf.nn.relu)
    output = tf.layers.dense(inputs=dense2, units=1)

    predictions = {'binding_energy': output}

    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)

    # Calculate loss
    loss = tf.losses.mean_squared_error(labels=labels, predictions=predictions)

    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
        train_op = optimizer.minimize(loss=loss, global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)

    # Add evaluation metrics
    eval_metric_ops = {"mse": tf.metrics.mean_squared_error(labels=labels, predictions=predictions['binding_energy'])}
    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
When calling model.train using
model = tf.estimator.Estimator(model_fn=my_model_function, model_dir='./model_dir')
model.train(input_fn=my_input_function('A-100-h2-h2o.tfrecords'), steps=100)
I get the following error.
TypeError: Failed to convert object of type <class 'dict'> to Tensor.
Found it!
changing
# Calculate loss
loss = tf.losses.mean_squared_error(labels=labels, predictions=predictions)
to
# Calculate loss
loss = tf.losses.mean_squared_error(labels=labels, predictions=predictions['binding_energy'])
solves the issue, since the loss function expects a Tensor for predictions rather than the dict that was being passed in.
I'm trying to plot two files of data of this type:
name1.fits 0 0 2.40359218172
name2.fits 0 0 2.15961244263
The third column has values from 0 to 5. I want to plot column 2 vs column 4, but for lines with a value in column 3 less than 2 (0 or 1) I want to shift column 2 by -0.1, and for lines with a value greater than 3 (4 or 5) I want to shift it by +0.1.
However my code seems to be shifting all values by +0.1. Here is what I have so far:
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import AutoMinorLocator
with open('file1.txt') as data, open('file2.txt') as stds:
    lines1 = data.readlines()
    lines2 = stds.readlines()
x1a = []
x2a = []
x1b = []
x2b = []
x1c = []
x2c = []
y1a = []
y2a = []
y1b = []
y2b = []
y1c = []
y2c = []
for line1 in lines1:
    p = line1.split()
    if p[2] < 2:
        x1a.append(float(p[1]))
        y1a.append(float(p[3]))
    elif 1 < p[2] < 4:
        x1b.append(float(p[1]))
        y1b.append(float(p[3]))
    elif p[2] > 3:
        x1c.append(float(p[1]))
        y1c.append(float(p[3]))

for line2 in lines2:
    q = line2.split()
    if q[2] < 2:
        x2a.append(float(q[1]))
        y2a.append(float(q[3]))
    elif 1 < q[2] < 4:
        x2b.append(float(q[1]))
        y2b.append(float(q[3]))
    elif q[2] > 3:
        x2c.append(float(q[1]))
        y2c.append(float(q[3]))
x1a = np.array(x1a)
x2a = np.array(x2a)
x1b = np.array(x1b)
x2b = np.array(x2b)
x1c = np.array(x1c)
x2c = np.array(x2c)
y1a = np.array(y1a)
y2a = np.array(y2a)
y1b = np.array(y1b)
y2b = np.array(y2b)
y1c = np.array(y1c)
y2c = np.array(y2c)
minorLocator = AutoMinorLocator(5)
fig, ax = plt.subplots(figsize=(8, 8))
fig.subplots_adjust(left=0.11, right=0.95, top=0.94)
plt.plot(x1a-0.1,y1a,'b^',mec='blue',label=r'B0',ms=8)
plt.plot(x2a-0.1,y2a,'r^',mec='red',fillstyle='none',mew=0.8,ms=8)
plt.plot(x1b,y1b,'bo',mec='blue',label=r'B0',ms=8)
plt.plot(x2b,y2b,'ro',mec='red',fillstyle='none',mew=0.8,ms=8)
plt.plot(x1c+0.1,y1c,'bx',mec='blue',label=r'B0',ms=8)
plt.plot(x2c+0.1,y2c,'rx',mec='red',fillstyle='none',mew=0.8,ms=8)
plt.axis([-1.0, 3.0, 0., 4])
ax.xaxis.set_tick_params(labeltop='on')
ax.yaxis.set_minor_locator(minorLocator)
plt.show()
Here is the plot (image not included here).
I'm pretty sure the problem is in my "ifs". I hope you can clear this up and/or show me a better option for doing this.
In your if checks you must make sure the string is converted to a number before the comparison, so this:
for line1 in lines1:
    p = line1.split()
    if p[2] < 2:
        x1a.append(float(p[1]))
        y1a.append(float(p[3]))
    elif 1 < p[2] < 4:
        x1b.append(float(p[1]))
        y1b.append(float(p[3]))
    elif p[2] > 3:
        x1c.append(float(p[1]))
        y1c.append(float(p[3]))
should actually be:
for line1 in lines1:
    p = line1.split()
    if float(p[2]) < 2:  # changed here
        x1a.append(float(p[1]))
        y1a.append(float(p[3]))
    elif 1 < float(p[2]) < 4:  # There seems to be a problem with this if
        x1b.append(float(p[1]))
        y1b.append(float(p[3]))
    elif float(p[2]) > 3:  # changed here
        x1c.append(float(p[1]))
        y1c.append(float(p[3]))
The same goes for your q variables. Also notice that the condition 1 < x < 4 overlaps with x > 3 and with x < 2; you should correct that as well.
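As an aside, since the question also asks for a better option: the shifting logic can be written more compactly with numpy (a rough sketch assuming the four-column layout shown above, with the per-group marker styles left out):

import numpy as np
import matplotlib.pyplot as plt

def load_shifted(fname):
    # column 0 is the filename string, so read only the numeric columns
    x, flag, y = np.loadtxt(fname, usecols=(1, 2, 3), unpack=True)
    # shift x by -0.1 where the flag is below 2 and by +0.1 where it is above 3
    x = x + np.where(flag < 2, -0.1, np.where(flag > 3, 0.1, 0.0))
    return x, y

x1, y1 = load_shifted('file1.txt')
x2, y2 = load_shifted('file2.txt')
plt.plot(x1, y1, 'b^', ms=8)
plt.plot(x2, y2, 'r^', mec='red', fillstyle='none', mew=0.8, ms=8)
plt.show()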
I have a script as follows:
import numpy as np
import pandas as pd
import pdb
# conventions: W = fitness, A = affinity ; sex: 1=M, 0=F; alien: 1=alien,
# 0=native
# pop array order: W, A, sex, alien
def mkpop(n):
    W = np.repeat(a=1, repeats=n)
    A = np.random.normal(1, 0.1, size=n)
    A[A < 0] = 0
    alien = np.repeat(a=False, repeats=n)
    sex = np.random.randint(0, 2, n)
    pop = np.array([W, A, sex, alien])
    pop = np.transpose(pop)
    return pop
def migrate(pop, n=10, gParams=[1, 0.1]):
    W = np.random.gamma(shape=gParams[0], scale=gParams[1], size=n)
    A = np.repeat(1, n)
    # 0 is native; 1 is alien
    alien = np.repeat(True, n)
    # 0 is female
    sex = np.random.randint(0, 2, n)
    popAlien = np.array([W, A, sex, alien])
    popAlien = np.transpose(popAlien)
    pop = np.vstack((pop, popAlien))
    return pop
def mate(pop):
    # split into male and female
    f = pop[pop[:, 2] == 0]
    m = pop[pop[:, 2] == 1]
    # create transition matrices for native and alien mates
    # m with native = m.!alien.transpose * f.alien
    # negate alien
    naLog = list(np.asarray(m[:, 3]) == False)
    naPdMat = np.outer(naLog, f[:, 1])
    # mate with alien = m.alien.transpose * affinity
    alPdMat = np.outer(m[:, 3], f[:, 1])
    # add transition matrices for probability density matrix
    pdMat = alPdMat + naPdMat
    # transition matrix is equal to the pd matrix / column sums
    colSums = np.sum(pdMat, axis=0)
    pMat = pdMat / colSums

    # select mates
    def choice(x):
        ch = np.random.choice(a=range(0, len(x)), p=x)
        return ch

    mCh = np.apply_along_axis(choice, 0, pMat)
    mCh = m[mCh, :]
    WMid = (f[:, 0] + mCh[:, 0]) / 2
    AMid = (f[:, 1] + mCh[:, 1]) / 2
    # assign fitness based on group affiliation; only native/alien matings have
    # modified fitness
    # reassign fitness and affinity based on group id and midparent vals
    W1 = np.where(
        (f[:, 3] == mCh[:, 3]) |
        ((f[:, 3] == 1) & (mCh[:, 3] == 0))
    )
    WMid[W1] = 1
    # number of offspring is a poisson-distributed variable with lambda=2W
    nOff = map(lambda x: np.random.poisson(lam=x), 2 * WMid)
    # generate offspring
    # expand list of nOff to numbers of offspring per pair
    # realized offspring is index positions of W and A vals to be replicated
    # for offspring
    # this can be rewritten to return a matrix of the appropriate length. This
    # should work
    midVals = np.array([WMid, AMid]).T
    realOff = np.array([0, 0])
    for i in range(0, len(nOff)):
        sibs = np.repeat([np.array(midVals[i])], [nOff[i]], axis=0)
        realOff = np.vstack((realOff, sibs))
    offspring = np.delete(realOff, 0, 0)
    sex = np.random.randint(0, 2, len(offspring))
    alien = np.repeat(0, len(offspring))
    otherStats = np.array([sex, alien]).T
    offspring = np.hstack([offspring, otherStats])
    return offspring  # should return offspring
    def sim(nInit, nGen=100, nAlien=10, gParams=[1, 0.1]):
        gen = 0
        pop = mkpop
        stats = pd.DataFrame(columns=('gen', 'W', 'WMean', 'AMean', 'WVar', 'AVar'))
        while gen < nGen:
            pop = migrate(pop, nAlien, gParams)
            offspring = mate(pop)
            var = np.var(offspring, axis=0)
            mean = np.mean(offspring, axis=0)
            N = len(offspring)
            W = N / nInit
            genStats = N.append(W, gen, mean, var)
            stats = stats.append(genStats)
            print(N, gen)
            gen = gen + 1
        return stats
print mkpop(100)
print mate(mkpop(100))
#
sim(100, 100, 10, [1, 0.1])
Running this script outputs NameError: name 'sim' is not defined. It is apparent from the commands before the final one that all the other functions defined within this script work without a hitch. I'm not sure what is going on here, and there is probably some very easy fix that I'm overlooking. Ctags recognizes this function just fine. It's entirely possible that sim() doesn't actually work yet, as I haven't been able to debug it.
Your sim function is defined inside the mate function's scope, so it is invisible at global scope. You need to fix the indentation so that sim is defined at the top level of the module.
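A minimal illustration of that scoping behaviour (a toy example, not the original code):

def outer():
    def inner():      # defined inside outer, so the name only exists in outer's scope
        return "hi"
    return inner()

print(outer())        # works: outer can see inner
inner()               # NameError: name 'inner' is not defined at module level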
For this function, I'm trying to write one set of tests that covers Decision coverage and another set that covers Condition coverage. I'm having trouble differentiating the two. For condition coverage, I have this:
Test case 1: A = 3, B = 4, C = 1 :: Line 4 True / True
Test case 2: A = 3, B = 4, C = 5 :: Line 4 True / False
Test case 3: A = 4, B = 3, C = 1 :: Line 4 False / True
Test case 4: A = 3, B = 4, C = 3 :: Line 5 True / True
Test case 5: A = 3, B = 4, C = 1 :: Line 5 True / False
Test case 6: A = 3, B = 3, C = 1 :: Line 5 False / False
Test case 7: A = 3, B = 3, C = 3 :: Line 6 True / True
Test case 8: A = 3, B = 3, C = 1 :: Line 6 True / False
Test case 9: A = 3, B = 10, C = 1 :: Line 6 False / False
Would this be correct for condition coverage?
EDIT --------------
This is my Decision coverage:
Test case 1: A = 4, B = 6, C = 2
Test case 2: A = 4, B = 6, C = 4
Test case 3: A = 4, B = 2, C = 4
Is this correct for Decision?