Is this method of calculating the top-5 accuracy in PyTorch correct? - computer-vision

I am trying to validate the findings of a paper by testing the same model architecture on the same dataset the paper reports. I have been using the ImageNet script from the official PyTorch repository's examples section to do this.
class AverageMeter(object):
    """Computes and stores the average and current value.
    Imported from https://github.com/pytorch/examples/blob/master/imagenet/main.py#L247-L262
    """
    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k."""
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)  # top-k class indices per sample
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul(100.0 / batch_size))
    return res
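
# Sanity check added for illustration (not part of the original script); the
# 3-class logits and labels below are made up. Sample 0's top-1 prediction is
# class 2 (correct) and sample 1's is class 0 (wrong), so the expected results
# are top-1 = 50% and top-2 = 100%:
#
#   out = torch.tensor([[0.1, 0.2, 0.7], [0.8, 0.1, 0.1]])
#   tgt = torch.tensor([2, 1])
#   accuracy(out, tgt, topk=(1, 2))  # -> [tensor(50.), tensor(100.)]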
top1 = AverageMeter()
top5 = AverageMeter()

# switch to evaluate mode
model.eval()

with torch.no_grad():
    for batch_idx, (inputs, targets) in enumerate(test_loader):
        print(f"Processing {batch_idx+1}/{len(test_loader)}")
        inputs, targets = inputs.cuda(), targets.cuda()
        # (Variable and volatile=True are deprecated; torch.no_grad() above
        # already disables gradient tracking)
        # compute output
        outputs = model(inputs)
        # measure accuracy
        prec1, prec5 = accuracy(outputs, targets, topk=(1, 5))
        print(prec1, prec5)
        top1.update(prec1.item(), inputs.size(0))
        top5.update(prec5.item(), inputs.size(0))

print(top1.avg)
print(top5.avg)
However, the top-5 error I get from this script does not match the one reported in the paper. Can anyone tell me what is wrong in this particular snippet?

Related

Redis session does not store variables when modified in thread - Flask

I have a thread running inside a route; the thread's job is to do some expensive work and store variables, and I then need to use those variables in another Flask route.
When I pass the (Redis-backed) session variable into the thread function so it can add data there for later retrieval, the variables I stored in it cannot be found afterwards.
In contrast, when I declare a global_dict and pass it to the thread function instead of session, the code works great.
Since the thread function can be used by multiple users simultaneously, storing results in a global_dict is not good practice.
Why does using session in my code not work?
In the following code, if I replace global_dict with session, I am unable to access the data in the /result route.
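Here is a minimal sketch of the behavior (a simplified, hypothetical reproduction with made-up names, not the real app below): a write to a plain dict from the background thread can be read later, while the same write through session is not found, presumably because the session is saved with the response before the thread finishes.
from threading import Thread
from flask import Flask

app = Flask(__name__)
results = {}  # process-global dict: visible to every thread in this worker

@app.route("/start")
def start():
    def work(store):
        store["answer"] = 42  # this write survives: the dict outlives the request
        # session["answer"] = 42 here is lost: the session is serialized into
        # the /start response before this background thread finishes
    Thread(target=work, args=[results]).start()
    return "started"

@app.route("/result")
def result():
    return str(results.get("answer"))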
Per doc:
"Redis can handle up to 2^32 keys, and was tested in practice to handle at least 250 million keys per instance.
Every hash, list, set, and sorted set, can hold 2^32 elements.
In other words your limit is likely the available memory in your system."
P.S. Sorry for the long code blocks.
@app.route("/build", methods=["GET", "POST"])
@login_required
def build():
    if request.method == "POST":
        global th
        global finished
        finished = False

        @copy_current_request_context
        def operation(global_dict):
            global finished
            symbols = request.form.get("symbols")
            mc.set("symbols", symbols)
            if contains_multiple_words(symbols) == False:
                flash("The app purpose is to optimize a portfolio given a list of stocks. Please enter a list of stocks separated by a new row.")
                return redirect("/build")
            Build(session["user_id"], symbols.upper(), request.form.get("start"), request.form.get("end"), request.form.get("funds"), request.form.get("short"), request.form.get("volatility"), request.form.get("gamma"), request.form.get("return"))
            db.session.commit()
            try:
                df = yf.download(symbols, start=request.form.get("start"), end=request.form.get("end"), auto_adjust=False, prepost=False, threads=True, proxy=None)["Adj Close"].dropna(axis=1, how='all')
                failed = list(shared._ERRORS.keys())
                df = df.replace(0, np.nan)
                try:
                    global_dict['listofna'] = df.columns[df.isna().iloc[-2]].tolist() + failed
                except IndexError:
                    flash("Please enter valid stocks from Yahoo Finance.")
                    return redirect("/build")
                df = df.loc[:, df.iloc[-2, :].notna()]
            except ValueError:
                flash("Please enter valid symbols (taken from Yahoo Finance)")
                return redirect("/build")
            def enter_sql_data(app, df, nasdaq_exchange_info, Stocks):
                for ticker in df.columns:
                    ticker = ticker.upper()
                    if any(sublist[1] == ticker in sublist for sublist in nasdaq_exchange_info) is False:
                        ticker_ln = yf.Ticker(ticker).stats()["price"].get('longName')
                        if not ticker_ln:
                            ticker_ln = ticker
                        ticker_list = [ticker_ln, ticker]
                        with app.app_context():
                            new_stock = Stocks(ticker, ticker_ln)
                            db.session.add(new_stock)
                            db.session.commit()
                        nasdaq_exchange_info.extend([ticker_list])

            global nasdaq_exchange_info
            app1 = app._get_current_object()
            p1 = Process(target=enter_sql_data, args=[app1, df, nasdaq_exchange_info, Stocks])
            p1.start()
            prices = df.copy()
            fig = px.line(prices, x=prices.index, y=prices.columns, title='Price Graph')
            fig = fig.update_xaxes(rangeslider_visible=True)
            fig.update_layout(width=1350, height=900)
            global_dict['plot_json'] = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
            exp_cov = risk_models.exp_cov(prices, frequency=252)
            # plotting the covariance matrix
            heat = go.Heatmap(
                z=risk_models.cov_to_corr(exp_cov),
                x=exp_cov.columns.values,
                y=exp_cov.columns.values,
                zmin=0,  # Sets the lower bound of the color domain
                zmax=1,
                xgap=1,  # Sets the horizontal gap (in pixels) between bricks
                ygap=1,
                colorscale='RdBu'
            )
            title = 'Covariance matrix'
            layout = go.Layout(
                title_text=title,
                title_x=0.5,
                width=800,
                height=800,
                xaxis_showgrid=False,
                yaxis_showgrid=False,
                yaxis_autorange='reversed'
            )
            fig1 = go.Figure(data=[heat], layout=layout)
            fig1.update_layout(width=500, height=500)
            global_dict['plot_json1'] = json.dumps(fig1, cls=plotly.utils.PlotlyJSONEncoder)
            S = risk_models.CovarianceShrinkage(prices).ledoit_wolf()
            heat = go.Heatmap(
                z=risk_models.cov_to_corr(S),
                x=S.columns.values,
                y=S.columns.values,
                zmin=0,  # Sets the lower bound of the color domain
                zmax=1,
                xgap=1,  # Sets the horizontal gap (in pixels) between bricks
                ygap=1,
                colorscale='RdBu'
            )
            title = 'Ledoit-Wolf shrinkage'
            layout = go.Layout(
                title_text=title,
                title_x=0.5,
                width=800,
                height=800,
                xaxis_showgrid=False,
                yaxis_showgrid=False,
                yaxis_autorange='reversed'
            )
            fig2 = go.Figure(data=[heat], layout=layout)
            fig2.update_layout(width=500, height=500)
            global_dict['plot_json2'] = json.dumps(fig2, cls=plotly.utils.PlotlyJSONEncoder)
            # Section 2 - Return estimation
            # It is often a bad idea to provide returns using a simple estimate like the mean of past
            # returns. Research suggests you are better off not providing expected returns - you can
            # then just find the min_volatility() portfolio or use HRP.
            mu = pypfopt.expected_returns.capm_return(prices)
            fig3 = px.bar(mu, orientation='h')
            fig3.update_layout(width=700, height=500)
            global_dict['plot_json3'] = json.dumps(fig3, cls=plotly.utils.PlotlyJSONEncoder)
            # Using risk models optimized for the efficient frontier to reduce to min volatility,
            # good for cryptocurrencies - not implemented in the website now.
            ef = EfficientFrontier(None, S)
            try:
                ef.min_volatility()
                weights = ef.clean_weights()
                nu = pd.Series(weights)
                fig4 = px.bar(nu, orientation='h')
                fig4.update_layout(width=700, height=500)
                global_dict['plot_json4'] = json.dumps(fig4, cls=plotly.utils.PlotlyJSONEncoder)
                av = ef.portfolio_performance()[1]
                global_dict['av'] = round(av, 3)*1
                # if we want to buy the portfolio mentioned above
                df = df.iloc[[-1]]
                for col in df.columns:
                    if col.endswith(".L"):
                        df.loc[:, col] = df.loc[:, col]*GBPtoUSD()
                try:
                    latest_prices = df.iloc[-1]
                except IndexError:
                    flash("There is an issue with the Yahoo API, please try again later")
                    return redirect("/")
                # prices as of the day you are allocating
                if float(request.form.get("funds")) <= 0 or float(request.form.get("funds")) == " ":
                    flash("Amount needs to be a positive number")
                    return redirect("/build")
                if float(request.form.get("funds")) < float(latest_prices.min()):
                    flash("Amount is not high enough to cover the lowest priced stock")
                    return redirect("/build")
                try:
                    da = DiscreteAllocation(weights, latest_prices, total_portfolio_value=float(request.form.get("funds")))
                except TypeError:
                    delisted = df.columns[df.isna().any()].tolist()
                    delisted = ", ".join(delisted)
                    flash("Can't get latest prices for the following stock/s, please remove to continue: %s" % delisted)
                    return redirect("/build")
                alloc, global_dict['leftover'] = da.lp_portfolio()
                global_dict['alloc'] = alloc
                global_dict['latest_prices'] = latest_prices
            except ValueError:
                pass
            # Maximise return for a given risk, with L2 regularisation
            try:
                ef = EfficientFrontier(mu, S)
                ef.add_objective(objective_functions.L2_reg, gamma=(float(request.form.get("gamma"))))  # gamma is the tuning parameter
                ef.efficient_risk(int(request.form.get("volatility"))/100)
                weights = ef.clean_weights()
                su = pd.DataFrame([weights])
                fig5 = px.pie(su, values=weights.values(), names=su.columns)
                fig5.update_traces(textposition='inside')
                fig5.update_layout(width=500, height=500, uniformtext_minsize=12, uniformtext_mode='hide')
                global_dict['plot_json5'] = json.dumps(fig5, cls=plotly.utils.PlotlyJSONEncoder)
                global_dict['perf'] = ef.portfolio_performance()
            except Exception as e:
                flash(str(e))
                return redirect("/build")
            # if we want to buy the portfolio mentioned above
            for col in df.columns:
                if col.endswith(".L"):
                    df.loc[:, col] = df.loc[:, col]*GBPtoUSD()
            latest_prices1 = df.iloc[-1]  # prices as of the day you are allocating
            if float(request.form.get("funds")) <= 0 or float(request.form.get("funds")) == " ":
                flash("Amount needs to be a positive number")
                return redirect("/build")
            if float(request.form.get("funds")) < float(latest_prices.min()):
                flash("Amount is not high enough to cover the lowest priced stock")
                return redirect("/build")
            da = DiscreteAllocation(weights, latest_prices, total_portfolio_value=float(request.form.get("funds")))
            alloc1, global_dict['leftover1'] = da.lp_portfolio()
            global_dict['alloc1'] = alloc1
            global_dict['latest_prices1'] = latest_prices1
            # Efficient semi-variance optimization
            returns = pypfopt.expected_returns.returns_from_prices(prices)
            returns = returns.dropna()
            es = EfficientSemivariance(mu, returns)
            try:
                es.efficient_return(float(request.form.get("return"))/100)
            except ValueError as e:
                flash(str(e))
                return redirect("/build")
            global_dict['perf2'] = es.portfolio_performance()
            weights = es.clean_weights()
            # if we want to buy the portfolio mentioned above
            for col in df.columns:
                if col.endswith(".L"):
                    df.loc[:, col] = df.loc[:, col]*GBPtoUSD()
            latest_prices2 = df.iloc[-1]  # prices as of the day you are allocating
            if float(request.form.get("funds")) <= 0 or float(request.form.get("funds")) == " ":
                flash("Amount needs to be a positive number")
                return redirect("/build")
            if float(request.form.get("funds")) < float(latest_prices.min()):
                flash("Amount is not high enough to cover the lowest priced stock")
                return redirect("/build")
            da = DiscreteAllocation(weights, latest_prices, total_portfolio_value=float(request.form.get("funds")))
            alloc2, global_dict['leftover2'] = da.lp_portfolio()
            global_dict['alloc2'] = alloc2
            global_dict['latest_prices2'] = latest_prices2
            mc.delete("symbols")
            global_dict['ret'] = float(request.form.get("return"))
            global_dict['gamma'] = request.form.get("gamma")
            global_dict['volatility'] = request.form.get("volatility")
            finished = True

        global global_dict
        th = Thread(target=operation, args=[global_dict])
        th.start()
        return render_template("loading.html")
    else:
        if mc.get("symbols"):
            cached_symbols = mc.get("symbols")
        else:
            cached_symbols = ''
        availableCash = db.session.query(Users.cash).filter_by(id=session["user_id"]).first().cash
        return render_template("build.html", availableCash=round(availableCash, 4), GBP=GBPtoUSD(), nasdaq_exchange_info=nasdaq_exchange_info, cached_symbols=cached_symbols, top_50_crypto=top_50_crypto, top_world_stocks=top_world_stocks, top_US_stocks=top_US_stocks, top_div=top_div)
@app.route('/result')
def result():
    return render_template("built.html", av=global_dict['av'], leftover=global_dict['leftover'], alloc=global_dict['alloc'], ret=global_dict['ret'], gamma=global_dict['gamma'], volatility=global_dict['volatility'], perf=global_dict['perf'], perf2=global_dict['perf2'], alloc1=global_dict['alloc1'], alloc2=global_dict['alloc2'], plot_json=global_dict['plot_json'], plot_json1=global_dict['plot_json1'], plot_json2=global_dict['plot_json2'], plot_json3=global_dict['plot_json3'], plot_json4=global_dict['plot_json4'], plot_json5=global_dict['plot_json5'], leftover1=global_dict['leftover1'], leftover2=global_dict['leftover2'], listofna=(', '.join(global_dict['listofna'])))

Solver does not find optimal solution when expanding problems

I have noticed that when using Pyomo + Ipopt, some optimization DAE problems that converge to an optimal solution stop doing so when they are expanded in complexity (e.g. a larger distance in the car example) and, consequently, in the number of finite elements needed to keep accuracy. The solver then displays:
EXIT: Solved To Acceptable Level.
instead of the previous "EXIT: Optimal Solution Found".
As an example of the above, I will use a modified version of the "ampl car" sample from the Pyomo repository.
# Ampl Car Example
#
# Shows how to convert a minimize final time optimal control problem
# to a format pyomo.dae can handle by removing the time scaling from
# the ContinuousSet.
#
# min tf
# dx/dt = v
# dv/dt = a - R*v^2
# x(0)=0; x(tf)=L
# v(0)=0; v(tf)=0
# -3 <= a <= 1
from pyomo.environ import *
from pyomo.dae import *

m = ConcreteModel()
m.R = Param(initialize=0.001)         # Friction factor
m.L = Param(initialize=1000000.0)     # Final position
m.tau = ContinuousSet(bounds=(0, 1))  # Unscaled time
m.time = Var(m.tau)                   # Scaled time
m.tf = Var()
m.x = Var(m.tau, bounds=(0, m.L + 50))
m.v = Var(m.tau, bounds=(0, None))
m.a = Var(m.tau, bounds=(-3.0, 1.0), initialize=0)
m.dtime = DerivativeVar(m.time)
m.dx = DerivativeVar(m.x)
m.dv = DerivativeVar(m.v)
m.obj = Objective(expr=m.tf)

def _ode1(m, i):
    if i == 0:
        return Constraint.Skip
    return m.dx[i] == m.tf * m.v[i]
m.ode1 = Constraint(m.tau, rule=_ode1)

def _ode2(m, i):
    if i == 0:
        return Constraint.Skip
    return m.dv[i] == m.tf*(m.a[i] - m.R*m.v[i]**2)
m.ode2 = Constraint(m.tau, rule=_ode2)

def _ode3(m, i):
    if i == 0:
        return Constraint.Skip
    return m.dtime[i] == m.tf
m.ode3 = Constraint(m.tau, rule=_ode3)

def _init(m):
    yield m.x[0] == 0
    yield m.x[1] == m.L
    yield m.v[0] == 0
    yield m.v[1] == 0
    yield m.time[0] == 0
m.initcon = ConstraintList(rule=_init)

discretizer = TransformationFactory('dae.finite_difference')
discretizer.apply_to(m, nfe=5000, scheme='BACKWARD')
solver = SolverFactory('ipopt')
solver.solve(m, tee=True)
print("final time = %6.2f" % (value(m.tf)))

x = []
v = []
a = []
time = []
for i in m.tau:
    time.append(value(m.time[i]))
    x.append(value(m.x[i]))
    v.append(value(m.v[i]))
    a.append(value(m.a[i]))

import matplotlib.pyplot as plt
plt.subplot(131)
plt.plot(time, x, label='x')
plt.title('location')
plt.xlabel('time')
plt.subplot(132)
plt.plot(time, v, label='v')
plt.xlabel('time')
plt.title('velocity')
plt.subplot(133)
plt.plot(time, a, label='a')
plt.xlabel('time')
plt.title('acceleration')
plt.show()
NOTE: The original source code can be consulted here to compare with my modified version: https://github.com/Pyomo/pyomo/blob/main/examples/dae/car_example.py
Is there anything I can do about this? Can I lower the Ipopt tolerance so that it keeps searching for an optimal solution?
You can disable the heuristic that makes Ipopt stop with an "acceptable" solution by setting option acceptable_iter to 0. See https://coin-or.github.io/Ipopt/OPTIONS.html#OPT_Termination for all options that determine termination of Ipopt.
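In Pyomo, that option can be set through the solver's options dictionary before the solve call, for example (a minimal sketch; m is the model built above):
solver = SolverFactory('ipopt')
solver.options['acceptable_iter'] = 0  # never stop at a merely "acceptable" point
results = solver.solve(m, tee=True)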

Is it possible to find all integer solutions?

I want to get all integer solutions within a limited time. Is this possible?
This is a linear, integer constraint satisfaction problem, which can be solved efficiently by OR-Tools' CP-SAT. I've modified their example to solve your problem in Python:
from ortools.sat.python import cp_model

class VarArraySolutionPrinter(cp_model.CpSolverSolutionCallback):
    """Print intermediate solutions."""

    def __init__(self, variables):
        cp_model.CpSolverSolutionCallback.__init__(self)
        self.__variables = variables
        self.__solution_count = 0

    def on_solution_callback(self):
        self.__solution_count += 1
        for v in self.__variables:
            print('%s=%i' % (v, self.Value(v)), end=' ')
        print()

    def solution_count(self):
        return self.__solution_count

def SearchForAllSolutionsSampleSat():
    """Showcases calling the solver to search for all solutions."""
    # Creates the model.
    model = cp_model.CpModel()
    p = [1, 2, 3, 4]
    ceq = 30
    cgeq = 2
    N = len(p)
    # Creates the variables.
    x = [model.NewIntVar(0, 100, f'x{i}') for i in range(N)]
    # Creates the constraints.
    model.Add(sum([xi*pi for xi, pi in zip(x, p)]) == ceq)
    model.Add(sum(x) >= cgeq)
    # Creates a solver and solves.
    solver = cp_model.CpSolver()
    solution_printer = VarArraySolutionPrinter(x)
    status = solver.SearchForAllSolutions(model, solution_printer)
    print('Status = %s' % solver.StatusName(status))
    print('Number of solutions found: %i' % solution_printer.solution_count())

SearchForAllSolutionsSampleSat()
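Regarding the "limited time" part: CP-SAT accepts a wall-clock budget, so the enumeration can be capped before calling SearchForAllSolutions (a sketch; the 10 seconds is an arbitrary choice):
solver.parameters.max_time_in_seconds = 10.0  # stop searching after 10 s
If the budget runs out before the search space is exhausted, the returned status is FEASIBLE instead of OPTIMAL, and you keep every solution printed so far.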

No GPU usage apparent in Google Cloud VM with PyTorch already installed and CUDA 10

I have been using a network on my machine that is nothing really special. I wanted to make it faster, so I started using Google Cloud. But I noticed something weird: my machine with a GTX 1050 Ti was faster than a V100 GPU. This didn't add up, so I checked the GPU utilization. Even though I put some stress on it by creating a big network, moving both the model and a lot of data to the GPU with a simple .cuda(), no usage was shown by the nvidia-smi command, as shown in the image.
You can check my code here:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("The device is:", device, torch.cuda.get_device_name(0), "and how many are they", torch.cuda.device_count())

# We load the training data
Samples, Ocupancy, num_samples, Samples_per_slice = common.load_samples(args.samples_filename)
Samples = Samples * args.scaling_todo
print(Samples_per_slice)

# Divide into slices
Organize_Positions, Orginezed_Ocupancy, batch_size = common.organize_sample_data(Samples, Ocupancy, num_samples, Samples_per_slice, args.num_batches)

phi = common.MLP(3, 1).cuda()

x_test = torch.from_numpy(Organize_Positions.astype(np.float32)).cuda()
y_test = torch.from_numpy(Orginezed_Ocupancy.astype(np.float32)).cuda()

all_data = common.CustomDataset(x_test, y_test)

# Divide the data into slices
Slice_data = DataLoader(dataset=all_data, batch_size=batch_size, shuffle=False)  # only take batch_size = n/b TODO Don't shuffle
#Chunky_data = DataLoader(dataset=Slice_data, batch_size=chunch_size, shuffle=False)

criterion = torch.nn.BCEWithLogitsLoss()
optimizer = torch.optim.Adam(phi.parameters(), lr=0.0001)
epoch = args.num_epochs

fit_start_time = time.time()
phi.train()
for epoch in range(epoch):
    curr_epoch_loss = 0
    batch = 0
    for x_batch, y_batch in Slice_data:
        optimizer.zero_grad()
        x_train = x_batch
        y_train = y_batch
        y_pred = phi(x_train)
        loss = criterion(y_pred.squeeze(), y_train.squeeze())
        curr_epoch_loss += loss.item()  # .item() so the autograd graph is not kept alive
        print('Batch {}: train loss: {}'.format(batch, loss.item()))
        # Backward pass
        loss.backward()
        optimizer.step()  # Optimizes only phi parameters
        batch += 1
    print('Epoch {}: train loss: {}'.format(epoch, loss.item()))
fit_end_time = time.time()
print("Total time = %f" % (fit_end_time - fit_start_time))

# Save model
torch.save({'state_dict': phi.state_dict()}, args.model_filename)
and the model here:
class MLP(nn.Module):
    def __init__(self, in_dim: int, out_dim: int):
        super().__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.fc1 = nn.Linear(in_dim, 128)
        self.fc1_bn = nn.BatchNorm1d(128)
        self.fc2 = nn.Linear(128, 256)
        self.fc2_bn = nn.BatchNorm1d(256)
        self.fc3 = nn.Linear(256, 512)
        self.fc3_bn = nn.BatchNorm1d(512)
        self.fc4 = nn.Linear(512, 512)
        self.fc4_bn = nn.BatchNorm1d(512)
        self.fc5 = nn.Linear(512, out_dim, bias=False)
        self.relu = nn.LeakyReLU()

    def forward(self, x):
        x = self.relu(self.fc1_bn(self.fc1(x)))
        x = self.relu(self.fc2_bn(self.fc2(x)))  # leaky
        x = self.relu(self.fc3_bn(self.fc3(x)))
        x = self.relu(self.fc4_bn(self.fc4(x)))
        x = self.fc5(x)
        return x

class CustomDataset(Dataset):
    def __init__(self, x_tensor, y_tensor):
        self.x = x_tensor
        self.y = y_tensor

    def __getitem__(self, index):
        return (self.x[index], self.y[index])

    def __len__(self):
        return len(self.x)
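For reference, a quick sanity check (my own addition, reusing the names from the code above) that the model and data actually live on the GPU:
print(next(phi.parameters()).device)   # expect cuda:0 if the model was moved
print(x_test.device)                   # expect cuda:0 for the data tensors
print(torch.cuda.memory_allocated())   # non-zero once tensors are resident on the GPU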

Reformulating the AMPL car example

I am trying to migrate the AMPL car problem that comes as an example in the Ipopt source code tarball. I am having problems with the end condition (reaching a point with zero speed at the final iteration) and with the cost function (minimizing the final time).
Can someone help me revise the following model?
# min tf
# dx/dt = v
# dv/dt = a - R*v^2
# x(0) = 0; x(tf) = 100
# v(0) = 0; v(tf) = 0
# -3 <= a <= 1 (a is the control variable)
#!Python3.5
from pyomo.environ import *
from pyomo.dae import *

N = 20
T = 10
L = 100

m = ConcreteModel()

# Parameters
m.R = Param(initialize=0.001)

# Variables
def x_init(m, i):
    return i*L/N

m.t = ContinuousSet(bounds=(0, 1000))
m.x = Var(m.t, bounds=(0, None), initialize=x_init)
m.v = Var(m.t, bounds=(0, None), initialize=L/T)
m.a = Var(m.t, bounds=(-3.0, 1.0), initialize=0)

# Derivatives
m.dxdt = DerivativeVar(m.x, wrt=m.t)
m.dvdt = DerivativeVar(m.v, wrt=m.t)

# Objectives
m.obj = Objective(expr=m.t[N])

# DAE
def _ode1(m, i):
    if i == 0:
        return Constraint.Skip
    return m.dxdt[i] == m.v[i]
m.ode1 = Constraint(m.t, rule=_ode1)

def _ode2(m, i):
    if i == 0:
        return Constraint.Skip
    return m.dvdt[i] == m.a[i] - m.R*m.v[i]**2
m.ode2 = Constraint(m.t, rule=_ode2)

# Constraints
def _init(m):
    yield m.x[0] == 0
    yield m.v[0] == 0
    yield ConstraintList.End
m.init = ConstraintList(rule=_init)

'''
def _end(m, i):
    if i == N:
        return m.x[i] == L and m.v[i] == 0
    return Constraint.Skip
m.end = ConstraintList(rule=_end)
'''

# Discretize
discretizer = TransformationFactory('dae.finite_difference')
discretizer.apply_to(m, nfe=N, wrt=m.t, scheme='BACKWARD')

# Solve
solver = SolverFactory('ipopt', executable='C:\\EXTERNOS\\COIN-OR\\win32-msvc12\\bin\\ipopt')
results = solver.solve(m, tee=True)
Currently, a ContinuousSet in Pyomo has to be bounded. This means that in order to solve a minimum time optimal control problem using this tool, the problem must be reformulated to remove the time scaling from the ContinuousSet. In addition, you have to introduce an extra variable to represent the final time. I've added an example to the Pyomo github repository showing how this can be done for your problem.
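Schematically, the reformulation looks like this (a sketch only; the complete reformulated car example appears earlier on this page and in the Pyomo repository):
# Run the dynamics on a fixed unit interval and make the final time a variable:
m.tau = ContinuousSet(bounds=(0, 1))  # unscaled time in [0, 1]
m.tf = Var()                          # free final time
m.obj = Objective(expr=m.tf)          # minimize the final time directly
# By the chain rule (t = tf * tau), every ODE picks up a factor of tf:
#   m.dx[i] == m.tf * m.v[i]
#   m.dv[i] == m.tf * (m.a[i] - m.R * m.v[i]**2)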