I have a model which looks like this:
class Measurement(models.Model):
    date = models.DateField('date')
    time = models.TimeField('time')
    Q = models.DecimalField(max_digits=10, decimal_places=6)
    P = models.DecimalField(max_digits=10, decimal_places=6)
    f = models.DecimalField(max_digits=10, decimal_places=6)
In my views I would like to display it, so I wrote this function:
def plotMeas(request):
    # Count the events
    c = Measurement.objects.all().count()
    # Pre-allocate the lists
    P = [0] * c
    Q = [0] * c
    t = [0] * c
    # Save P & Q in lists
    for i in range(c):
        meas = Measurement.objects.get(pk=i + 1)
        P[i] = meas.P
        Q[i] = meas.Q
        t[c - 1 - i] = i * 10
    # Keep only the last 100 points
    if c > 100:
        P = P[-100:]
        Q = Q[-100:]
        t = t[-100:]
    # Construct the graph
    fig = Figure()
    q = fig.add_subplot(211)
    q.set_xlabel("time (minutes ago)")
    q.set_ylabel("Q (VAR)")
    p = fig.add_subplot(212)
    p.set_xlabel("time (minutes ago)")
    p.set_ylabel("P (W)")
    p.plot(t, P, 'go-')
    q.plot(t, Q, 'o-')
    canvas = FigureCanvas(fig)
    response = HttpResponse(content_type='image/png')
    canvas.print_png(response)
    return response
However, I would like the horizontal axis to show the date and time saved in the model. Does anyone know how to do that?
Have a look at the documentation for plot_date. Conveniently, plot_date takes arguments similar to plot. A call might look like:
p.plot_date(sequence_of_datetime_objects, y_axis_values, 'go-')
Using matplotlib.dates you can then customize the format of your x-axis labels.
A simple example:
The following will specify that the x-axis displays only every third month in the format Jan '09 (assuming English-speaking locale).
import matplotlib.dates as mdates

p.xaxis.set_major_locator(mdates.MonthLocator(interval=3))
p.xaxis.set_major_formatter(mdates.DateFormatter("%b '%y"))
Since you have dates and times stored separately you may either want to
change your model to use a DateTimeField, or
use Python to combine them.
For example:
import datetime as dt
t1 = dt.time(21, 0, 1, 2)  # hour=21, minute=0, second=1, microsecond=2
d1 = dt.date.today()
dt1 = dt.datetime.combine(d1,t1)
# result: datetime.datetime(2011, 4, 15, 21, 0, 1, 2)
To iterate over two sequences and combine them you might use zip (code for illustrative purposes only, not necessarily optimized):
sequence_of_datetime_objects = []
for a_date, a_time in zip(sequence_of_date_objects, sequence_of_time_objects):
    sequence_of_datetime_objects.append(dt.datetime.combine(a_date, a_time))
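Putting the pieces together, a rough sketch of the revised view (my own assembly of the ideas above, assuming the Agg backend and that only the last 100 measurements should be shown) might look like:

import datetime as dt
import matplotlib.dates as mdates
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from django.http import HttpResponse

def plotMeas(request):
    # last 100 measurements in chronological order
    rows = list(Measurement.objects.order_by('date', 'time'))[-100:]
    times = [dt.datetime.combine(m.date, m.time) for m in rows]
    P = [float(m.P) for m in rows]  # DecimalField -> float for plotting
    Q = [float(m.Q) for m in rows]
    fig = Figure()
    q = fig.add_subplot(211)
    q.set_ylabel("Q (VAR)")
    p = fig.add_subplot(212)
    p.set_ylabel("P (W)")
    q.plot_date(times, Q, 'o-')
    p.plot_date(times, P, 'go-')
    # show date and time on both x-axes
    for ax in (p, q):
        ax.xaxis.set_major_formatter(mdates.DateFormatter("%d %b %H:%M"))
    fig.autofmt_xdate()
    canvas = FigureCanvas(fig)
    response = HttpResponse(content_type='image/png')
    canvas.print_png(response)
    return response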
Feel free to open another question if you get stuck implementing the specifics.
I have a thread running inside a route; its job is to do some expensive work and store variables that I then need to use in another Flask route.
When I pass the session object (backed by Redis) into the thread function in order to add the data and extract it later, the variables I stored in it are not found.
In contrast, when I declare a global_dict and pass it to the thread function instead of session, the code works great.
Since the thread function can be used by multiple users simultaneously, storing the results in a global_dict is not good practice.
Why does using session in my code not work?
In the following code, if I replace global_dict with session I won't be able to access it in the /result route.
Per doc:
"Redis can handle up to 2^32 keys, and was tested in practice to handle at least 250 million keys per instance.
Every hash, list, set, and sorted set, can hold 2^32 elements.
In other words your limit is likely the available memory in your system."
P.S. Sorry for the long code blocks.
#app.route("/build",methods=["GET", "POST"])
#login_required
def build():
if request.method == "POST":
global th
global finished
finished= False
@copy_current_request_context
def operation(global_dict):
global finished
symbols = request.form.get("symbols")
mc.set("symbols", symbols)
if contains_multiple_words(symbols) == False:
flash("The app purpose is to optimize a portfolio given a list of stocks. Please enter a list of stocks seperated by a new row.")
return redirect("/build")
Build(session["user_id"], symbols.upper(), request.form.get("start"), request.form.get("end"), request.form.get("funds"), request.form.get("short"), request.form.get("volatility"), request.form.get("gamma"), request.form.get("return"))
db.session.commit()
try:
df = yf.download(symbols, start=request.form.get("start"), end=request.form.get("end"), auto_adjust = False, prepost = False, threads = True, proxy = None)["Adj Close"].dropna(axis=1, how='all')
failed=(list(shared._ERRORS.keys()))
df = df.replace(0, np.nan)
try:
global_dict['listofna']=df.columns[df.isna().iloc[-2]].tolist()+failed
except IndexError:
flash("Please enter valid stocks from Yahoo Finance.")
return redirect("/build")
df = df.loc[:,df.iloc[-2,:].notna()]
except ValueError:
flash("Please enter a valid symbols (taken from Yahoo Finance)")
return redirect("/build")
def enter_sql_data(app, df, nasdaq_exchange_info, Stocks):
for ticker in df.columns:
ticker=ticker.upper()
if any(sublist[1]==ticker in sublist for sublist in nasdaq_exchange_info) is False:
ticker_ln = yf.Ticker(ticker).stats()["price"].get('longName')
if not ticker_ln:
ticker_ln = ticker
ticker_list=[ticker_ln, ticker]
with app.app_context():
new_stock=Stocks(ticker, ticker_ln)
db.session.add(new_stock)
db.session.commit()
nasdaq_exchange_info.extend([ticker_list])
global nasdaq_exchange_info
app1 = app._get_current_object()
p1 = Process(target=enter_sql_data, args=[app1, df, nasdaq_exchange_info, Stocks])
p1.start()
prices = df.copy()
fig = px.line(prices, x=prices.index, y=prices.columns, title='Price Graph')
fig = fig.update_xaxes(rangeslider_visible=True)
fig.update_layout(width=1350, height=900)
global_dict['plot_json'] = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
exp_cov = risk_models.exp_cov(prices, frequency=252)
#plotting the covariance matrix
heat = go.Heatmap(
z = risk_models.cov_to_corr(exp_cov),
x = exp_cov.columns.values,
y = exp_cov.columns.values,
zmin = 0, # Sets the lower bound of the color domain
zmax = 1,
xgap = 1, # Sets the horizontal gap (in pixels) between bricks
ygap = 1,
colorscale = 'RdBu'
)
title = 'Covariance matrix'
layout = go.Layout(
title_text=title,
title_x=0.5,
width=800,
height=800,
xaxis_showgrid=False,
yaxis_showgrid=False,
yaxis_autorange='reversed'
)
fig1=go.Figure(data=[heat], layout=layout)
fig1.update_layout(width=500, height=500)
global_dict['plot_json1'] = json.dumps(fig1, cls=plotly.utils.PlotlyJSONEncoder)
S = risk_models.CovarianceShrinkage(prices).ledoit_wolf()
heat = go.Heatmap(
z = risk_models.cov_to_corr(S),
x = S.columns.values,
y = S.columns.values,
zmin = 0, # Sets the lower bound of the color domain
zmax = 1,
xgap = 1, # Sets the horizontal gap (in pixels) between bricks
ygap = 1,
colorscale = 'RdBu'
)
title = 'Ledoit-Wolf shrinkage'
layout = go.Layout(
title_text=title,
title_x=0.5,
width=800,
height=800,
xaxis_showgrid=False,
yaxis_showgrid=False,
yaxis_autorange='reversed'
)
fig2=go.Figure(data=[heat], layout=layout)
fig2.update_layout(width=500, height=500)
global_dict['plot_json2'] = json.dumps(fig2, cls=plotly.utils.PlotlyJSONEncoder)
#Section 2 -Return estimation
# It is often a bad idea to provide returns using a simple estimate like the mean of past returns. Research suggests you may be better off not providing expected returns – you can then just find the min_volatility() portfolio or use HRP.
mu = pypfopt.expected_returns.capm_return(prices)
fig3 = px.bar(mu, orientation='h')
fig3.update_layout(width=700, height=500)
global_dict['plot_json3'] = json.dumps(fig3, cls=plotly.utils.PlotlyJSONEncoder)
# Using risk models optimized for the efficient frontier to reduce to min volatility, good for cryptocurrencies - not implemented in the website now.
ef = EfficientFrontier(None, S)
try:
ef.min_volatility()
weights = ef.clean_weights()
nu = pd.Series(weights)
fig4 = px.bar(nu, orientation='h')
fig4.update_layout(width=700, height=500)
global_dict['plot_json4'] = json.dumps(fig4, cls=plotly.utils.PlotlyJSONEncoder)
av=ef.portfolio_performance()[1]
global_dict['av']=round(av, 3)*1
#if we want to buy the portfolio mentioned above
df = df.iloc[[-1]]
for col in df.columns:
if col.endswith(".L"):
df.loc[:,col] = df.loc[:,col]*GBPtoUSD()
try:
latest_prices = df.iloc[-1]
except IndexError:
flash("There is an issue with Yahoo API please try again later")
return redirect("/")
# prices as of the day you are allocating
if float(request.form.get("funds")) <= 0 or float(request.form.get("funds")) == " ":
flash("Amount need to be a positive number")
return redirect("/build")
if float(request.form.get("funds")) < float(latest_prices.min()):
flash("Amount is not high enough to cover the lowest priced stock")
return redirect("/build")
try:
da = DiscreteAllocation(weights, latest_prices, total_portfolio_value=float(request.form.get("funds")))
except TypeError:
delisted=df.columns[df.isna().any()].tolist()
delisted= ", ".join(delisted)
flash("Can't get latest prices for the following stock/s, please remove to contiue : %s" % delisted)
return redirect("/build")
alloc, global_dict['leftover'] = da.lp_portfolio()
global_dict['alloc']=alloc
global_dict['latest_prices']=latest_prices
except ValueError:
pass
#Maximise return for a given risk, with L2 regularisation
try:
ef = EfficientFrontier(mu, S)
ef.add_objective(objective_functions.L2_reg, gamma=(float(request.form.get("gamma")))) # gamma is the tuning parameter
ef.efficient_risk(int(request.form.get("volatility"))/100)
weights = ef.clean_weights()
su = pd.DataFrame([weights])
fig5 = px.pie(su, values=weights.values(), names=su.columns)
fig5.update_traces(textposition='inside')
fig5.update_layout(width=500, height=500, uniformtext_minsize=12, uniformtext_mode='hide')
global_dict['plot_json5'] = json.dumps(fig5, cls=plotly.utils.PlotlyJSONEncoder)
global_dict['perf'] =ef.portfolio_performance()
except Exception as e:
flash(str(e))
return redirect("/build")
#if we want to buy the portfolio mentioned above
for col in df.columns:
if col.endswith(".L"):
df.loc[:,col] = df.loc[:,col]*GBPtoUSD()
latest_prices1 = df.iloc[-1] # prices as of the day you are allocating
if float(request.form.get("funds")) <= 0 or float(request.form.get("funds")) == " ":
flash("Amount need to be a positive number")
return redirect("/build")
if float(request.form.get("funds")) < float(latest_prices.min()):
flash("Amount is not high enough to cover the lowest priced stock")
return redirect("/build")
da = DiscreteAllocation(weights, latest_prices, total_portfolio_value=float(request.form.get("funds")))
alloc1, global_dict['leftover1'] = da.lp_portfolio()
global_dict['alloc1']=alloc1
global_dict['latest_prices1']=latest_prices1
#Efficient semi-variance optimization
returns = pypfopt.expected_returns.returns_from_prices(prices)
returns = returns.dropna()
es = EfficientSemivariance(mu, returns)
try:
es.efficient_return(float(request.form.get("return"))/100)
except ValueError as e:
flash(str(e))
return redirect("/build")
global_dict['perf2']=es.portfolio_performance()
weights = es.clean_weights()
#if we want to buy the portfolio mentioned above
for col in df.columns:
if col.endswith(".L"):
df.loc[:,col] = df.loc[:,col]*GBPtoUSD()
latest_prices2 = df.iloc[-1] # prices as of the day you are allocating
if float(request.form.get("funds")) <= 0 or float(request.form.get("funds")) == " ":
flash("Amount need to be a positive number")
return redirect("/build")
if float(request.form.get("funds")) < float(latest_prices.min()):
flash("Amount is not high enough to cover the lowest priced stock")
return redirect("/build")
da = DiscreteAllocation(weights, latest_prices, total_portfolio_value=float(request.form.get("funds")))
alloc2, global_dict['leftover2'] = da.lp_portfolio()
global_dict['alloc2']=alloc2
global_dict['latest_prices2']=latest_prices2
mc.delete("symbols")
global_dict['ret']=float(request.form.get("return"))
global_dict['gamma']=request.form.get("gamma")
global_dict['volatility']=request.form.get("volatility")
finished = True
global global_dict
th = Thread(target=operation, args=[global_dict])
th.start()
return render_template("loading.html")
else:
if mc.get("symbols"):
cached_symbols=mc.get("symbols")
else:
cached_symbols=''
availableCash=db.session.query(Users.cash).filter_by(id=session["user_id"]).first().cash
return render_template("build.html", availableCash=round(availableCash, 4), GBP=GBPtoUSD(), nasdaq_exchange_info=nasdaq_exchange_info, cached_symbols=cached_symbols, top_50_crypto=top_50_crypto, top_world_stocks=top_world_stocks, top_US_stocks=top_US_stocks, top_div=top_div)
@app.route('/result')
def result():
return render_template("built.html",av=global_dict['av'], leftover=global_dict['leftover'], alloc=global_dict['alloc'], ret=global_dict['ret'],gamma=global_dict['gamma'],volatility=global_dict['volatility'],perf=global_dict['perf'], perf2=global_dict['perf2'], alloc1=global_dict['alloc1'], alloc2=global_dict['alloc2'], plot_json=global_dict['plot_json'], plot_json1=global_dict['plot_json1'], plot_json2=global_dict['plot_json2'], plot_json3=global_dict['plot_json3'], plot_json4=global_dict['plot_json4'], plot_json5=global_dict['plot_json5'], leftover1=global_dict['leftover1'], leftover2=global_dict['leftover2'],listofna=(', '.join(global_dict['listofna'])))
I'm trying to practice using pymc3 on the kinds of data I come across in my research, but I'm having trouble thinking through how to fit the model when each person gives me multiple data points, and each person comes from a different group (so trying a hierarchical model).
Here's the practice scenario I'm using: Suppose we have 2 groups of people, N = 30 in each group. All 60 people go through a 10-question survey, where each person can respond ("1") or not respond ("0") to each question. So, for each person, I have an array of length 10 with 1's and 0's.
To model these data, I assume each person has some latent trait "theta", and each item has a "discrimination" a and a "difficulty" b (this is just a basic item response model), and the probability of responding ("1") is given by: (1 + exp(-a(theta - b)))^(-1), i.e. the logistic function applied to a(theta - b).
Here is how I tried to fit it using pymc3:
traces = {}
for grp in range(2):
    group = prac_data["Group ID"] == grp
    data = prac_data[group]["Response"]
    with pm.Model() as irt:
        # Priors
        a_tmp = pm.Normal('a_tmp', mu=0, sd=1, shape=10)
        a = pm.Deterministic('a', np.exp(a_tmp))
        # We do this transformation since we must have a >= 0
        b = pm.Normal('b', mu=0, sd=1, shape=10)
        # Now for the hyperpriors on the groups:
        theta_mu = pm.Normal('theta_mu', mu=0, sd=1)
        theta_sigma = pm.Uniform('theta_sigma', upper=2, lower=0)
        theta = pm.Normal('theta', mu=theta_mu, sd=theta_sigma, shape=N)
        p = getProbs(Disc, Diff, theta, N)
        y = pm.Bernoulli('y', p=p, observed=data)
        traces[grp] = pm.sample(1000)
The function "getProbs" is supposed to give me an array of probabilities for the Bernoulli random variable, as the probability of responding 1 changes across trials/survey questions for each person. But this method gives me an error because it says to "specify one of p or logit_p", but I thought I did with the function?
Here's the code for "getProbs" in case it's helpful:
def getProbs(Disc, Diff, THETA, Nprt):
    # Get a large array of probabilities for the Bernoulli random variable
    n = len(Disc)
    m = Nprt
    probs = np.array([])
    for th in range(m):
        for t in range(n):
            p = item(Disc[t], Diff[t], THETA[th])
            probs = np.append(probs, p)
    return probs
I added the Nprt parameter because if I tried to get the length of THETA, it would give me an error since it is a FreeRV object. I know I can try and vectorize the "item" function, which is just the logistic function I put above, instead of doing it this way, but that also got me an error when I tried to run it.
I think I can do something with pm.Data to fix this, but the documentation isn't exactly clear to me.
Basically, I'm used to building models in JAGS, where you loop through each data point, but pymc3 doesn't seem to work like that. I'm confused about how to build/index my random variables in the model to make sure that the probabilities change how I'd like them to from trial-to-trial, and to make sure that the parameters I'm estimating correspond to the right person in the right group.
Thanks in advance for any help. I'm pretty new to pymc3 and trying to get the hang of it, and wanted to try something different from JAGS.
EDIT: I was able to solve this by first building the array I needed by looping through the trials, then transforming the array using:
p = theano.tensor.stack(p, axis = 0)
I then put this new variable in the "p" argument of the Bernoulli instance and it worked! Here's the updated full model: (below, I imported theano.tensor as T)
group = group.astype('int')
data = prac_data["Response"]

with pm.Model() as irt:
    # Priors
    # Item parameters:
    a = pm.Gamma('a', alpha=1, beta=1, shape=10)  # Discrimination
    b = pm.Normal('b', mu=0, sd=1, shape=10)      # Difficulty
    # Now for the hyperpriors on the groups: shape = 2 as there are 2 groups
    theta_mu = pm.Normal('theta_mu', mu=0, sd=1, shape=2)
    theta_sigma = pm.Uniform('theta_sigma', upper=2, lower=0, shape=2)
    # Individual-level person parameters:
    # group is a 2*N array that lets the model know which
    # theta_mu to use for each theta to estimate
    theta = pm.Normal('theta', mu=theta_mu[group], sd=theta_sigma[group], shape=2*N)
    # Here, we're building an array of the probabilities we need for
    # each trial:
    p = np.array([])
    for n in range(2*N):
        for t in range(10):
            x = -a[t]*(theta[n] - b[t])
            p = np.append(p, x)
    # Here, we turn p into a tensor object to put as an argument to the
    # Bernoulli random variable
    p = T.stack(p, axis=0)
    y = pm.Bernoulli('y', logit_p=p, observed=data)
    # On my computer, this took about 5 minutes to run.
    traces = pm.sample(1000, cores=1)

print(az.summary(traces))  # Summary of parameter distributions
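As a side note, the double loop over persons and items can also be written in vectorized form. This is a sketch of my own rather than part of the original solution, and it assumes the observed data is ordered person-by-person and then item-by-item, exactly as in the loop above:

# theta has shape (2*N,) while a and b have shape (10,); broadcasting a column
# of thetas against a row of item parameters gives a (2*N, 10) matrix of logits
logits = -a.dimshuffle('x', 0) * (theta.dimshuffle(0, 'x') - b.dimshuffle('x', 0))
# row-major flattening matches the person-then-item ordering used by the loop
y = pm.Bernoulli('y', logit_p=T.flatten(logits), observed=data)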
I have 7 quasi-Lorentzian curves which are fitted to my data,
and I would like to join them to make one connected curve. Do you have any ideas how to do this? I've read about composite models in the lmfit documentation, but it's not clear to me how to do it.
Here is a sample of my code of two fitted curves.
for dataset in [Bxfft]:
    dataset = np.asarray(dataset)
    freqs, psd = signal.welch(dataset, fs=266336/300, window='hamming', nperseg=16192, scaling='spectrum')
    plt.semilogy(freqs[0:-7000], psd[0:-7000]/dataset.size**0, color='r', label='Bx')
    x = freqs[100:-7900]
    y = psd[100:-7900]
    # 8 Hz
    model = Model(lorentzian)
    params = model.make_params(amp=6, cen=5, sig=1, e=0)
    result = model.fit(y, params, x=x)
    final_fit = result.best_fit
    print "8 Hz mode"
    print(result.fit_report(min_correl=0.25))
    plt.plot(x, final_fit, 'k-', linewidth=2)
    # 14 Hz
    x2 = freqs[220:-7780]
    y2 = psd[220:-7780]
    model2 = Model(lorentzian)
    pars2 = model2.make_params(amp=6, cen=10, sig=3, e=0)
    pars2['amp'].value = 6
    result2 = model2.fit(y2, pars2, x=x2)
    final_fit2 = result2.best_fit
    print "14 Hz mode"
    print(result2.fit_report(min_correl=0.25))
    plt.plot(x2, final_fit2, 'k-', linewidth=2)
UPDATE!!!
I've used some hints from user @MNewville, who posted an answer, and using his code I got this:
My code is similar to his, but extended to each peak. What I'm struggling with now is replacing the ready-made LorentzianModel with my own.
The problem is that when I do this, the code gives me an error like this:
C:\Python27\lib\site-packages\lmfit\printfuncs.py:153: RuntimeWarning: invalid value encountered in double_scalars
  spercent = '({0:.2%})'.format(abs(par.stderr/par.value))
[[Model]]
About my own model:
def lorentzian(x, amp, cen, sig, e):
    return (amp*(1-e)) / ((pow((1.0 * x - cen), 2)) + (pow(sig, 2)))

peak1 = Model(lorentzian, prefix='p1_')
peak2 = Model(lorentzian, prefix='p2_')
peak3 = Model(lorentzian, prefix='p3_')

# make composite by adding (or multiplying, etc) components
model = peak1 + peak2 + peak3

# make parameters for the full model, setting initial values
# using the prefixes
params = model.make_params(p1_amp=6, p1_cen=8, p1_sig=1, p1_e=0,
                           p2_amp=16, p2_cen=14, p2_sig=3, p2_e=0,
                           p3_amp=16, p3_cen=21, p3_sig=3, p3_e=0)
The rest of the code is similar to @MNewville's answer.
A composite model for 3 Lorentzians would look like this:
from lmfit import Model, LorentzianModel

peak1 = LorentzianModel(prefix='p1_')
peak2 = LorentzianModel(prefix='p2_')
peak3 = LorentzianModel(prefix='p3_')

# make composite by adding (or multiplying, etc) components
model = peak1 + peak2 + peak3

# make parameters for the full model, setting initial values
# using the prefixes
params = model.make_params(p1_amplitude=10, p1_center=8, p1_sigma=3,
                           p2_amplitude=10, p2_center=15, p2_sigma=3,
                           p3_amplitude=10, p3_center=20, p3_sigma=3)

# perhaps set bounds to prevent peaks from swapping or crazy values
params['p1_amplitude'].min = 0
params['p2_amplitude'].min = 0
params['p3_amplitude'].min = 0
params['p1_sigma'].min = 0
params['p2_sigma'].min = 0
params['p3_sigma'].min = 0
params['p1_center'].min = 2
params['p1_center'].max = 11
params['p2_center'].min = 10
params['p2_center'].max = 18
params['p3_center'].min = 17
params['p3_center'].max = 25

# then do a fit over the full data range
result = model.fit(y, params, x=x)
I think the key parts you were missing were: a) just add models together, and b) use prefix to avoid name collisions of parameters.
I hope that is enough to get you started...
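If it helps, once a composite fit like the one above has run, you can draw the summed best fit as a single connected curve and inspect each peak separately through the result object. A small sketch, assuming x and y cover the full range of interest and that matplotlib is imported as plt:

plt.semilogy(x, y, 'b', label='data')
plt.plot(x, result.best_fit, 'k-', linewidth=2, label='composite fit')
# eval_components() returns one curve per prefixed component
for name, comp in result.eval_components(x=x).items():
    plt.plot(x, comp, '--', label=name)
plt.legend()
plt.show()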
Hi all,
I am trying to set up an abstract model for a very simple QP of the form
min (x-x0)^2
s.t.
A x = b
C x <= d
I would like to use an abstract model, as I need to re-solve with changing parameters (mainly x0, but potentially also A, b, C, d). Right now I am struggling with simply setting the parameters in the model instance. I do not want to use an external data file, but rather internal Python variables. All examples I find online use AMPL-formatted data files.
This is the code I have right now
import pyomo.environ as pe

model = pe.AbstractModel()

# the sets
model.n = pe.Param(within=pe.NonNegativeIntegers)
model.m = pe.Param(initialize=1)
model.ss = pe.RangeSet(1, model.n)
model.os = pe.RangeSet(1, model.m)

# the starting point and the constraint parameters
model.x_hat = pe.Param(model.ss)
model.A = pe.Param(model.os, model.ss)
model.b = pe.Param(model.os)
model.C = pe.Param(model.os, model.os)
model.d = pe.Param(model.ss, model.os)

# the decision variables
model.x_projected = pe.Var(model.ss)

# the constraints
# A x = b
def sum_of_elements_rule(model):
    value = model.A * model.x_projected
    return value == model.d

model.sumelem = pe.Constraint(model.os, rule=sum_of_elements_rule)

# C x <= d
def positivity_constraint(model):
    return model.C*model.x_projected <= model.d

model.bounds = pe.Constraint(model.ss, rule=positivity_constraint)

# the cost
def cost_rule(model):
    return sum((model.x_projected[i] - model.x[i])**2 for i in model.ss)

model.cost = pe.Objective(rule=cost_rule)

instance = model.create_instance()
And somehow here I am stuck. How do I set the parameters now?
Thanks and best, Theo
I know this is an old post, but a solution to this could have helped me, so here is the solution to this problem:
## TEST
data_init = {None: dict(
    n={None: 3},
    d={0: 0, 1: 1, 2: 2},
    x_hat={0: 10, 1: -1, 2: -100},
    b={None: 10}
)}

# create instance
instance = model.create_instance(data_init)
This creates the instance in a way equivalent to what you did, but more formally.
OK, I seem to have figured out what the problem is. If I want to set a parameter after I create an instance, I need the
mutable=True
flag. Then, I can set the parameter with something like
for i in range(model_dimension):
    getattr(instance, 'd')[i] = i
The model dimension needs to be chosen before I create an instance (which is OK for my case). The instance can then be reused with different parameters for the constraints.
The code below should work for the problem
min (x-x_hat)' * (x-x_hat)
s.t.
sum(x) = b
x[i] >= d[i]
with x_hat, b, d as parameters.
import pyomo.environ as pe

model = pe.AbstractModel()

# model dimension
model.n = pe.Param(default=2)
# state space set
model.ss = pe.RangeSet(0, model.n-1)
# equality
model.b = pe.Param(default=5, mutable=True)
# inequality
model.d = pe.Param(model.ss, default=0.0, mutable=True)
# decision var
model.x = pe.Var(model.ss)
model.x_hat = pe.Param(model.ss, default=0.0, mutable=True)

# the cost
def cost_rule(model):
    return sum((model.x[i] - model.x_hat[i])**2 for i in model.ss)

model.cost = pe.Objective(rule=cost_rule)

# CONSTRAINTS
# each x_i bigger than d_i
def lb_rule(model, i):
    return (model.x[i] >= model.d[i])

model.state_bound = pe.Constraint(model.ss, rule=lb_rule)

# sum of x == P_tot
def sum_rule(model):
    return (sum(model.x[i] for i in model.ss) == model.b)

model.state_sum = pe.Constraint(rule=sum_rule)

## TEST
# define model dimension
model_dimension = 3
model.n = model_dimension
# create instance
instance = model.create_instance()
# set d
for i in range(model_dimension):
    getattr(instance, 'd')[i] = i
# set x_hat
xh = (10, 1, -100)
for i in range(model_dimension):
    getattr(instance, 'x_hat')[i] = xh[i]
# set b
instance.b = 10
# solve
solver = pe.SolverFactory('ipopt')
result = solver.solve(instance)
instance.display()
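Since the original motivation was re-solving with changing parameters, a short continuation of the test block above (a sketch reusing the same instance and solver) only needs to mutate the parameters and solve again:

# re-solve the same instance with a new starting point and a new sum target
xh_new = (0, 2, 5)
for i in range(model_dimension):
    instance.x_hat[i] = xh_new[i]
instance.b = 7
result = solver.solve(instance)
instance.display()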
I currently have a piece of code that works in two segments. The first segment opens an existing text file from a specific path on my local drive and arranges it, based on certain indices, into a list of sub-lists. In the second segment I take the sub-lists I have created and group them on a shared index to simplify them (this starts at def merge_subs). I get no error, but I also get no result when I try to print the variable answer. Am I not looping over the original list of sub-lists correctly? Ultimately I would like a variable that contains the final product of these loops so that I can write its contents to a new text file. Here is the code I am working with:
from itertools import groupby, chain
from operator import itemgetter

with open("somepathname") as g:
    # reads text from lines and turns them into a list of sub-lists
    lines = g.readlines()
    for line in lines:
        matrix = line.split()
        JD = matrix[2]
        minTime = matrix[5]
        maxTime = matrix[7]
        newLists = [JD, minTime, maxTime]
        L = newLists

def merge_subs(L):
    dates = {}
    for sub in L:
        date = sub[0]
        if date not in dates:
            dates[date] = []
        dates[date].extend(sub[1:])
    answer = []
    for date in sorted(dates):
        answer.append([date] + dates[date])
New code:
def openfile(self):
    filename = askopenfilename(parent=root)
    self.lines = open(filename)

def simplify(self):
    g = self.lines.readlines()
    for line in g:
        matrix = line.split()
        JD = matrix[2]
        minTime = matrix[5]
        maxTime = matrix[7]
        self.newLists = [JD, minTime, maxTime]
        print(self.newLists)
    dates = {}
    for sub in self.newLists:
        date = sub[0]
        if date not in dates:
            dates[date] = []
        dates[date].extend(sub[1:])
    answer = []
    for date in sorted(dates):
        print(answer.append([date] + dates[date]))
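For reference, here is a minimal sketch of what the first segment appears to be aiming for. It rests on my assumptions that every line's sub-list should be collected into one list, that merge_subs should return its result rather than only build it, and that the file layout matches the indices used above (the output filename is a placeholder):

def merge_subs(L):
    dates = {}
    for sub in L:
        date = sub[0]
        if date not in dates:
            dates[date] = []
        dates[date].extend(sub[1:])
    answer = []
    for date in sorted(dates):
        answer.append([date] + dates[date])
    return answer  # without a return, the caller never sees `answer`

L = []
with open("somepathname") as g:
    for line in g:
        matrix = line.split()
        # accumulate every line's sub-list instead of overwriting it
        L.append([matrix[2], matrix[5], matrix[7]])

answer = merge_subs(L)
print(answer)

# write the grouped rows to a new text file
with open("output.txt", "w") as out:
    for row in answer:
        out.write(" ".join(row) + "\n")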