Objective and Constraint rules for SubModels in PAO - pyomo

My question refers to the PAO package; I hope this is the right place to ask.
I am wondering whether it is possible to create lower-level objectives or constraints in a bilevel PAO model using "rule" functions.
I would like to create a lower-level objective and tried the following:
import pyomo.environ as pe
import pyomo.opt as po
from pao.pyomo import *
M = pe.ConcreteModel()
M.S = pe.RangeSet(1, 10)
M.x = pe.Var(M.S, bounds=(0, None))
M.L = SubModel(fixed=M.x)
M.L.y = pe.Var(M.S, bounds=(0, None))
def ul_obj_rule(M):
    """upper-level objective"""
    return sum(M.x[i] + M.L.y[i] for i in M.S)

M.obj = pe.Objective(rule=ul_obj_rule, sense=pe.minimize)

def ll_obj_rule(M):
    return sum(M.L.y[i] for i in M.S)

M.L.obj = pe.Objective(rule=ll_obj_rule, sense=pe.maximize)
This resulted in an error:
AttributeError: 'SubModel' object has no attribute 'S'
I tried adding a Set identical to M.S to the SubModel:
import pyomo.environ as pe
import pyomo.opt as po
from pao.pyomo import *
M = pe.ConcreteModel()
M.S = pe.RangeSet(1, 10)
M.x = pe.Var(M.S, bounds=(0, None))
M.L = SubModel(fixed=M.x)
M.L.y = pe.Var(M.S, bounds=(0, None))
M.L.S = pe.RangeSet(1, 10)
def ul_obj_rule(M):
    """upper-level objective"""
    return sum(M.x[i] + M.L.y[i] for i in M.S)

M.obj = pe.Objective(rule=ul_obj_rule, sense=pe.minimize)

def ll_obj_rule(M):
    return sum(M.L.y[i] for i in M.S)

M.L.obj = pe.Objective(rule=ll_obj_rule, sense=pe.maximize)
which resulted in:
AttributeError: 'SubModel' object has no attribute 'L'
Creating the same objective but using "expr" instead of "rule" works fine:
import pyomo.environ as pe
import pyomo.opt as po
from pao.pyomo import *
M = pe.ConcreteModel()
M.S = pe.RangeSet(1, 10)
M.x = pe.Var(M.S, bounds=(0, None))
M.L = SubModel(fixed=M.x)
M.L.y = pe.Var(M.S, bounds=(0, None))
M.obj = pe.Objective(expr=sum(M.x[i] + M.L.y[i] for i in M.S), sense=pe.minimize)
M.L.obj = pe.Objective(expr=sum(M.L.y[i] for i in M.S), sense=pe.maximize)
I encountered the same problem when I tried to initialize constraints. Does anyone know if and how I could set up objective or constraint rules for bilevel models in PAO without encountering the described problems?
Thank you very much in advance!
Edit: solved it using M.model(). This script works:
import pyomo.environ as pe
import pyomo.opt as po
from pao.pyomo import *
M = pe.ConcreteModel()
M.S = pe.RangeSet(1, 10)
M.x = pe.Var(M.S, bounds=(0, None))
M.L = SubModel(fixed=M.x)
M.L.y = pe.Var(M.S, bounds=(0, None))
def ul_obj_rule(M):
    """upper-level objective"""
    mo = M.model()
    return sum(mo.x[i] + mo.L.y[i] for i in M.S)

M.obj = pe.Objective(rule=ul_obj_rule, sense=pe.minimize)

def ll_obj_rule(M):
    mo = M.model()
    return sum(mo.L.y[i] for i in mo.S)

M.L.obj = pe.Objective(rule=ll_obj_rule, sense=pe.maximize)

This is because the argument passed to ll_obj_rule when it is constructed in the last line is M.L (the SubModel), not the top-level M you were expecting, and M.L does not have an attribute L.
A simpler fix is therefore to omit the L inside ll_obj_rule, since the rule already receives the SubModel:
def ll_obj_rule(M):
    return sum(M.y[i] for i in M.S)
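The same pattern carries over to lower-level constraints, which the question also mentions. A minimal sketch, not from the original post, appended to the working script above (the rule name ll_con_rule and the coupling y[i] <= x[i] are purely illustrative): the rule receives the SubModel M.L as its block argument, so its own variables can be used directly, while upper-level components are reached via .model().

def ll_con_rule(L, i):
    # L is the SubModel M.L; go through .model() for upper-level components such as M.x
    return L.y[i] <= L.model().x[i]

M.L.con = pe.Constraint(M.S, rule=ll_con_rule)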
On an unrelated note, I have been trying to run PAO on Google Colab unsuccessfully, often encountering this error: "ImportError: cannot import name 'SimpleBlock' from 'pyomo.core'". Have you encountered this error before? Any help would be greatly appreciated.

Related

Nested Disjunctions in Abstract modelling of Pyomo

I am working on a small optimization model with some disjunctions. The way I did it in a concrete model worked well:
from pyomo.environ import *
from pyomo.gdp import *

m = ConcreteModel()
m.d1 = Disjunct()
m.d2 = Disjunct()
m.d1.sub1 = Disjunct()
m.d1.sub2 = Disjunct()
m.d1.disj = Disjunction(expr=[m.d1.sub1, m.d1.sub2])
m.disj = Disjunction(expr=[m.d1, m.d2])
But now I have transferred the concrete model into an abstract formulation. I was able to get everything working except for nesting the disjunctions. The way I did it was like this:
# Disjunct 1
def _op_mode1(self, op_mode, t):
    m = op_mode.model()
    op_mode.c1 = po.Constraint(expr=m.x[t] == True)

# Disjunct 2
def _op_mode2(self, op_mode, t):
    m = op_mode.model()
    op_mode.c1 = po.Constraint(expr=m.x[t] == False)

# Disjunction 1
def _op_modes(self, m, t):
    return [m.mode1[t], m.mode2[t]]

# Adding components
self.model.del_component("mode1")
self.model.del_component("mode1_index")
self.model.add_component("mode1", pogdp.Disjunct(self.model.T, rule=self._op_mode1))
self.model.del_component("mode2")
self.model.del_component("mode2_index")
self.model.add_component("mode2", pogdp.Disjunct(self.model.T, rule=self._op_mode2))
self.model.del_component("modes")
self.model.del_component("modes_index")
self.model.add_component("modes", pogdp.Disjunction(self.model.T, rule=self._op_modes))
As I previously mentioned, this works fine. But I haven't found any way to nest the disjunctions. Pyomo always complains about the second layer of disjuncts like "sub1".
Could anybody give me a hint?
Many greetings
Joerg
The issue with the latest model above is that you are declaring m.d1 and m.d2 for each element of m.T, but they overwrite each other each time since they have the same name. You should be seeing warning messages logged for this. So if you uncomment your pprint of the model, you'll see that you only have the last ones you declared (with constraints on x[10]). So the first 9 Disjunctions in m.disjunction_ are disjunctions of Disjuncts that do not exist. The simplest fix for this is to give the disjuncts unique names when you declare them:
import pyomo.environ as pyo
import pyomo.gdp as pogdp

model = pyo.ConcreteModel()
model.T = pyo.RangeSet(0, 10)
model.x = pyo.Var(model.T, bounds=(-2, 10))
model.y = pyo.Var(model.T, bounds=(20, 30))

# This was also a duplicate declaration:
#model.disjunction_ = pogdp.Disjunction(model.T)

def d1(m, t):
    disj = pogdp.Disjunct()
    disj.c1 = pyo.Constraint(expr=m.x[t] <= 10)
    m.add_component('d1_%s' % t, disj)
    return disj

def d2(m, t):
    disj = pogdp.Disjunct()
    disj.c1 = pyo.Constraint(expr=m.x[t] >= 10)
    m.add_component('d2_%s' % t, disj)
    return disj

# sum x,y
def obj_rule(m):
    return pyo.quicksum(pyo.quicksum([m.x[t] + m.y[t]], linear=False) for t in m.T)

model.obj = pyo.Objective(rule=obj_rule)

def _op_mode_test(m, t):
    disj1 = d1(m, t)
    disj2 = d2(m, t)
    return [disj1, disj2]

model.disjunction_ = pogdp.Disjunction(model.T, rule=_op_mode_test)
However, it would be cleaner (and probably easier down the line) to index the Disjuncts by m.T as well, since that's basically what the unique names are doing.
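For illustration, a minimal sketch of that indexed alternative (not part of the original answer, and trimmed to the x variable only):

import pyomo.environ as pyo
import pyomo.gdp as pogdp

model = pyo.ConcreteModel()
model.T = pyo.RangeSet(0, 10)
model.x = pyo.Var(model.T, bounds=(-2, 10))

# Indexed Disjuncts: the rule receives the Disjunct block to populate and the index.
def _d1(d, t):
    d.c1 = pyo.Constraint(expr=d.model().x[t] <= 10)
model.d1 = pogdp.Disjunct(model.T, rule=_d1)

def _d2(d, t):
    d.c1 = pyo.Constraint(expr=d.model().x[t] >= 10)
model.d2 = pogdp.Disjunct(model.T, rule=_d2)

# One disjunction per time step, built from the indexed Disjuncts.
model.disjunction_ = pogdp.Disjunction(model.T, rule=lambda m, t: [m.d1[t], m.d2[t]])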
Block (and hence Disjunct) rules are passed the block (or disjunct) to be populated as the first argument. So, an "abstract" equivalent to your concrete model might look something like this:
model = AbstractModel()

@model.Disjunct()
def d1(d):
    # populate the `d` disjunct (i.e., `model.d1`) here
    pass

@model.Disjunct()
def d2(d):
    @d.Disjunct()
    def sub1(sd):
        # populate the 'sub1' disjunct here
        pass

    @d.Disjunct()
    def sub2(sd):
        # populate the 'sub2' disjunct here
        pass

    d.disj = Disjunction(expr=[d.sub1, d.sub2])

model.disj = Disjunction(expr=[model.d1, model.d2])
There is a more fundamental question as to why you are converting your model over to "abstract" form. Pyomo Abstract models were mostly devised to be familiar to people coming from modeling in AMPL. While they will work with block-structured models, AMPL was never really designed with blocks in mind, so block-oriented Abstract models tend to be unnecessarily cumbersome.
Here is our new model:
import pyomo.environ as pyo
import pyomo.gdp as pogdp

model = pyo.ConcreteModel()
model.T = pyo.RangeSet(0, 10)
model.x = pyo.Var(model.T, bounds=(-2, 10))
model.y = pyo.Var(model.T, bounds=(20, 30))
model.disjunction_ = pogdp.Disjunction(model.T)

def d1(m, t):
    m.d1 = pogdp.Disjunct()
    m.d1.c1 = pyo.Constraint(expr=m.x[t] <= 10)

def d2(m, t):
    m.d2 = pogdp.Disjunct()
    m.d2.c1 = pyo.Constraint(expr=m.x[t] >= 10)

# sum x,y
def obj_rule(m):
    return pyo.quicksum(pyo.quicksum([m.x[t] + m.y[t]], linear=False) for t in m.T)

model.obj = pyo.Objective(rule=obj_rule)

def _op_mode_test(m, t):
    d1(m, t)
    d2(m, t)
    return [m.d1, m.d2]

model.disjunction_ = pogdp.Disjunction(model.T, rule=_op_mode_test)
#model.pprint()
pyo.TransformationFactory('gdp.bigm').apply_to(model)
solver = pyo.SolverFactory('baron')
solver.solve(model)
print(pyo.value(model.obj))
I think it has something to do with the RangeSet. For a single step it works, but with more than one step it throws an error: AttributeError: 'NoneType' object has no attribute 'component'
It would be great if you could have a look at it.
Many thanks
Here is the code, which works fine with the bigm transformation but not with mbigm or hull:
import pyomo.environ as pyo
import pyomo.gdp as pogdp

model = pyo.ConcreteModel()
model.T = pyo.RangeSet(2)
model.x = pyo.Var(model.T, bounds=(1, 10))
model.y = pyo.Var(model.T, bounds=(1, 100))

def _op_mode_sub(m, t):
    m.disj1[t].sub1 = pogdp.Disjunct()
    m.disj1[t].sub1.c1 = pyo.Constraint(expr=m.y[t] == 60)
    m.disj1[t].sub2 = pogdp.Disjunct()
    m.disj1[t].sub2.c1 = pyo.Constraint(expr=m.y[t] == 100)
    return [m.disj1[t].sub1, m.disj1[t].sub2]

def _op_mode(m, t):
    m.disj2[t].c1 = pyo.Constraint(expr=m.y[t] >= 3)
    m.disj2[t].c2 = pyo.Constraint(expr=m.y[t] <= 5)
    return [m.disj1[t], m.disj2[t]]

model.disj1 = pogdp.Disjunct(model.T)
model.disj2 = pogdp.Disjunct(model.T)
model.disjunction1sub = pogdp.Disjunction(model.T, rule=_op_mode_sub)
model.disjunction1 = pogdp.Disjunction(model.T, rule=_op_mode)

def obj_rule(m, t):
    return pyo.quicksum(pyo.quicksum([m.x[t] + m.y[t]], linear=False) for t in m.T)

model.obj = pyo.Objective(rule=obj_rule)
model.pprint()

gdp_relax = pyo.TransformationFactory('gdp.bigm')
gdp_relax.apply_to(model)
solver = pyo.SolverFactory('glpk')
solver.solve(model)
print(pyo.value(model.obj))
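One pattern worth trying, not from the original thread, is to attach the inner Disjuncts and the inner Disjunction to the outer Disjunct itself, mirroring the concrete example at the top of the question; whether hull or mbigm accept the nesting may still depend on the Pyomo version. A minimal sketch:

import pyomo.environ as pyo
import pyomo.gdp as pogdp

model = pyo.ConcreteModel()
model.T = pyo.RangeSet(2)
model.y = pyo.Var(model.T, bounds=(1, 100))

def _disj1(d, t):
    m = d.model()
    d.sub1 = pogdp.Disjunct()
    d.sub1.c1 = pyo.Constraint(expr=m.y[t] == 60)
    d.sub2 = pogdp.Disjunct()
    d.sub2.c1 = pyo.Constraint(expr=m.y[t] == 100)
    # the inner disjunction lives on the outer disjunct
    d.sub_disjunction = pogdp.Disjunction(expr=[d.sub1, d.sub2])
model.disj1 = pogdp.Disjunct(model.T, rule=_disj1)

def _disj2(d, t):
    m = d.model()
    d.c1 = pyo.Constraint(expr=m.y[t] >= 3)
    d.c2 = pyo.Constraint(expr=m.y[t] <= 5)
model.disj2 = pogdp.Disjunct(model.T, rule=_disj2)

model.disjunction1 = pogdp.Disjunction(model.T, rule=lambda m, t: [m.disj1[t], m.disj2[t]])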

Pyomo error when passing rule from objective

I am running the following pyomo code
C = list(datadict.keys())

model = ConcreteModel()
model.IDX = range(23)
model.zIDX = range(1)

def _initialize_rule(model, i):
    return datadict[C[i]]['Int']

def _bounds_rule(model, i):
    return (datadict[C[i]]['Min'], datadict[C[i]]['Max'])

# declare decision variables
model.x = Var(model.IDX, initialize=_initialize_rule, domain=NonNegativeReals, bounds=_bounds_rule)
model.z = Var(model.zIDX, initialize=1, domain=NonNegativeReals, bounds=(0, None))

model.c1 = Constraint(
    expr = sum(model.x[i]*datadict[C[i]]["A"] for i in model.IDX) == budget
)

def _maa_rule(m):
    v = BlockVector(23)
    for i in m.IDX:
        if channelData[C[i]]["A"] == 0:
            v.set_block(i, 1)
        else:
            v.set_block(i, m.x[i])
    inputArr = v.flatten()
    # Month
    inputArr = np.append(inputArr, [dateData['Month']])
    # DOW
    inputArr = np.append(inputArr, [dateData['DOW']])
    # DOY
    inputArr = np.append(inputArr, [dateData['DOY']])
    # Quarter
    inputArr = np.append(inputArr, [dateData['Quarter']])
    # fracDOY
    inputArr = np.append(inputArr, [dateData['fracDOY']])
    X_arr = X_scaler.transform(inputArr)
    I_arr = Y_Inflow_scaler.inverse_transform(automlInflow.predict(X_arr).reshape(-1, 1))
    O_arr = Y_Outflow_scaler.inverse_transform(automlOutflow.predict(X_arr).reshape(-1, 1))
    m.z[i] = I_arr[0][0] - O_arr[0][0]
    return m.z[i]

model.obj = Objective(rule=_maa_rule, sense=maximize)
But I get the following error when I try to run the code...
ERROR: Rule failed when generating expression for Objective obj with index
None: AssertionError: Blocks need to be numpy arrays or BlockVectors
ERROR: Constructing component 'obj' from data=None failed: AssertionError:
Blocks need to be numpy arrays or BlockVectors
---------------------------------------------------------------------------
AssertionError: Blocks need to be numpy arrays or BlockVectors
I have tried creating a BlockVector and then assigning the predicted values to it, but nothing seems to work.
Any help would be GREATLY appreciated.
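As an aside, and assuming the BlockVector here is pynumero's: the assertion in the traceback is raised by BlockVector itself, because each block must be a numpy array or another BlockVector, so set_block(i, 1) (a plain scalar) or set_block(i, m.x[i]) (a Pyomo Var) trips it. A small illustration:

import numpy as np
from pyomo.contrib.pynumero.sparse import BlockVector

v = BlockVector(2)
v.set_block(0, np.array([1.0, 2.0]))   # accepted: a numpy array
v.set_block(1, np.array([3.0]))        # accepted: a numpy array
print(v.flatten())                     # [1. 2. 3.]
# v.set_block(1, 1)  # would raise the "Blocks need to be numpy arrays or
#                    # BlockVectors" assertion seen above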

Feature selection in scikit learn for multiple variables and thousands+ features

I am trying to perform feature selection for a logistic regression classifier. Originally there are 4 variables: name, location, gender, and label = ethnicity. These variables, particularly the name, give rise to tens of thousands of additional "features"; for example, the name "John Snow" gives rise to 2-letter substrings like 'jo', 'oh', 'hn', etc. The feature set undergoes DictVectorization.
I am trying to follow this tutorial (http://scikit-learn.org/stable/auto_examples/feature_selection/plot_feature_selection.html), but I am not sure if I am doing it right, since the tutorial uses a small number of features while mine has tens of thousands after vectorization. Also, plt.show() shows a blank figure.
# coding=utf-8
import pandas as pd
from pandas import DataFrame, Series
import numpy as np
import re
import random
import time
from random import randint
import csv
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
from sklearn import svm
from sklearn.metrics import classification_report
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectPercentile, f_classif
from sklearn.metrics import confusion_matrix as sk_confusion_matrix
from sklearn.metrics import roc_curve, auc
import matplotlib.pyplot as plt
from sklearn.metrics import precision_recall_curve
# Assign X and y variables
X = df.raw_name.values
X2 = df.name.values
X3 = df.gender.values
X4 = df.location.values
y = df.ethnicity_scan.values
# Feature extraction functions
def feature_full_name(nameString):
    try:
        full_name = nameString
        if len(full_name) > 1:  # not accept name with only 1 character
            return full_name
        else:
            return '?'
    except:
        return '?'

def feature_avg_wordLength(nameString):
    try:
        space = 0
        for i in nameString:
            if i == ' ':
                space += 1
        length = float(len(nameString) - space)
        name_entity = float(space + 1)
        avg = round(float(length/name_entity), 0)
        return avg
    except:
        return 0

def feature_name_entity(nameString2):
    space = 0
    try:
        for i in nameString2:
            if i == ' ':
                space += 1
        return space + 1
    except:
        return 0

def feature_gender(genString):
    try:
        gender = genString
        if len(gender) >= 1:
            return gender
        else:
            return '?'
    except:
        return '?'

def feature_noNeighborLoc(locString):
    try:
        x = re.sub(r'^[^, ]*', '', locString)  # remove everything before and include first ','
        y = x[2:]  # remove subsequent ',' and ' '
        return y
    except:
        return '?'

def list_to_dict(substring_list):
    try:
        substring_dict = {}
        for i in substring_list:
            substring_dict['substring=' + str(i)] = True
        return substring_dict
    except:
        return '?'
# Transform format of X variables, and spit out a numpy array for all features
my_dict13 = [{'name-entity': feature_name_entity(feature_full_name(i))} for i in X2]
my_dict14 = [{'avg-length': feature_avg_wordLength(feature_full_name(i))} for i in X]
my_dict15 = [{'gender': feature_full_name(i)} for i in X3]
my_dict16 = [{'location': feature_noNeighborLoc(feature_full_name(i))} for i in X4]
my_dict17 = [{'dummy1': 1} for i in X]
my_dict18 = [{'dummy2': random.randint(0,2)} for i in X]
all_dict = []
for i in range(0, len(my_dict)):
    temp_dict = dict(my_dict13[i].items() + my_dict14[i].items()
                     + my_dict15[i].items() + my_dict16[i].items()
                     + my_dict17[i].items() + my_dict18[i].items())
    all_dict.append(temp_dict)
newX = dv.fit_transform(all_dict)
# Separate the training and testing data sets
half_cut = int(len(df)/2.0)*-1
X_train = newX[:half_cut]
X_test = newX[half_cut:]
y_train = y[:half_cut]
y_test = y[half_cut:]
# Fitting X and y into model, using training data
lr = LogisticRegression()
lr.fit(X_train, y_train)
dv = DictVectorizer()
# Feature selection
plt.figure(1)
plt.clf()
X_indices = np.arange(X_train.shape[-1])
selector = SelectPercentile(f_classif, percentile=10)
selector.fit(X_train, y_train)
scores = -np.log10(selector.pvalues_)
scores /= scores.max()
plt.bar(X_indices - .45, scores, width=.2,
        label=r'Univariate score ($-Log(p_{value})$)', color='g')
plt.show()
Warning:
E:\Program Files Extra\Python27\lib\site-packages\sklearn\feature_selection\univariate_selection.py:111: UserWarning: Features [[0 0 0 ..., 0 0 0]] are constant.
It looks like the way you split your data into training and testing sets is not working:
# Separate the training and testing data sets
X_train = newX[:half_cut]
X_test = newX[half_cut:]
If you already use sklearn, it is much more convenient to use the builtin splitting routine for this:
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.5, random_state=0)
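Note that in current scikit-learn releases this helper lives in sklearn.model_selection rather than the old sklearn.cross_validation module, so the equivalent call (not part of the original answer) would be:

from sklearn.model_selection import train_test_split

# newX and y as built in the question's code
X_train, X_test, y_train, y_test = train_test_split(newX, y, test_size=0.5, random_state=0)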

How to get a graph for stock market analysis?

I updated the code and it now produces the graph; however, after showing the graph it emits the following warning:
Warning (from warnings module):
File "C:\Python27\lib\site-packages\matplotlib\collections.py", line 590
if self._edgecolors == str('face'):
FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison
import urllib2
import time
import datetime
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import matplotlib.dates as mdates
from matplotlib.finance import candlestick_ochl
import matplotlib
import pylab
matplotlib.rcParams.update({'font.size': 9})
def rsiFunc(prices, n=14):
    deltas = np.diff(prices)
    seed = deltas[:n+1]
    up = seed[seed >= 0].sum()/n
    down = -seed[seed < 0].sum()/n
    rs = up/down
    rsi = np.zeros_like(prices)
    rsi[:n] = 100. - 100./(1.+rs)
    for i in range(n, len(prices)):
        delta = deltas[i-1]  # cause the diff is 1 shorter
        if delta > 0:
            upval = delta
            downval = 0.
        else:
            upval = 0.
            downval = -delta
        up = (up*(n-1) + upval)/n
        down = (down*(n-1) + downval)/n
        rs = up/down
        rsi[i] = 100. - 100./(1.+rs)
    return rsi

def movingaverage(values, window):
    weigths = np.repeat(1.0, window)/window
    smas = np.convolve(values, weigths, 'valid')
    return smas  # as a numpy array

def ExpMovingAverage(values, window):
    weights = np.exp(np.linspace(-1., 0., window))
    weights /= weights.sum()
    a = np.convolve(values, weights, mode='full')[:len(values)]
    a[:window] = a[window]
    return a

def computeMACD(x, slow=26, fast=12):
    """
    compute the MACD (Moving Average Convergence/Divergence) using a fast and slow exponential moving avg
    return value is emaslow, emafast, macd which are len(x) arrays
    """
    emaslow = ExpMovingAverage(x, slow)
    emafast = ExpMovingAverage(x, fast)
    return emaslow, emafast, emafast - emaslow
def graphData(stock, MA1, MA2):
    '''
    Use this to dynamically pull a stock:
    '''
    try:
        print 'Currently Pulling', stock
        print str(datetime.datetime.fromtimestamp(int(time.time())).strftime('%Y-%m-%d %H:%M:%S'))
        # Keep in mind this is close high low open data from Yahoo
        urlToVisit = 'http://chartapi.finance.yahoo.com/instrument/1.0/'+stock+'/chartdata;type=quote;range=10y/csv'
        stockFile = []
        try:
            sourceCode = urllib2.urlopen(urlToVisit).read()
            splitSource = sourceCode.split('\n')
            for eachLine in splitSource:
                splitLine = eachLine.split(',')
                if len(splitLine) == 6:
                    if 'values' not in eachLine:
                        stockFile.append(eachLine)
        except Exception, e:
            print str(e), 'failed to organize pulled data.'
    except Exception, e:
        print str(e), 'failed to pull pricing data'

    try:
        date, closep, highp, lowp, openp, volume = np.loadtxt(stockFile, delimiter=',', unpack=True,
                                                              converters={0: mdates.strpdate2num('%Y%m%d')})
        x = 0
        y = len(date)
        newAr = []
        while x < y:
            appendLine = date[x], openp[x], closep[x], highp[x], lowp[x], volume[x]
            newAr.append(appendLine)
            x += 1

        Av1 = movingaverage(closep, MA1)
        Av2 = movingaverage(closep, MA2)
        SP = len(date[MA2-1:])

        fig = plt.figure(facecolor='#07000d')
        ax1 = plt.subplot2grid((6, 4), (1, 0), rowspan=4, colspan=4, axisbg='#07000d')
        candlestick_ochl(ax1, newAr[-SP:], width=.6, colorup='#53c156', colordown='#ff1717')  # width=.6, plot_day_summary_ohlc
        Label1 = str(MA1)+' SMA'
        Label2 = str(MA2)+' SMA'
        ax1.plot(date[-SP:], Av1[-SP:], '#e1edf9', label=Label1, linewidth=1.5)
        ax1.plot(date[-SP:], Av2[-SP:], '#4ee6fd', label=Label2, linewidth=1.5)
        ax1.grid(True, color='w')
        ax1.xaxis.set_major_locator(mticker.MaxNLocator(10))
        ax1.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
        ax1.yaxis.label.set_color("w")
        ax1.spines['bottom'].set_color("#5998ff")
        ax1.spines['top'].set_color("#5998ff")
        ax1.spines['left'].set_color("#5998ff")
        ax1.spines['right'].set_color("#5998ff")
        ax1.tick_params(axis='y', colors='w')
        plt.gca().yaxis.set_major_locator(mticker.MaxNLocator(prune='upper'))  # gca()
        ax1.tick_params(axis='x', colors='w')
        plt.ylabel('Stock price and Volume')

        maLeg = plt.legend(loc=9, ncol=2, prop={'size': 7},
                           fancybox=True, borderaxespad=0.)
        maLeg.get_frame().set_alpha(0.4)
        textEd = plt.gca().get_legend().get_texts()  # pylab.gca() changed to plt.gca()
        plt.setp(textEd[0:5], color='w')  # changed pylab.setp to plt.setp

        volumeMin = 0

        ax0 = plt.subplot2grid((6, 4), (0, 0), sharex=ax1, rowspan=1, colspan=4, axisbg='#07000d')
        rsi = rsiFunc(closep)
        rsiCol = '#c1f9f7'
        posCol = '#386d13'
        negCol = '#8f2020'
        ax0.plot(date[-SP:], rsi[-SP:], rsiCol, linewidth=1.5)
        ax0.axhline(70, color=negCol)
        ax0.axhline(30, color=posCol)
        ax0.fill_between(date[-SP:], rsi[-SP:], 70, where=(rsi[-SP:] >= 70), facecolor=negCol, edgecolor=negCol, alpha=0.5)
        ax0.fill_between(date[-SP:], rsi[-SP:], 30, where=(rsi[-SP:] <= 30), facecolor=posCol, edgecolor=posCol, alpha=0.5)
        ax0.set_yticks([30, 70])
        ax0.yaxis.label.set_color("w")
        ax0.spines['bottom'].set_color("#5998ff")
        ax0.spines['top'].set_color("#5998ff")
        ax0.spines['left'].set_color("#5998ff")
        ax0.spines['right'].set_color("#5998ff")
        ax0.tick_params(axis='y', colors='w')
        ax0.tick_params(axis='x', colors='w')
        plt.ylabel('RSI')

        ax1v = ax1.twinx()
        ax1v.fill_between(date[-SP:], volumeMin, volume[-SP:], facecolor='#00ffe8', alpha=.4)
        ax1v.axes.yaxis.set_ticklabels([])
        ax1v.grid(False)
        ax1v.set_ylim(0, 3*volume.max())
        ax1v.spines['bottom'].set_color("#5998ff")
        ax1v.spines['top'].set_color("#5998ff")
        ax1v.spines['left'].set_color("#5998ff")
        ax1v.spines['right'].set_color("#5998ff")
        ax1v.tick_params(axis='x', colors='w')
        ax1v.tick_params(axis='y', colors='w')

        ax2 = plt.subplot2grid((6, 4), (5, 0), sharex=ax1, rowspan=1, colspan=4, axisbg='#07000d')
        # START NEW INDICATOR CODE #
        # END NEW INDICATOR CODE #
        plt.gca().yaxis.set_major_locator(mticker.MaxNLocator(prune='upper'))
        ax2.spines['bottom'].set_color("#5998ff")
        ax2.spines['top'].set_color("#5998ff")
        ax2.spines['left'].set_color("#5998ff")
        ax2.spines['right'].set_color("#5998ff")
        ax2.tick_params(axis='x', colors='w')
        ax2.tick_params(axis='y', colors='w')
        ax2.yaxis.set_major_locator(mticker.MaxNLocator(nbins=5, prune='upper'))
        for label in ax2.xaxis.get_ticklabels():
            label.set_rotation(45)

        plt.suptitle(stock.upper(), color='w')
        plt.setp(ax0.get_xticklabels(), visible=False)
        plt.setp(ax1.get_xticklabels(), visible=False)

        '''ax1.annotate('Big news!',(date[510],Av1[510]),
            xytext=(0.8, 0.9), textcoords='axes fraction',
            arrowprops=dict(facecolor='white', shrink=0.05),
            fontsize=14, color = 'w',
            horizontalalignment='right', verticalalignment='bottom')'''

        plt.subplots_adjust(left=.09, bottom=.14, right=.94, top=.95, wspace=.20, hspace=0)
        plt.show()
        fig.savefig('example.png', facecolor=fig.get_facecolor())
    except Exception, e:
        print 'main loop', str(e)

while True:
    stock = raw_input('Stock to plot: ')
    graphData(stock, 10, 50)
Please look at the threads "Violin plot: warning with matplotlib 1.4.3" and "pyplot fill_between warning since upgrade of numpy to 1.10.10".
It seems there is a bug in matplotlib 1.4.3 (which has only started causing that error since the upgrade to numpy 1.10). This is reportedly corrected in 1.5.0 (which should be released soon). Hope this helps.
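If upgrading matplotlib is not immediately possible, a stopgap not mentioned in the original answer is to silence that specific FutureWarning:

import warnings

# Suppress only the elementwise-comparison FutureWarning raised inside
# matplotlib 1.4.3's collections.py
warnings.filterwarnings("ignore", message="elementwise comparison failed",
                        category=FutureWarning)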

Python AttributeError: instance has no attribute, when I add a new method

I am trying to call another method within my class, but for some reason I am getting AttributeError: portfinder instance has no attribute 'generatePortNumber'. See my code below:
Whenever I try to call generatePortNumber I get the same error. I have never come across this issue before.
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sqlite3 as lite
import sys
import random

class portfinder:
    """docstring for ClassName"""

    def __init__(self):
        self.portsToCheck = ['agentport', 'BatchProcessingAgentPort', 'databaseport', 'indexserviceport', 'monitorport', 'servicefacadeport', 'webdriverport']
        self.dataBasePort = (u'60025',)
        self.portInUse = False
        self.x = 0

        # NOTE: this def is indented one level too deep, so it is a local
        # function inside __init__ rather than a method of the class
        def generatePortNumber(self):
            self.newPortNumber = random.randrange(8000, 9000)
            print self.newPortNumber
            return self.newPortNumber

    def findUsedPortsinDB(self):
        con = lite.connect('D:\play\Opes\db.sqlite3')
        with con:
            cur = con.cursor()
            sqlStatement = "Select " + self.portsToCheck[2] + ' From Jobs_jobs'
            print sqlStatement
            cur.execute(sqlStatement)
            rows = cur.fetchall()
            for row in rows:
                print row
                if row == self.dataBasePort:
                    self.portInUse = "true"
                    self.generatePortNumber()

if __name__ == "__main__":
    m = portfinder()
    m.findUsedPortsinDB()
Found what was wrong: I had an extra indentation in my method.
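In other words, moving generatePortNumber up one level so it is defined directly on the class fixes the lookup; a minimal sketch of the corrected layout:

import random

class portfinder:
    def __init__(self):
        self.portInUse = False

    # defined at class level, so self.generatePortNumber() resolves correctly
    def generatePortNumber(self):
        self.newPortNumber = random.randrange(8000, 9000)
        return self.newPortNumber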