My code runs fine, but when I debug it and it steps into the subroutine, the error is: "decoding Unicode is not supported".
I use Anaconda. When I open untitled0.py, the encoding shown at the bottom of the screen is UTF-8, but when I open fhmm_exact.py, the encoding is UTF-8-GUESSED.
Traceback (most recent call last):
  File "<ipython-input-1-f6910c2dfa77>", line 1, in <module>
    debugfile('/home/wenwu/untitled0.py', wdir='/home/wenwu')
  File "/home/wenwu/anaconda/lib/python2.7/site-packages/spyderlib/widgets/externalshell/sitecustomize.py", line 702, in debugfile
    debugger.run("runfile(%r, args=%r, wdir=%r)" % (filename, args, wdir))
  File "/home/wenwu/anaconda/lib/python2.7/bdb.py", line 400, in run
    exec cmd in globals, locals
  File "<string>", line 1, in <module>
  File "/home/wenwu/anaconda/lib/python2.7/site-packages/spyderlib/widgets/externalshell/sitecustomize.py", line 682, in runfile
    execfile(filename, namespace)
  File "/home/wenwu/anaconda/lib/python2.7/site-packages/spyderlib/widgets/externalshell/sitecustomize.py", line 78, in execfile
    builtins.execfile(filename, *where)
  File "/home/wenwu/untitled0.py", line 37, in <module>
    fhmm.disaggregate(test_elec.mains(),output,sample_period = 60)
  File "/home/wenwu/nilmtk/nilmtk/disaggregate/fhmm_exact.py", line 287, in disaggregate
    mains_data_location = '{}/elec/meter1'.format(building_path)
  File "/home/wenwu/nilmtk/nilmtk/disaggregate/fhmm_exact.py", line 287, in disaggregate
    mains_data_location = '{}/elec/meter1'.format(building_path)
  File "/home/wenwu/anaconda/lib/python2.7/bdb.py", line 49, in trace_dispatch
    return self.dispatch_line(frame)
  File "/home/wenwu/anaconda/lib/python2.7/bdb.py", line 67, in dispatch_line
    self.user_line(frame)
  File "/home/wenwu/anaconda/lib/python2.7/pdb.py", line 158, in user_line
    self.interaction(frame, None)
  File "/home/wenwu/anaconda/lib/python2.7/site-packages/spyderlib/widgets/externalshell/sitecustomize.py", line 488, in interaction
    self.notify_spyder(frame) #-----Spyder-specific-------------------------
  File "/home/wenwu/anaconda/lib/python2.7/site-packages/spyderlib/widgets/externalshell/sitecustomize.py", line 432, in notify_spyder
    fname = unicode(fname, "utf-8")
TypeError: decoding Unicode is not supported
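The failing call is unicode(fname, "utf-8") in Spyder's sitecustomize.py. In Python 2, unicode() only accepts an encoding argument when it is given a byte string; if fname is already a unicode object, it raises exactly this TypeError. A minimal reproduction of what the debugger appears to be hitting (my reading of the traceback, not a confirmed diagnosis of Spyder):

# Python 2
path = b'/home/wenwu/nilmtk/nilmtk/disaggregate/fhmm_exact.py'
unicode(path, 'utf-8')   # works: decodes a byte string to unicode

upath = u'/home/wenwu/nilmtk/nilmtk/disaggregate/fhmm_exact.py'
unicode(upath, 'utf-8')  # TypeError: decoding Unicode is not supported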
The following is the code.
untitled0.py:
from matplotlib import rcParams
import matplotlib.pyplot as plt
rcParams['figure.figsize'] = (13,6)
plt.style.use('ggplot')
from nilmtk import DataSet,TimeFrame,MeterGroup,HDFDataStore
train = DataSet('/home/wenwu/redd.h5')
test = DataSet('/home/wenwu/redd.h5')
building = 1
train.set_window(end = '30-4-2011')
test.set_window(start = '30-4-2011')
train_elec = train.buildings[1].elec
test_elec = test.buildings[1].elec
fridge_meter = train_elec['fridge']
fridge_df = fridge_meter.load().next()
fridge_df.head()
mains = train_elec.mains()
mains_df = mains.load().next()
top_5_train_elec = train_elec.submeters().select_top_k(k = 5)
from nilmtk.disaggregate import fhmm_exact
from nilmtk.metrics import f1_score
fhmm = fhmm_exact.FHMM()
fhmm.train(top_5_train_elec,sample_period = 60)
disag_filename = '/home/wenwu/redd-disag-fhmm.h5'
output = HDFDataStore(disag_filename,'w')
fhmm.disaggregate(test_elec.mains(),output,sample_period = 60)
output.close()
disag_fhmm = DataSet(disag_filename)
disag_fhmm_elec = disag_fhmm.buildings[building].elec
f1_fhmm = f1_score(disag_fhmm_elec,test_elec)
f1_fhmm.plot(kind = 'barh')
The disaggregate part of fhmm_exact.py:
def disaggregate(self, mains, output_datastore, **load_kwargs):
    '''Disaggregate mains according to the model learnt previously.

    Parameters
    ----------
    mains : nilmtk.ElecMeter or nilmtk.MeterGroup
    output_datastore : instance of nilmtk.DataStore subclass
        For storing power predictions from disaggregation algorithm.
    output_name : string, optional
        The `name` to use in the metadata for the `output_datastore`.
        e.g. some sort of name for this experiment. Defaults to
        "NILMTK_FHMM_<date>"
    resample_seconds : number, optional
        The desired sample period in seconds.
    **load_kwargs : key word arguments
        Passed to `mains.power_series(**kwargs)`
    '''
    import warnings
    warnings.filterwarnings("ignore", category=Warning)
    MIN_CHUNK_LENGTH = 100

    if not self.model:
        raise RuntimeError(
            "The model needs to be instantiated before"
            " calling `disaggregate`. For example, the"
            " model can be instantiated by running `train`.")

    # Extract optional parameters from load_kwargs
    date_now = datetime.now().isoformat().split('.')[0]
    output_name = load_kwargs.pop('output_name', 'NILMTK_FHMM_' + date_now)
    resample_seconds = load_kwargs.pop('resample_seconds', 60)
    resample_rule = '{:d}S'.format(resample_seconds)
    timeframes = []
    building_path = '/building{}'.format(mains.building())
    mains_data_location = '{}/elec/meter1'.format(building_path)
    data_is_available = False

    for chunk in mains.power_series(**load_kwargs):
        # Check that chunk is sensible size before resampling
        if len(chunk) < MIN_CHUNK_LENGTH:
            continue

        # Record metadata
        timeframes.append(chunk.timeframe)
        measurement = chunk.name

        chunk = chunk.resample(rule=resample_rule)
        # Check chunk size *again* after resampling
        if len(chunk) < MIN_CHUNK_LENGTH:
            continue

        # Start disaggregation
        predictions = self.disaggregate_chunk(chunk)
        for meter in predictions.columns:
            data_is_available = True
            meter_instance = meter.instance()
            cols = pd.MultiIndex.from_tuples([chunk.name])
            predicted_power = predictions[[meter]]
            output_df = pd.DataFrame(predicted_power)
            output_df.columns = pd.MultiIndex.from_tuples([chunk.name])
            output_datastore.append('{}/elec/meter{}'
                                    .format(building_path, meter_instance),
                                    output_df)

        # Copy mains data to disag output
        output_datastore.append(key=mains_data_location,
                                value=pd.DataFrame(chunk, columns=cols))

    if not data_is_available:
        return

    ##################################
    # Add metadata to output_datastore

    # TODO: `preprocessing_applied` for all meters
    # TODO: split this metadata code into a separate function
    # TODO: submeter measurement should probably be the mains
    #       measurement we used to train on, not the mains measurement.

    # DataSet and MeterDevice metadata:
    meter_devices = {
        'FHMM': {
            'model': 'FHMM',
            'sample_period': resample_seconds,
            'max_sample_period': resample_seconds,
            'measurements': [{
                'physical_quantity': measurement[0],
                'type': measurement[1]
            }]
        },
        'mains': {
            'model': 'mains',
            'sample_period': resample_seconds,
            'max_sample_period': resample_seconds,
            'measurements': [{
                'physical_quantity': measurement[0],
                'type': measurement[1]
            }]
        }
    }

    merged_timeframes = merge_timeframes(timeframes, gap=resample_seconds)
    total_timeframe = TimeFrame(merged_timeframes[0].start,
                                merged_timeframes[-1].end)

    dataset_metadata = {'name': output_name, 'date': date_now,
                        'meter_devices': meter_devices,
                        'timeframe': total_timeframe.to_dict()}
    output_datastore.save_metadata('/', dataset_metadata)

    # Building metadata

    # Mains meter:
    elec_meters = {
        1: {
            'device_model': 'mains',
            'site_meter': True,
            'data_location': mains_data_location,
            'preprocessing_applied': {},  # TODO
            'statistics': {
                'timeframe': total_timeframe.to_dict()
            }
        }
    }

    # TODO: FIX THIS! Ugly hack for now
    # Appliances and submeters:
    appliances = []
    for i, meter in enumerate(self.meters):
        meter_instance = meter.instance()

        for app in meter.appliances:
            appliance = {
                'meters': [meter_instance],
                'type': app.identifier.type,
                'instance': app.identifier.instance
                # TODO this `instance` will only be correct when the
                # model is trained on the same house as it is tested on.
                # https://github.com/nilmtk/nilmtk/issues/194
            }
            appliances.append(appliance)

        elec_meters.update({
            meter_instance: {
                'device_model': 'FHMM',
                'submeter_of': 1,
                'data_location': ('{}/elec/meter{}'
                                  .format(building_path, meter_instance)),
                'preprocessing_applied': {},  # TODO
                'statistics': {
                    'timeframe': total_timeframe.to_dict()
                }
            }
        })

        # Setting the name if it exists
        if meter.name:
            if len(meter.name) > 0:
                elec_meters[meter_instance]['name'] = meter.name

    building_metadata = {
        'instance': mains.building(),
        'elec_meters': elec_meters,
        'appliances': appliances
    }

    output_datastore.save_metadata(building_path, building_metadata)
Related
I am trying to use the tf.data pipeline to get finer control over loading image data, but I receive the following error, which I think is caused by the usage of a list comprehension. My code looks like this:
def load_files(data_dir: str, val_split=0.2):
    assert len(os.listdir(os.path.join(data_dir + 'images/'))) == \
        len(os.listdir(os.path.join(data_dir, 'ground_truth/'))), \
        "No. of image files != No. of gt files"
    image_count = len(os.listdir(os.path.join(data_dir + 'images/')))
    files = os.listdir(os.path.join(data_dir + 'images/'))
    image_files = [os.path.join(data_dir + 'images/', file) for file in files]
    image_files = np.array(image_files)
    ds = tf.data.Dataset.from_tensor_slices(files)
    ds = ds.map(process_data)
    # train_ds = ds.skip(int(val_split * image_count))
    # val_ds = ds.take(int(val_split * image_count))
    return ds


def process_data(file_path):
    image, = tf.io.read_file(file_path)
    image = tf.io.decode_jpeg(image, channels=3)
    label = tf.strings.split(file_path)
    label = tf.io.decode_png(label, channels=0, dtype=tf.uint8)
    return image, label


some_dir = "../../../TuSimple_lane_detection/"
img_dir = some_dir + "images/"
mask_dir = some_dir + "ground_truth/"

data_train = load_files(some_dir)
for f in data_train.take(5):
    print(f.numpy())
The error looks like this:
File "E:\Datasets\KITTI_3D_Object_detection\venv\PycharmProjects\lib\site-packages\tensorflow\python\eager\function.py", line 3210, in _get_concrete_function_garbage_collected
graph_function, _ = self._maybe_define_function(args, kwargs)
File "E:\Datasets\KITTI_3D_Object_detection\venv\PycharmProjects\lib\site-packages\tensorflow\python\eager\function.py", line 3557, in _maybe_define_function
graph_function = self._create_graph_function(args, kwargs)
File "E:\Datasets\KITTI_3D_Object_detection\venv\PycharmProjects\lib\site-packages\tensorflow\python\eager\function.py", line 3392, in _create_graph_function
func_graph_module.func_graph_from_py_func(
File "E:\Datasets\KITTI_3D_Object_detection\venv\PycharmProjects\lib\site-packages\tensorflow\python\framework\func_graph.py", line 1143, in func_graph_from_py_func
func_outputs = python_func(*func_args, **func_kwargs)
File "E:\Datasets\KITTI_3D_Object_detection\venv\PycharmProjects\lib\site-packages\tensorflow\python\data\ops\dataset_ops.py", line 4510, in wrapped_fn
ret = wrapper_helper(*args)
File "E:\Datasets\KITTI_3D_Object_detection\venv\PycharmProjects\lib\site-packages\tensorflow\python\data\ops\dataset_ops.py", line 4440, in wrapper_helper
ret = autograph.tf_convert(self._func, ag_ctx)(*nested_args)
File "E:\Datasets\KITTI_3D_Object_detection\venv\PycharmProjects\lib\site-packages\tensorflow\python\autograph\impl\api.py", line 699, in wrapper
raise e.ag_error_metadata.to_exception(e)
tensorflow.python.framework.errors_impl.OperatorNotAllowedInGraphError: in user code:
File "E:/Datasets/KITTI_3D_Object_detection/KITTI_2D/EndToEndLaneDetection/Dataloader.py", line 21, in process_data *
image, = tf.io.read_file(file_path)
OperatorNotAllowedInGraphError: iterating over `tf.Tensor` is not allowed in Graph execution. Use Eager execution or decorate this function with #tf.function.
I did not have this problem when I was working with TF 1.12; when I shifted to TF 2.3 I started encountering this error.
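If the trailing comma is the culprit, here is a minimal sketch of the fix: tf.io.read_file returns a single scalar string tensor, and writing image, = ... forces Python to iterate over that tensor for tuple unpacking, which graph mode forbids. The mask handling below is an assumption about the parallel 'images'/'ground_truth' directory layout described above, not tested code:

import tensorflow as tf

def process_data(file_path):
    # No trailing comma: there is nothing to unpack from a scalar tensor.
    image = tf.io.read_file(file_path)
    image = tf.io.decode_jpeg(image, channels=3)

    # Hypothetical label handling: decode_png needs the *contents* of the
    # ground-truth file, so the mask file has to be read as well; the
    # 'images' -> 'ground_truth' substitution assumes parallel directories.
    mask_path = tf.strings.regex_replace(file_path, 'images', 'ground_truth')
    label = tf.io.decode_png(tf.io.read_file(mask_path), channels=0, dtype=tf.uint8)
    return image, label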
It's my first post; I hope it is well done.
I'm trying to run the following Zipline algo with local AAPL data:
import pandas as pd
from collections import OrderedDict
import pytz

from zipline.api import order, symbol, record, order_target
from zipline.algorithm import TradingAlgorithm

data = OrderedDict()
data['AAPL'] = pd.read_csv('AAPL.csv', index_col=0, parse_dates=['Date'])

panel = pd.Panel(data)
panel.minor_axis = ['Open', 'High', 'Low', 'Close', 'Volume', 'Price']
panel.major_axis = panel.major_axis.tz_localize(pytz.utc)
print panel["AAPL"]


def initialize(context):
    context.security = symbol('AAPL')


def handle_data(context, data):
    MA1 = data[context.security].mavg(50)
    MA2 = data[context.security].mavg(100)
    date = str(data[context.security].datetime)[:10]
    current_price = data[context.security].price
    current_positions = context.portfolio.positions[symbol('AAPL')].amount
    cash = context.portfolio.cash
    value = context.portfolio.portfolio_value
    current_pnl = context.portfolio.pnl

    # code (this will come under handle_data function only)
    if (MA1 > MA2) and current_positions == 0:
        number_of_shares = int(cash / current_price)
        order(context.security, number_of_shares)
        record(date=date, MA1=MA1, MA2=MA2, Price=current_price,
               status="buy", shares=number_of_shares, PnL=current_pnl,
               cash=cash, value=value)
    elif (MA1 < MA2) and current_positions != 0:
        order_target(context.security, 0)
        record(date=date, MA1=MA1, MA2=MA2, Price=current_price,
               status="sell", shares="--", PnL=current_pnl, cash=cash,
               value=value)
    else:
        record(date=date, MA1=MA1, MA2=MA2, Price=current_price,
               status="--", shares="--", PnL=current_pnl, cash=cash,
               value=value)


# initializing trading environment
algo_obj = TradingAlgorithm(initialize=initialize, handle_data=handle_data)

# run algo
perf_manual = algo_obj.run(panel)

# calculation
print "total pnl : " + str(float(perf_manual[["PnL"]].iloc[-1]))

buy_trade = perf_manual[["status"]].loc[perf_manual["status"] == "buy"].count()
sell_trade = perf_manual[["status"]].loc[perf_manual["status"] == "sell"].count()
total_trade = buy_trade + sell_trade

print "buy trade : " + str(int(buy_trade)) + " sell trade : " + str(int(sell_trade)) + " total trade : " + str(int(total_trade))
I was inspired by https://www.quantinsti.com/blog/introduction-zipline-python/ and https://www.quantinsti.com/blog/importing-csv-data-zipline-backtesting/.
I get this error :
Traceback (most recent call last):
  File "C:/Users/main/Desktop/docs/ALGO_TRADING/_DATAS/_zipline_data_bundle /temp.py", line 51, in <module>
    algo_obj = TradingAlgorithm(initialize=initialize, handle_data=handle_data)
  File "C:\Python27-32\lib\site-packages\zipline\algorithm.py", line 273, in __init__
    self.trading_environment = TradingEnvironment()
  File "C:\Python27-32\lib\site-packages\zipline\finance\trading.py", line 99, in __init__
    self.bm_symbol,
  File "C:\Python27-32\lib\site-packages\zipline\data\loader.py", line 166, in load_market_data
    environ,
  File "C:\Python27-32\lib\site-packages\zipline\data\loader.py", line 230, in ensure_benchmark_data
    last_date,
  File "C:\Python27-32\lib\site-packages\zipline\data\benchmarks.py", line 50, in get_benchmark_returns
    last_date
  File "C:\Python27-32\lib\site-packages\pandas_datareader\data.py", line 137, in DataReader
    session=session).read()
  File "C:\Python27-32\lib\site-packages\pandas_datareader\base.py", line 181, in read
    params=self._get_params(self.symbols))
  File "C:\Python27-32\lib\site-packages\pandas_datareader\base.py", line 79, in _read_one_data
    out = self._read_url_as_StringIO(url, params=params)
  File "C:\Python27-32\lib\site-packages\pandas_datareader\base.py", line 90, in _read_url_as_StringIO
    response = self._get_response(url, params=params)
  File "C:\Python27-32\lib\site-packages\pandas_datareader\base.py", line 139, in _get_response
    raise RemoteDataError('Unable to read URL: {0}'.format(url))
pandas_datareader._utils.RemoteDataError: Unable to read URL: http://www.google.com/finance/historical?q=SPY&startdate=Dec+29%2C+1989&enddate=Dec+20%2C+2017&output=csv
I don't understand this URL: "http://www.google.com/finance/historical?q=SPY&startdate=Dec+29%2C+1989&enddate=Dec+20%2C+2017&output=csv".
I didn't ask for any online data request, and not for the 'SPY' stock but for 'AAPL'...
What does this error mean to you?
Thanks a lot for your help!
C.
The only reference and workaround I found regarding this issue is here:
from pandas_datareader.google.daily import GoogleDailyReader

@property
def url(self):
    return 'http://finance.google.com/finance/historical'

GoogleDailyReader.url = url
Do:
pip install fix_yahoo_finance
then modify the file zipline/lib/pythonx.x/site-packages/zipline/data/benchmarks.py, adding the following two statements to it:
import fix_yahoo_finance as yf
yf.pdr_override()
then change the following instruction:
data = pd_reader.DataReader(symbol, 'Google', first_date, last_date)
to:
data = pd_reader.get_data_yahoo(symbol, first_date, last_date)
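For context (an inference from the traceback above, not checked against every zipline version): TradingAlgorithm's constructor builds a TradingEnvironment, which downloads SPY benchmark returns through pandas-datareader; that is why the failing URL mentions SPY and Google Finance even though the algorithm only trades local AAPL data. A sketch of what the patched download amounts to, with the symbol and dates taken from the failing URL:

import pandas_datareader.data as pd_reader
import fix_yahoo_finance as yf

yf.pdr_override()  # route pandas-datareader's Yahoo download through fix_yahoo_finance

def get_benchmark_returns_patched(symbol='SPY',
                                  first_date='1989-12-29',
                                  last_date='2017-12-20'):
    # Same call shape as the recipe above; Yahoo replaces the dead
    # Google Finance endpoint as the data source.
    return pd_reader.get_data_yahoo(symbol, first_date, last_date)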
thisfile.py:
import cPickle
import gzip
import os

import numpy

import theano
import theano.tensor as T


def load_data(dataset):
    f = gzip.open(dataset, 'rb')
    train_set, valid_set, test_set = cPickle.load(f)
    f.close()

    def shared_dataset(data_xy, borrow=True):
        data_x, data_y = data_xy
        shared_x = theano.shared(numpy.asarray(data_x,
                                               dtype=theano.config.floatX),
                                 borrow=borrow)
        shared_y = theano.shared(numpy.asarray(data_y,
                                               dtype=theano.config.floatX),
                                 borrow=borrow)
        return shared_x, T.cast(shared_y, 'int32')

    test_set_x, test_set_y = shared_dataset(test_set)
    valid_set_x, valid_set_y = shared_dataset(valid_set)
    train_set_x, train_set_y = shared_dataset(train_set)

    rval = [(train_set_x, train_set_y), (valid_set_x, valid_set_y),
            (test_set_x, test_set_y)]
    return rval


class PCA(object):

    def __init__(self):
        self.param = 0

    def dimemsion_transform(self, X):
        m_mean = T.mean(X, axis=0)
        X = X - m_mean  ##################### this line makes error
        return X


if __name__ == '__main__':
    dataset = 'mnist.pkl.gz'

    # load the MNIST data
    data = load_data(dataset)

    X = T.matrix('X')
    m_pca = PCA()

    transform = theano.function(
        inputs=[],
        outputs=m_pca.dimemsion_transform(X),
        givens={
            X: data
        }
    )
The error is shown below:
Traceback (most recent call last):
  File ".../thisfile.py", line 101, in <module>
    X: data
  File ".../Theano/theano/compile/function.py", line 322, in function
    output_keys=output_keys)
  File ".../Theano/theano/compile/pfunc.py", line 443, in pfunc
    no_default_updates=no_default_updates)
  File ".../Theano/theano/compile/pfunc.py", line 219, in rebuild_collect_shared
    cloned_v = clone_v_get_shared_updates(v, copy_inputs_over)
  File ".../Theano/theano/compile/pfunc.py", line 93, in clone_v_get_shared_updates
    clone_v_get_shared_updates(i, copy_inputs_over)
  File ".../Theano/theano/compile/pfunc.py", line 93, in clone_v_get_shared_updates
    clone_v_get_shared_updates(i, copy_inputs_over)
  File ".../Theano/theano/compile/pfunc.py", line 93, in clone_v_get_shared_updates
    clone_v_get_shared_updates(i, copy_inputs_over)
  File ".../Theano/theano/compile/pfunc.py", line 96, in clone_v_get_shared_updates
    [clone_d[i] for i in owner.inputs], strict=rebuild_strict)
  File ".../Theano/theano/gof/graph.py", line 242, in clone_with_new_inputs
    new_inputs[i] = curr.type.filter_variable(new)
  File ".../Theano/theano/tensor/type.py", line 234, in filter_variable
    self=self))
TypeError: Cannot convert Type Generic (of Variable <Generic>) into Type TensorType(float64, matrix). You can try to manually convert <Generic> into a TensorType(float64, matrix).
I am making a PCA function with Theano but have a problem.
The mean value is subtracted from the MNIST data in dimemsion_transform in the PCA class.
I do not get why it gives a type matching error, and how do I fix it?
Your problem comes from these lines:
data = load_data(dataset)
Here data is a list (as this is what load_data() returns).
transform = theano.function(
    inputs=[],
    outputs=m_pca.dimemsion_transform(X),
    givens={
        X: data
    }
)
And here you pass it as a value. You have to extract the item you want from the return value of load_data() like so:
[(train_set_x, train_set_y), (valid_set_x, valid_set_y),
(test_set_x, test_set_y)] = load_data(dataset)
and then use
givens={
    X: train_set_x
}
or one of the other values.
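Putting the pieces together, a minimal sketch of the corrected setup, reusing the names from the question:

# Unpack the shared variables returned by load_data() instead of
# passing the whole list to `givens`.
[(train_set_x, train_set_y), (valid_set_x, valid_set_y),
 (test_set_x, test_set_y)] = load_data(dataset)

X = T.matrix('X')
m_pca = PCA()

transform = theano.function(
    inputs=[],
    outputs=m_pca.dimemsion_transform(X),
    givens={
        X: train_set_x  # a shared variable of TensorType(float64, matrix)
    }
)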
Hi guys, I want to ask about json.dump.
I use scikit-learn to tune some methods with parameters, and I want to dump the result to JSON, but I got an error here.
I have the KNN method with these parameters:
KNeighborsClassifier(algorithm=u'auto', leaf_size=30, metric=u'manhattan',
                     metric_params=None, n_jobs=-1, n_neighbors=300, p=2,
                     weights=u'distance')
But I got an error like this:
Traceback (most recent call last):
  File "jamu.py", line 1018, in <module>
    main(argv)
  File "jamu.py", line 863, in main
    json.dumps(meta_clf, f)
  File "C:\Python27\lib\json\__init__.py", line 250, in d
    sort_keys=sort_keys, **kw).encode(obj)
  File "C:\Python27\lib\json\encoder.py", line 207, in en
    chunks = self.iterencode(o, _one_shot=True)
  File "C:\Python27\lib\json\encoder.py", line 270, in it
    return _iterencode(o, 0)
  File "C:\Python27\lib\json\encoder.py", line 184, in de
    raise TypeError(repr(o) + " is not JSON serializable"
TypeError: KNeighborsClassifier(algorithm=u'auto', leaf_s
tan',
    metric_params=None, n_jobs=-1, n_neighbors=300
    weights=u'distance') is not JSON serializable
Is there anything wrong with my code?
It is what it says on the tin: KNeighborsClassifier cannot be serialised with json.
You'll have to use a different way to serialise the model. For example, you can use joblib:
from sklearn.externals import joblib

# Suppose your KNeighborsClassifier model is called knn
joblib.dump(knn, 'some/kind/of/path/knn.joblib')
Loading a model is equally simple:
knn = joblib.load('some/kind/of/path/knn.joblib')
Check the joblib docs for what else it is good for.
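If only the hyper-parameters (rather than the fitted model) need to end up in JSON, scikit-learn estimators also expose get_params(), which returns a plain dict, so no string parsing is needed. A small sketch, assuming the model is called knn as above:

import json

# get_params() returns a dict of the estimator's hyper-parameters; for
# KNeighborsClassifier all of the values are JSON-serialisable already.
params = knn.get_params()
with open('knn_params.json', 'w') as f:
    json.dump(params, f, indent=2, default=str)  # default=str guards any exotic value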
Finally, I used this way:
meta_clf = KNeighborsClassifier(algorithm=u'auto', leaf_size=30, metric=u'manhattan',
                                metric_params=None, n_jobs=-1, n_neighbors=300, p=2,
                                weights=u'distance')


def print_to_json(meta_clf):
    meta_clf_str = str(meta_clf)
    meta_clf_str = meta_clf_str[meta_clf_str.index("(") + 1:meta_clf_str.rindex(")")]
    meta_clf_str = meta_clf_str.replace('\n ', '')
    meta_clf_str = meta_clf_str.replace(' ', '')
    meta_clf_str = meta_clf_str.replace('=u\'', '=\'')
    meta_clf_str = meta_clf_str.replace('\'', '')

    meta_clf_str_list = meta_clf_str.split(',')
    meta_clf_str_list_len = len(meta_clf_str_list)

    meta_clf_str_lists = []
    params = {}

    for x in meta_clf_str_list:
        meta_clf_str_list = x.split('=')
        if meta_clf_str_list[1].isdigit() == True:
            meta_clf_str_list[1] = int(meta_clf_str_list[1])
        meta_clf_str_lists.append(meta_clf_str_list)
        params[meta_clf_str_list[0]] = meta_clf_str_list[1]

    return params
It's enough for me.
Thanks for the answer, Thomas; I appreciate it.
Thanks for your time.
I created a Flask server that takes in variables from a form POST and outputs a pie or bar graph. While debugging, I noticed this error:
Error in atexit._run_exitfuncs:
Traceback (most recent call last):
  File "C:\Python27\lib\atexit.py", line 24, in _run_exitfuncs
    func(*targs, **kargs)
  File "C:\Python27\lib\site-packages\matplotlib\_pylab_helpers.py", line 92, in destroy_all
    manager.destroy()
  File "C:\Python27\lib\site-packages\matplotlib\backends\backend_tkagg.py", line 618, in destroy
    self.canvas._tkcanvas.after_cancel(self.canvas._idle_callback)
  File "C:\Python27\lib\lib-tk\Tkinter.py", line 616, in after_cancel
    self.tk.call('after', 'cancel', id)
TclError: out of stack space (infinite loop?)
Error in sys.exitfunc:
Traceback (most recent call last):
  File "C:\Python27\lib\atexit.py", line 24, in _run_exitfuncs
    func(*targs, **kargs)
  File "C:\Python27\lib\site-packages\matplotlib\_pylab_helpers.py", line 92, in destroy_all
    manager.destroy()
  File "C:\Python27\lib\site-packages\matplotlib\backends\backend_tkagg.py", line 618, in destroy
    self.canvas._tkcanvas.after_cancel(self.canvas._idle_callback)
  File "C:\Python27\lib\lib-tk\Tkinter.py", line 616, in after_cancel
    self.tk.call('after', 'cancel', id)
_tkinter.TclError: out of stack space (infinite loop?)
This seems to cause the server to reload (successfully, for what it's worth), which is a problem. I have no clue what's going on here, other than tkinter being upset, and I've had no luck with my Google-fu.
The Flask server (with debug settings; the mapped vars are due to a project requirement):
# Flask App that functions as a graph end point replacement "DAC-780"

# Standard Library
import os
import uuid

# Third Party
from flask import Flask, request

# Local
from pie import make_pie
from bar import make_bar

app_root = os.path.dirname(os.path.abspath(__file__))
images = os.path.join(app_root, 'static/images')

app = Flask(__name__, static_folder="static")
app._static_folder = os.path.join(app_root, 'static')


@app.route('/charts/<path>', methods=['POST'])
def graph(path):
    g_data_list = []
    file_name = str(uuid.uuid4())

    # if bar graph
    if path == "chart4.asp":
        # grab vars
        g_title = str(request.form['Title'])
        x_title = str(request.form['CatTitle'])
        y_title = str(request.form['ValTitle'])
        ser1 = str(request.form['Ser1'])
        ser2 = str(request.form['Ser2'])
        cat1 = str(request.form['Cat1'])
        cat2 = str(request.form['Cat2'])
        cat3 = str(request.form['Cat3'])
        cat4 = str(request.form['Cat4'])
        cat5 = str(request.form['Cat5'])
        cat6 = str(request.form['Cat6'])
        cat7 = str(request.form['Cat7'])
        cat8 = str(request.form['Cat8'])
        cat9 = str(request.form['Cat9'])
        cat10 = str(request.form['Cat10'])
        cat11 = str(request.form['Cat11'])
        cat12 = str(request.form['Cat12'])
        cat13 = str(request.form['Cat13'])
        s1d1 = int(request.form['S1D1'])
        s1d2 = int(request.form['S1D2'])
        s1d3 = int(request.form['S1D3'])
        s1d4 = int(request.form['S1D4'])
        s1d5 = int(request.form['S1D5'])
        s1d6 = int(request.form['S1D6'])
        s1d7 = int(request.form['S1D7'])
        s1d8 = int(request.form['S1D8'])
        s1d9 = int(request.form['S1D9'])
        s1d10 = int(request.form['S1D10'])
        s1d11 = int(request.form['S1D11'])
        s1d12 = int(request.form['S1D12'])
        s1d13 = int(request.form['S1D13'])
        s2d1 = int(request.form['S2D1'])
        s2d2 = int(request.form['S2D2'])
        s2d3 = int(request.form['S2D3'])
        s2d4 = int(request.form['S2D4'])
        s2d5 = int(request.form['S2D5'])
        s2d6 = int(request.form['S2D6'])
        s2d7 = int(request.form['S2D7'])
        s2d8 = int(request.form['S2D8'])
        s2d9 = int(request.form['S2D9'])
        s2d10 = int(request.form['S2D10'])
        s2d11 = int(request.form['S2D11'])
        s2d12 = int(request.form['S2D12'])
        s2d13 = int(request.form['S2D13'])

        # vars I mapped but weren't needed for my graph lib
        g_type = str(request.form['Type'])
        g_cats = str(request.form['Cats'])
        g_series = str(request.form['Series'])
        cat_title = str(request.form['CatTitle'])

        # add data to g_data_list so we can process it
        g_data_list.append((ser1, [s1d1, s1d2, s1d3, s1d4, s1d5, s1d6, s1d7, s1d8,
                                   s1d9, s1d10, s1d11, s1d12, s1d13]))
        g_data_list.append((ser2, [s2d1, s2d2, s2d3, s2d4, s2d5, s2d6, s2d7, s2d8,
                                   s2d9, s2d10, s2d11, s2d12, s2d13]))
        x_labels = [cat1, cat2, cat3, cat4, cat5, cat6, cat7, cat8, cat9, cat10,
                    cat11, cat12, cat13]

        # make a graph to return in html
        graph = make_bar(g_title, y_title, x_labels, g_data_list, file_name,
                         cat_title, x_title)
    else:
        # all others are probably pie graphs
        g_title = str(request.form['Title'])
        cat1 = str(request.form['Cat1'])
        cat2 = str(request.form['Cat2'])
        cat3 = str(request.form['Cat3'])
        cat4 = str(request.form['Cat4'])
        s1d1 = int(request.form['S1D1'])
        s1d2 = int(request.form['S1D2'])
        s1d3 = int(request.form['S1D3'])
        s1d4 = int(request.form['S1D4'])

        # vars that aren't needed for replications of the final product, but
        # were part of the old code
        g_type = str(request.form['Type'])
        g_cats = str(request.form['Cats'])
        g_series = str(request.form['Series'])
        cat_title = str(request.form['CatTitle'])
        val_title = str(request.form['ValTitle'])
        s1 = str(request.form['Ser1'])
        s2 = str(request.form['Ser2'])

        # add data
        g_data_list.append([cat1, s1d1])
        g_data_list.append([cat2, s1d2])
        g_data_list.append([cat3, s1d3])
        g_data_list.append([cat4, s1d4])

        # make graph to send back via html
        graph = make_pie(g_title, g_data_list, file_name)

    # make a web page with graph and return it
    html = """
    <html>
      <head>
        <title>%s</title>
      </head>
      <body>
        <img src="/static/images/%s.png" alt="An Error Occurred"/>
      </body>
    </html>
    """ % (g_title, str(file_name))
    return html


if __name__ == '__main__':
    app.run(port=3456, host="0.0.0.0", debug=True)
bar.py:
# creates a bar chart based on input using matplotlib
import os

import numpy as np
import matplotlib.pyplot as plt
from pylab import rcParams

rcParams['figure.figsize'] = 6.55, 3.8
app_root = os.path.dirname(os.path.abspath(__file__))
images = os.path.join(app_root, 'static/images')


def make_bar(g_title, y_title, x_labels, data_series, file_name, cat_title,
             x_title):
    n_groups = 13
    bar_width = 0.35
    opacity = 0.4

    fig, ax = plt.subplots()
    index = np.arange(n_groups)
    error_config = {'ecolor': '0.3'}

    plt.bar(index, tuple(data_series[0][1]), bar_width,
            alpha=opacity,
            color='b',
            error_kw=error_config,
            label='{}'.format(data_series[0][0]))

    plt.bar(index + bar_width, tuple(data_series[1][1]), bar_width,
            alpha=opacity,
            color='r',
            error_kw=error_config,
            label='{}'.format(data_series[1][0]))

    box = ax.get_position()
    ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])

    plt.xlabel(x_title, fontsize=10)
    plt.ylabel(y_title, fontsize=10)
    plt.title(g_title, fontsize=11)
    plt.xticks(index + bar_width, tuple(x_labels), fontsize=8)
    plt.yticks(fontsize=8)
    plt.axis('tight')
    lgd = plt.legend(fontsize=8, bbox_to_anchor=(1.15, 0.5))
    plt.tight_layout()
    plt.draw()
    plt.savefig('{}/{}.png'.format(images, file_name),
                dpi=100, format='png', bbox_extra_artists=(lgd,),
                bbox_inches='tight')
    return
pie.py:
# creates a pie chart w/ matplotlib
import os

import matplotlib.pyplot as plt
from pylab import rcParams

app_root = os.path.dirname(os.path.abspath(__file__))
images = os.path.join(app_root, 'static/images')


def make_pie(g_title, g_data_list, file_name):
    rcParams['figure.figsize'] = 5.75, 3
    labels = [entry[0] for entry in g_data_list]
    sizes = [entry[1] for entry in g_data_list]

    ax = plt.subplot(111)
    box = ax.get_position()
    ax.set_position([box.x0, box.y0, box.width * 0.7, box.height])
    patches, texts = ax.pie(sizes, startangle=90)
    ax.legend(patches, labels, loc='center left',
              bbox_to_anchor=(.9, 0.5), fontsize=8)
    plt.axis('equal')
    plt.suptitle(g_title, fontsize=12)
    plt.draw()
    plt.savefig('{}/{}.png'.format(images, file_name), dpi=100, format='png')
    return
I noticed that the function that graphed everything, when run separately, would stay running after I closed the plot window. Adding plt.clf() fixed that problem, and it appears to be the solution to my Flask problem as well.
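For illustration, a sketch of where that cleanup could go in make_pie above (the same would apply to make_bar); save_and_release is a hypothetical helper, and plt.close() is an extra step that also releases the figure manager:

import matplotlib.pyplot as plt

def save_and_release(path):
    # ... plotting calls as in make_pie / make_bar ...
    plt.savefig(path, dpi=100, format='png')
    plt.clf()    # clear the current figure so stale Tk callbacks don't accumulate
    plt.close()  # also drop the figure manager created for this request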
I had the same problem with seaborn;
import matplotlib
matplotlib.use('Agg')
helped me.
Details: https://matplotlib.org/faq/usage_faq.html#what-is-a-backend
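One ordering caveat (from how matplotlib selects backends, not from the answer above): matplotlib.use('Agg') has to run before pyplot is first imported, so in the Flask app these lines belong at the very top:

import matplotlib
matplotlib.use('Agg')            # pick the non-GUI backend before pyplot loads
import matplotlib.pyplot as plt  # safe to import pyplot afterwards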