I am trying to use the tf.data pipeline to get finer control over loading image data, but I receive the following error, which I think is because of the use of a list comprehension. My code looks like this:
def load_files(data_dir: str, val_split=0.2):
    assert len(os.listdir(os.path.join(data_dir + 'images/'))) == \
        len(os.listdir(os.path.join(data_dir, 'ground_truth/'))), print("No. of image files != No. of gt files")
    image_count = len(os.listdir(os.path.join(data_dir + 'images/')))
    files = os.listdir(os.path.join(data_dir + 'images/'))
    image_files = [os.path.join(data_dir + 'images/', file) for file in files]
    image_files = np.array(image_files)
    ds = tf.data.Dataset.from_tensor_slices(files)
    ds = ds.map(process_data)
    # train_ds = ds.skip(int(val_split * image_count))
    # val_ds = ds.take(int(val_split * image_count))
    return ds
def process_data(file_path):
    image, = tf.io.read_file(file_path)
    image = tf.io.decode_jpeg(image, channels=3)
    label = tf.strings.split(file_path)
    label = tf.io.decode_png(label, channels=0, dtype=tf.uint8)
    return image, label
some_dir = "../../../TuSimple_lane_detection/"
img_dir = some_dir + "images/"
mask_dir = some_dir + "ground_truth/"
data_train = load_files(some_dir)
for f in data_train.take(5):
    print(f.numpy())
The error looks like this:
File "E:\Datasets\KITTI_3D_Object_detection\venv\PycharmProjects\lib\site-packages\tensorflow\python\eager\function.py", line 3210, in _get_concrete_function_garbage_collected
graph_function, _ = self._maybe_define_function(args, kwargs)
File "E:\Datasets\KITTI_3D_Object_detection\venv\PycharmProjects\lib\site-packages\tensorflow\python\eager\function.py", line 3557, in _maybe_define_function
graph_function = self._create_graph_function(args, kwargs)
File "E:\Datasets\KITTI_3D_Object_detection\venv\PycharmProjects\lib\site-packages\tensorflow\python\eager\function.py", line 3392, in _create_graph_function
func_graph_module.func_graph_from_py_func(
File "E:\Datasets\KITTI_3D_Object_detection\venv\PycharmProjects\lib\site-packages\tensorflow\python\framework\func_graph.py", line 1143, in func_graph_from_py_func
func_outputs = python_func(*func_args, **func_kwargs)
File "E:\Datasets\KITTI_3D_Object_detection\venv\PycharmProjects\lib\site-packages\tensorflow\python\data\ops\dataset_ops.py", line 4510, in wrapped_fn
ret = wrapper_helper(*args)
File "E:\Datasets\KITTI_3D_Object_detection\venv\PycharmProjects\lib\site-packages\tensorflow\python\data\ops\dataset_ops.py", line 4440, in wrapper_helper
ret = autograph.tf_convert(self._func, ag_ctx)(*nested_args)
File "E:\Datasets\KITTI_3D_Object_detection\venv\PycharmProjects\lib\site-packages\tensorflow\python\autograph\impl\api.py", line 699, in wrapper
raise e.ag_error_metadata.to_exception(e)
tensorflow.python.framework.errors_impl.OperatorNotAllowedInGraphError: in user code:
File "E:/Datasets/KITTI_3D_Object_detection/KITTI_2D/EndToEndLaneDetection/Dataloader.py", line 21, in process_data *
image, = tf.io.read_file(file_path)
OperatorNotAllowedInGraphError: iterating over `tf.Tensor` is not allowed in Graph execution. Use Eager execution or decorate this function with @tf.function.
I did not have this problem when I was working with TF 1.12; since I shifted to TF 2.3 I have been encountering this error.
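For context: tf.io.read_file returns a single string tensor, so the trailing comma in image, = tf.io.read_file(file_path) tries to unpack (iterate over) that tensor, which graph mode forbids; note also that load_files builds the dataset from files (bare filenames) rather than the full paths in image_files. A minimal sketch of a corrected process_data, assuming the mask for .../images/foo.jpg lives at .../ground_truth/foo.png (the path rewriting below is an assumption; adjust it to the actual layout):

import tensorflow as tf

def process_data(file_path):
    image = tf.io.read_file(file_path)  # one string tensor; no unpacking needed
    image = tf.io.decode_jpeg(image, channels=3)
    # Derive the ground-truth path from the image path (hypothetical layout)
    mask_path = tf.strings.regex_replace(file_path, 'images', 'ground_truth')
    mask_path = tf.strings.regex_replace(mask_path, '\\.jpg$', '.png')
    label = tf.io.read_file(mask_path)
    label = tf.io.decode_png(label, channels=1, dtype=tf.uint8)
    return image, label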
This is my first post; I hope it is well done.
I'm trying to run the following Zipline algorithm with local AAPL data:
import pandas as pd
from collections import OrderedDict
import pytz
from zipline.api import order, symbol, record, order_target
from zipline.algorithm import TradingAlgorithm
data = OrderedDict()
data['AAPL'] = pd.read_csv('AAPL.csv', index_col=0, parse_dates=['Date'])
panel = pd.Panel(data)
panel.minor_axis = ['Open', 'High', 'Low', 'Close', 'Volume', 'Price']
panel.major_axis = panel.major_axis.tz_localize(pytz.utc)
print panel["AAPL"]
def initialize(context):
    context.security = symbol('AAPL')
def handle_data(context, data):
    MA1 = data[context.security].mavg(50)
    MA2 = data[context.security].mavg(100)
    date = str(data[context.security].datetime)[:10]
    current_price = data[context.security].price
    current_positions = context.portfolio.positions[symbol('AAPL')].amount
    cash = context.portfolio.cash
    value = context.portfolio.portfolio_value
    current_pnl = context.portfolio.pnl

    # code (this will come under handle_data function only)
    if (MA1 > MA2) and current_positions == 0:
        number_of_shares = int(cash / current_price)
        order(context.security, number_of_shares)
        record(date=date, MA1=MA1, MA2=MA2, Price=current_price,
               status="buy", shares=number_of_shares, PnL=current_pnl,
               cash=cash, value=value)
    elif (MA1 < MA2) and current_positions != 0:
        order_target(context.security, 0)
        record(date=date, MA1=MA1, MA2=MA2, Price=current_price,
               status="sell", shares="--", PnL=current_pnl, cash=cash,
               value=value)
    else:
        record(date=date, MA1=MA1, MA2=MA2, Price=current_price,
               status="--", shares="--", PnL=current_pnl, cash=cash,
               value=value)
#initializing trading environment
algo_obj = TradingAlgorithm(initialize=initialize, handle_data=handle_data)
#run algo
perf_manual = algo_obj.run(panel)
#code
#calculation
print "total pnl : " + str(float(perf_manual[["PnL"]].iloc[-1]))
buy_trade = perf_manual[["status"]].loc[perf_manual["status"] == "buy"].count()
sell_trade = perf_manual[["status"]].loc[perf_manual["status"] == "sell"].count()
total_trade = buy_trade + sell_trade
print "buy trade : " + str(int(buy_trade)) + " sell trade : " + str(int(sell_trade)) + " total trade : " + str(int(total_trade))
I was inspired by https://www.quantinsti.com/blog/introduction-zipline-python/ and https://www.quantinsti.com/blog/importing-csv-data-zipline-backtesting/.
I get this error:
Traceback (most recent call last):
File "C:/Users/main/Desktop/docs/ALGO_TRADING/_DATAS/_zipline_data_bundle /temp.py", line 51, in <module>
algo_obj = TradingAlgorithm(initialize=initialize, handle_data=handle_data)
File "C:\Python27-32\lib\site-packages\zipline\algorithm.py", line 273, in __init__
self.trading_environment = TradingEnvironment()
File "C:\Python27-32\lib\site-packages\zipline\finance\trading.py", line 99, in __init__
self.bm_symbol,
File "C:\Python27-32\lib\site-packages\zipline\data\loader.py", line 166, in load_market_data
environ,
File "C:\Python27-32\lib\site-packages\zipline\data\loader.py", line 230, in ensure_benchmark_data
last_date,
File "C:\Python27-32\lib\site-packages\zipline\data\benchmarks.py", line 50, in get_benchmark_returns
last_date
File "C:\Python27-32\lib\site-packages\pandas_datareader\data.py", line 137, in DataReader
session=session).read()
File "C:\Python27-32\lib\site-packages\pandas_datareader\base.py", line 181, in read
params=self._get_params(self.symbols))
File "C:\Python27-32\lib\site-packages\pandas_datareader\base.py", line 79, in _read_one_data
out = self._read_url_as_StringIO(url, params=params)
File "C:\Python27-32\lib\site-packages\pandas_datareader\base.py", line 90, in _read_url_as_StringIO
response = self._get_response(url, params=params)
File "C:\Python27-32\lib\site-packages\pandas_datareader\base.py", line 139, in _get_response
raise RemoteDataError('Unable to read URL: {0}'.format(url))
pandas_datareader._utils.RemoteDataError: Unable to read URL: http://www.google.com/finance/historical?q=SPY&startdate=Dec+29%2C+1989&enddate=Dec+20%2C+2017&output=csv
I don't understand: "http://www.google.com/finance/historical?q=SPY&startdate=Dec+29%2C+1989&enddate=Dec+20%2C+2017&output=csv".
I am not requesting online data... and not the 'SPY' stock but 'AAPL'...
What does this error mean to you?
Thanks a lot for your help!
C.
The only reference and workaround I found regarding this issue is here:
from pandas_datareader.google.daily import GoogleDailyReader

@property
def url(self):
    return 'http://finance.google.com/finance/historical'

GoogleDailyReader.url = url
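For this patch to take effect it has to run before TradingAlgorithm(...) is constructed: the traceback above shows the benchmark download is triggered from TradingEnvironment.__init__, which TradingAlgorithm.__init__ calls.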
do:
pip install fix_yahoo_finance
then modify the file zipline/lib/pythonx.x/site-packages/zipline/data/benchmarks.py and add the following two statements to it:
import fix_yahoo_finance as yf
yf.pdr_override()
then change the following instruction:
data = pd_reader.DataReader(symbol, 'Google', first_date, last_date)
to:
data = pd_reader.get_data_yahoo(symbol, first_date, last_date)
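Taken together, the edited section of benchmarks.py would look roughly like this (a sketch of the change described above, not the verbatim file; pd_reader, symbol, first_date and last_date are names from the surrounding zipline function):

import fix_yahoo_finance as yf  # added
yf.pdr_override()  # added: reroutes pandas_datareader's Yahoo calls through the fixed API

# old: data = pd_reader.DataReader(symbol, 'Google', first_date, last_date)
data = pd_reader.get_data_yahoo(symbol, first_date, last_date)  # new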
An unhandled exception occurs while passing a function inside models.py in Django (installed via pip install numpy django requests).
I am trying to build a face-detection API for the web using OpenCV, NumPy and Redis.
But I get an exception when passing the functions inside models.py.
I am just a newbie to the Django framework and the Python language.
Here is my error while running my Django server:
Unhandled exception in thread started by
Traceback (most recent call last):
File "/home/xyz/.xyz/lib/python2.7/site-packages/django/utils/autoreload.py", line 227, in wrapper
fn(*args, **kwargs)
File "/home/xyz/.xyz/lib/python2.7/site-packages/django/core/management/commands/runserver.py", line 117, in inner_run autoreload.raise_last_exception()
File "/home/xyz/.xyz/lib/python2.7/site-packages/django/utils/autoreload.py", line 250, in raise_last_exception
six.reraise(*_exception)
File "/home/xyz/.xyz/lib/python2.7/site-packages/django/utils/autoreload.py", line 227, in wrapper
fn(*args, **kwargs)
File "/home/xyz/.xyz/lib/python2.7/site-packages/django/init.py", line 27, in setup
apps.populate(settings.INSTALLED_APPS)
File "/home/xyz/.xyz/lib/python2.7/site-packages/django/apps/registry.py", line 108, in populate
app_config.import_models()
File "/home/xyz/.xyz/lib/python2.7/site-packages/django/apps/config.py", line 202, in import_models
self.models_module = import_module(models_module_name)
File "/usr/lib/python2.7/importlib/init.py", line 37, in import_module import(name)
File "/home/xyz/PycharmProjects/xyz_project/OpencvWeb/opencv_api/Face_detection/models.py", line 28, in
class LFace:
File "/home/xyz/PycharmProjects/xyz_project/OpencvWeb/opencv_api/Face_detection/models.py", line 29, in LFace
def __init__(self, faceRect = LRect(), eyes=[], id = 0, mt = 0, vel = LVector(0,0)):
NameError: name 'LRect' is not defined
And here is my code in models.py.
This is the class for LFace
class LFace:
    def __init__(self, faceRect=LRect(), eyes=[], id=0, mt=0, vel=LVector(0, 0)):
        self.face = faceRect.clone()
        self.eyes = [e.clone() for e in eyes]
        self.id = id
        self.name = ''
        self.missingTicks = mt
        self.velocity = vel

    def clone(self):
        return LFace(self.face, self.eyes, self.id)

    def setAs(self, f2):
        self.face = f2.face.clone()
        self.eyes = [e.clone() for e in f2.eyes]
        self.id = f2.id
        self.missingTicks = f2.missingTicks
        self.velocity = f2.velocity.clone()

    def dist(self, f2, scalar1=1.0, scalar2=0.2):
        # distance is defined as: center differences * scalar1 + size differences * scalar2
        d1 = self.face.center().dist(f2.face.center())
        d2 = self.face.wh().dist(f2.face.wh())
        return d1 * scalar1 + d2 * scalar2

    def interpolateTo(self, f2, t):
        self.face.interpolateTo(f2.face, t)
        self.eyes = f2.clone().eyes if t > 0.5 else []

    def getPredicted(self):
        rv = self.clone()
        p = rv.face.center()
        oldP = p.clone()
        p += self.velocity
        rv.velocity = LVector(0, 0)
        rv.face.setFromCenter(p)
        return rv
This is the class for LRect
class LRect:
    def __init__(self, x=0, y=0, w=0, h=0):
        self.x = x
        self.y = y
        self.w = w
        self.h = h

    def pos(self):
        return LVector(self.x, self.y)

    def wh(self):
        return LVector(self.w, self.h)

    def hwh(self):
        return LVector(self.w, self.h) * 0.5

    def center(self):
        return (self.pos() + self.hwh())

    def toString(self):
        return '[x={0},y={1},width={2},height={3}]'.format(*self.toList())

    def __str__(self):
        return self.toString()

    def toList(self):
        return [self.x, self.y, self.w, self.h]

    def clone(self):
        return LRect(self.x, self.y, self.w, self.h)

    def setFromCenter(self, centerPoint):
        hp = self.hwh()
        self.x = centerPoint[0] - hp[0]
        self.y = centerPoint[1] - hp[1]

    def scaleWH(self, scalar):
        # scalar can be either an LVector or a number
        c = self.center()
        hwh = self.wh() * scalar
        self.w, self.h = hwh[0], hwh[1]
        self.setFromCenter(c)

    def interpolateTo(self, r2, t):
        c = self.center()
        wh = self.wh()
        c.interpolateTo(r2.center(), t)
        wh.interpolateTo(r2.wh(), t)
        self.w, self.h = wh[0], wh[1]
        self.setFromCenter(c)
Please help!
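The NameError comes from evaluation order: default argument values are evaluated when the def line runs, so LRect() and LVector(0, 0) must already exist when class LFace (line 28 of models.py) is parsed. A minimal sketch of the usual fix, assuming LRect and LVector are defined later in the same models.py: either move those class definitions above LFace, or defer construction with None sentinels (which also avoids the shared mutable eyes=[] default):

class LFace:
    def __init__(self, faceRect=None, eyes=None, id=0, mt=0, vel=None):
        # Build the defaults at call time, once LRect/LVector are defined
        self.face = (faceRect if faceRect is not None else LRect()).clone()
        self.eyes = [e.clone() for e in (eyes if eyes is not None else [])]
        self.id = id
        self.name = ''
        self.missingTicks = mt
        self.velocity = vel if vel is not None else LVector(0, 0)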
thisfile.py
import cPickle
import gzip
import os
import numpy
import theano
import theano.tensor as T
def load_data(dataset):
    f = gzip.open(dataset, 'rb')
    train_set, valid_set, test_set = cPickle.load(f)
    f.close()

    def shared_dataset(data_xy, borrow=True):
        data_x, data_y = data_xy
        shared_x = theano.shared(numpy.asarray(data_x,
                                               dtype=theano.config.floatX),
                                 borrow=borrow)
        shared_y = theano.shared(numpy.asarray(data_y,
                                               dtype=theano.config.floatX),
                                 borrow=borrow)
        return shared_x, T.cast(shared_y, 'int32')

    test_set_x, test_set_y = shared_dataset(test_set)
    valid_set_x, valid_set_y = shared_dataset(valid_set)
    train_set_x, train_set_y = shared_dataset(train_set)

    rval = [(train_set_x, train_set_y), (valid_set_x, valid_set_y),
            (test_set_x, test_set_y)]
    return rval
class PCA(object):
    def __init__(self):
        self.param = 0

    def dimemsion_transform(self, X):
        m_mean = T.mean(X, axis=0)
        X = X - m_mean  ##################### this line makes error
        return X


if __name__ == '__main__':
    dataset = 'mnist.pkl.gz'
    # load the MNIST data
    data = load_data(dataset)

    X = T.matrix('X')
    m_pca = PCA()

    transform = theano.function(
        inputs=[],
        outputs=m_pca.dimemsion_transform(X),
        givens={
            X: data
        }
    )
The error is shown below:
Traceback (most recent call last):
File ".../thisfile.py", line 101, in <module>
X: data
File ".../Theano/theano/compile/function.py", line 322, in function
output_keys=output_keys)
File ".../Theano/theano/compile/pfunc.py", line 443, in pfunc
no_default_updates=no_default_updates)
File ".../Theano/theano/compile/pfunc.py", line 219, in rebuild_collect_shared
cloned_v = clone_v_get_shared_updates(v, copy_inputs_over)
File ".../Theano/theano/compile/pfunc.py", line 93, in clone_v_get_shared_updates
clone_v_get_shared_updates(i, copy_inputs_over)
File ".../Theano/theano/compile/pfunc.py", line 93, in clone_v_get_shared_updates
clone_v_get_shared_updates(i, copy_inputs_over)
File ".../Theano/theano/compile/pfunc.py", line 93, in clone_v_get_shared_updates
clone_v_get_shared_updates(i, copy_inputs_over)
File ".../Theano/theano/compile/pfunc.py", line 96, in clone_v_get_shared_updates
[clone_d[i] for i in owner.inputs], strict=rebuild_strict)
File ".../Theano/theano/gof/graph.py", line 242, in clone_with_new_inputs
new_inputs[i] = curr.type.filter_variable(new)
File ".../Theano/theano/tensor/type.py", line 234, in filter_variable
self=self))
TypeError: Cannot convert Type Generic (of Variable <Generic>) into Type TensorType(float64, matrix). You can try to manually convert <Generic> into a TensorType(float64, matrix).
I am writing a PCA function with Theano but have a problem.
The mean value is subtracted from the MNIST data in dimemsion_transform in the PCA class.
I do not understand why it gives a type-matching error, or how to fix it.
Your problem comes from these lines:
data = load_data(dataset)
Here data is a list (as this is what load_data() returns).
transform = theano.function(
    inputs=[],
    outputs=m_pca.dimemsion_transform(X),
    givens={
        X: data
    }
)
And here you pass it as a value. You have to extract the item you want from the return value of load_data() like so:
[(train_set_x, train_set_y), (valid_set_x, valid_set_y),
(test_set_x, test_set_y)] = load_data(dataset)
and then use
givens={
    X: train_set_x
}
or one of the other values.
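Putting it together, the end of the script would look roughly like this (a sketch, assuming you want to transform the training images; train_set_x is a shared variable of TensorType, so the givens substitution now type-checks):

[(train_set_x, train_set_y), (valid_set_x, valid_set_y),
 (test_set_x, test_set_y)] = load_data(dataset)

X = T.matrix('X')
m_pca = PCA()

transform = theano.function(
    inputs=[],
    outputs=m_pca.dimemsion_transform(X),
    givens={X: train_set_x}  # the shared matrix, not the whole Python list
)
print transform()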
My code can run, but when I debug and step into the subroutine, I get the error: "decoding Unicode is not supported".
I use Anaconda. When I open untitled0.py, the encoding shown at the bottom of the screen is UTF-8, but when I open fhmm_exact.py, the encoding is UTF-8-GUESSED.
Traceback (most recent call last):
File "<ipython-input-1-f6910c2dfa77>", line 1, in <module>
debugfile('/home/wenwu/untitled0.py', wdir='/home/wenwu')
File "/home/wenwu/anaconda/lib/python2.7/site-packages/spyderlib/widgets/externalshell/sitecustomize.py", line 702, in debugfile
debugger.run("runfile(%r, args=%r, wdir=%r)" % (filename, args, wdir))
File "/home/wenwu/anaconda/lib/python2.7/bdb.py", line 400, in run
exec cmd in globals, locals
File "<string>", line 1, in <module>
File "/home/wenwu/anaconda/lib/python2.7/site-packages/spyderlib/widgets/externalshell/sitecustomize.py", line 682, in runfile
execfile(filename, namespace)
File "/home/wenwu/anaconda/lib/python2.7/site-packages/spyderlib/widgets/externalshell/sitecustomize.py", line 78, in execfile
builtins.execfile(filename, *where)
File "/home/wenwu/untitled0.py", line 37, in <module>
fhmm.disaggregate(test_elec.mains(),output,sample_period = 60)
File "/home/wenwu/nilmtk/nilmtk/disaggregate/fhmm_exact.py", line 287, in disaggregate
mains_data_location = '{}/elec/meter1'.format(building_path)
File "/home/wenwu/nilmtk/nilmtk/disaggregate/fhmm_exact.py", line 287, in disaggregate
mains_data_location = '{}/elec/meter1'.format(building_path)
File "/home/wenwu/anaconda/lib/python2.7/bdb.py", line 49, in trace_dispatch
return self.dispatch_line(frame)
File "/home/wenwu/anaconda/lib/python2.7/bdb.py", line 67, in dispatch_line
self.user_line(frame)
File "/home/wenwu/anaconda/lib/python2.7/pdb.py", line 158, in user_line
self.interaction(frame, None)
File "/home/wenwu/anaconda/lib/python2.7/site-packages/spyderlib/widgets/externalshell/sitecustomize.py", line 488, in interaction
self.notify_spyder(frame) #-----Spyder-specific-------------------------
File "/home/wenwu/anaconda/lib/python2.7/site-packages/spyderlib/widgets/externalshell/sitecustomize.py", line 432, in notify_spyder
fname = unicode(fname, "utf-8")
TypeError: decoding Unicode is not supported
The following is the code.
untitled0.py
from matplotlib import rcParams
import matplotlib.pyplot as plt
rcParams['figure.figsize'] = (13,6)
plt.style.use('ggplot')
from nilmtk import DataSet,TimeFrame,MeterGroup,HDFDataStore
train = DataSet('/home/wenwu/redd.h5')
test = DataSet('/home/wenwu/redd.h5')
building = 1
train.set_window(end = '30-4-2011')
test.set_window(start = '30-4-2011')
train_elec = train.buildings[1].elec
test_elec = test.buildings[1].elec
fridge_meter = train_elec['fridge']
fridge_df = fridge_meter.load().next()
fridge_df.head()
mains = train_elec.mains()
mains_df = mains.load().next()
top_5_train_elec = train_elec.submeters().select_top_k(k = 5)
from nilmtk.disaggregate import fhmm_exact
from nilmtk.metrics import f1_score
fhmm = fhmm_exact.FHMM()
fhmm.train(top_5_train_elec,sample_period = 60)
disag_filename = '/home/wenwu/redd-disag-fhmm.h5'
output = HDFDataStore(disag_filename,'w')
fhmm.disaggregate(test_elec.mains(),output,sample_period = 60)
output.close()
disag_fhmm = DataSet(disag_filename)
disag_fhmm_elec = disag_fhmm.buildings[building].elec
f1_fhmm = f1_score(disag_fhmm_elec,test_elec)
f1_fhmm.plot(kind = 'barh')
The disaggregate part of fhmm_exact.py:
def disaggregate(self, mains, output_datastore, **load_kwargs):
    '''Disaggregate mains according to the model learnt previously.

    Parameters
    ----------
    mains : nilmtk.ElecMeter or nilmtk.MeterGroup
    output_datastore : instance of nilmtk.DataStore subclass
        For storing power predictions from disaggregation algorithm.
    output_name : string, optional
        The `name` to use in the metadata for the `output_datastore`.
        e.g. some sort of name for this experiment. Defaults to
        "NILMTK_FHMM_<date>"
    resample_seconds : number, optional
        The desired sample period in seconds.
    **load_kwargs : key word arguments
        Passed to `mains.power_series(**kwargs)`
    '''
    import warnings
    warnings.filterwarnings("ignore", category=Warning)
    MIN_CHUNK_LENGTH = 100

    if not self.model:
        raise RuntimeError(
            "The model needs to be instantiated before"
            " calling `disaggregate`. For example, the"
            " model can be instantiated by running `train`.")

    # Extract optional parameters from load_kwargs
    date_now = datetime.now().isoformat().split('.')[0]
    output_name = load_kwargs.pop('output_name', 'NILMTK_FHMM_' + date_now)
    resample_seconds = load_kwargs.pop('resample_seconds', 60)
    resample_rule = '{:d}S'.format(resample_seconds)

    timeframes = []
    building_path = '/building{}'.format(mains.building())
    mains_data_location = '{}/elec/meter1'.format(building_path)
    data_is_available = False

    for chunk in mains.power_series(**load_kwargs):
        # Check that chunk is sensible size before resampling
        if len(chunk) < MIN_CHUNK_LENGTH:
            continue

        # Record metadata
        timeframes.append(chunk.timeframe)
        measurement = chunk.name

        chunk = chunk.resample(rule=resample_rule)
        # Check chunk size *again* after resampling
        if len(chunk) < MIN_CHUNK_LENGTH:
            continue

        # Start disaggregation
        predictions = self.disaggregate_chunk(chunk)

        for meter in predictions.columns:
            data_is_available = True
            meter_instance = meter.instance()
            cols = pd.MultiIndex.from_tuples([chunk.name])

            predicted_power = predictions[[meter]]
            output_df = pd.DataFrame(predicted_power)
            output_df.columns = pd.MultiIndex.from_tuples([chunk.name])
            output_datastore.append('{}/elec/meter{}'
                                    .format(building_path, meter_instance),
                                    output_df)

        # Copy mains data to disag output
        output_datastore.append(key=mains_data_location,
                                value=pd.DataFrame(chunk, columns=cols))

    if not data_is_available:
        return

    ##################################
    # Add metadata to output_datastore

    # TODO: `preprocessing_applied` for all meters
    # TODO: split this metadata code into a separate function
    # TODO: submeter measurement should probably be the mains
    #       measurement we used to train on, not the mains measurement.

    # DataSet and MeterDevice metadata:
    meter_devices = {
        'FHMM': {
            'model': 'FHMM',
            'sample_period': resample_seconds,
            'max_sample_period': resample_seconds,
            'measurements': [{
                'physical_quantity': measurement[0],
                'type': measurement[1]
            }]
        },
        'mains': {
            'model': 'mains',
            'sample_period': resample_seconds,
            'max_sample_period': resample_seconds,
            'measurements': [{
                'physical_quantity': measurement[0],
                'type': measurement[1]
            }]
        }
    }

    merged_timeframes = merge_timeframes(timeframes, gap=resample_seconds)
    total_timeframe = TimeFrame(merged_timeframes[0].start,
                                merged_timeframes[-1].end)

    dataset_metadata = {'name': output_name, 'date': date_now,
                        'meter_devices': meter_devices,
                        'timeframe': total_timeframe.to_dict()}
    output_datastore.save_metadata('/', dataset_metadata)

    # Building metadata

    # Mains meter:
    elec_meters = {
        1: {
            'device_model': 'mains',
            'site_meter': True,
            'data_location': mains_data_location,
            'preprocessing_applied': {},  # TODO
            'statistics': {
                'timeframe': total_timeframe.to_dict()
            }
        }
    }

    # TODO: FIX THIS! Ugly hack for now
    # Appliances and submeters:
    appliances = []
    for i, meter in enumerate(self.meters):
        meter_instance = meter.instance()

        for app in meter.appliances:
            appliance = {
                'meters': [meter_instance],
                'type': app.identifier.type,
                'instance': app.identifier.instance
                # TODO this `instance` will only be correct when the
                # model is trained on the same house as it is tested on.
                # https://github.com/nilmtk/nilmtk/issues/194
            }
            appliances.append(appliance)

        elec_meters.update({
            meter_instance: {
                'device_model': 'FHMM',
                'submeter_of': 1,
                'data_location': ('{}/elec/meter{}'
                                  .format(building_path, meter_instance)),
                'preprocessing_applied': {},  # TODO
                'statistics': {
                    'timeframe': total_timeframe.to_dict()
                }
            }
        })

        # Setting the name if it exists
        if meter.name:
            if len(meter.name) > 0:
                elec_meters[meter_instance]['name'] = meter.name

    building_metadata = {
        'instance': mains.building(),
        'elec_meters': elec_meters,
        'appliances': appliances
    }

    output_datastore.save_metadata(building_path, building_metadata)
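For what it's worth, the bottom of the traceback is not in nilmtk at all but in Spyder's debugger hook: notify_spyder() calls unicode(fname, "utf-8"), and in Python 2 calling unicode() with an encoding argument on a value that is already unicode raises exactly "TypeError: decoding Unicode is not supported". A minimal local guard (a workaround sketch for sitecustomize.py, not an official fix) would be:

# in spyderlib/widgets/externalshell/sitecustomize.py, notify_spyder():
if isinstance(fname, str):           # Python 2: only byte strings need decoding
    fname = unicode(fname, "utf-8")  # an already-unicode fname is left untouched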