Pyomo: Cannot apply a Set operator to an indexed Set component

I'm a new Pyomo user trying to convert an AMPL model to Pyomo, and I'm finding it harder than expected.
When I create the variable x over the set lev, which is indexed by another set A, I get this error:
Cannot apply a Set operator to an indexed Set component (lev).
Thanks in advance.
The AMPL model and Pyomo code are shown below.
AMPL
set T=1..7; #set of epochs
set A ordered; #set of appliances
set L; # set of energy consumption levels
set Lev {A} within L; #set of energy consumption levels of appliance A
var x{a in A, l in Lev[a], t in T}, binary; #1 if appliance a operates at consumption level l at epoch t
Pyomo
model = pyo.AbstractModel()
model.T = pyo.RangeSet(1, 48)
model.A = pyo.Set(ordered=True)
model.L = pyo.Set()
model.lev = pyo.Set(model.A, within=model.L)
model.y = pyo.Var(model.A, model.T, domain=pyo.Binary)
model.x = pyo.Var(model.A, model.lev, model.T, domain=pyo.Binary)
The error:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-74-f8fe2ec9e77d> in <module>()
10
11 model.y=pyo.Var(model.A,model.T,domain=pyo.Binary)
---> 12 model.x=pyo.Var(model.A,model.lev,model.T,domain=pyo.Binary)
C:\Users\HaichengLing\anaconda3\envs\nilmtk-env\lib\site-packages\pyomo\core\base\var.py in __init__(self, *args, **kwd)
522 #
523 kwd.setdefault('ctype', Var)
--> 524 IndexedComponent.__init__(self, *args, **kwd)
525 #
526 # Determine if the domain argument is a functor or other object
C:\Users\HaichengLing\anaconda3\envs\nilmtk-env\lib\site-packages\pyomo\core\base\indexed_component.py in __init__(self, *args, **kwds)
215 # "transferred" to the model).
216 #
--> 217 tmp = [process_setarg(x) for x in args]
218 self._implicit_subsets = tmp
219 self._index = tmp[0].cross(*tmp[1:])
C:\Users\HaichengLing\anaconda3\envs\nilmtk-env\lib\site-packages\pyomo\core\base\indexed_component.py in <listcomp>(.0)
215 # "transferred" to the model).
216 #
--> 217 tmp = [process_setarg(x) for x in args]
218 self._implicit_subsets = tmp
219 self._index = tmp[0].cross(*tmp[1:])
C:\Users\HaichengLing\anaconda3\envs\nilmtk-env\lib\site-packages\pyomo\core\base\set.py in process_setarg(arg)
118 raise TypeError("Cannot apply a Set operator to an "
119 "indexed %s component (%s)"
--> 120 % (arg.ctype.__name__, arg.name,))
121 elif isinstance(arg, Component):
122 raise TypeError("Cannot apply a Set operator to a non-Set "
TypeError: Cannot apply a Set operator to an indexed Set component (lev)

The easiest way to do this is to use an intermediate set:
model.A = pyo.Set(ordered=True)
model.L = pyo.Set()
model.lev = pyo.Set(model.A, within=model.L)

def AL_rule(m):
    return [(a, l) for a in m.A for l in m.lev[a]]

model.AL = pyo.Set(within=model.A * model.L, initialize=AL_rule)
model.x = pyo.Var(model.AL, model.T, domain=pyo.Binary)
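For reference, here is a minimal runnable sketch of the same pattern on a ConcreteModel; the appliance/level data is made up for illustration and is not from the question:
import pyomo.environ as pyo

model = pyo.ConcreteModel()
model.T = pyo.RangeSet(1, 3)
model.A = pyo.Set(initialize=['fridge', 'heater'], ordered=True)
model.L = pyo.Set(initialize=[0, 1, 2])
# indexed set: each appliance has its own subset of levels
model.lev = pyo.Set(model.A, within=model.L,
                    initialize={'fridge': [0, 1], 'heater': [0, 1, 2]})

# flatten the indexed set into one flat set of valid (appliance, level) pairs
def AL_rule(m):
    return [(a, l) for a in m.A for l in m.lev[a]]

model.AL = pyo.Set(within=model.A * model.L, initialize=AL_rule)
model.x = pyo.Var(model.AL, model.T, domain=pyo.Binary)

model.x.pprint()  # only the valid (a, l, t) combinations are created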

Related

Django queryset error when using len(qs): TypeError: argument must be int or float

I don't know what happened with my DB, but now I can't call len() on my queryset.
I can build a queryset with plenty of objects via qs = SignalSma.objects.all(),
but somehow I can't use len(qs) or loop over it.
I get this error if I try to do so.
In [9]: len(qs)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
Cell In[9], line 1
----> 1 len(qs)
File ~\OneDrive\Desktop\dev-2023\signal\lib\site-packages\django\db\models\query.py:262, in QuerySet.__len__(self)
261 def __len__(self):
--> 262 self._fetch_all()
263 return len(self._result_cache)
File ~\OneDrive\Desktop\dev-2023\signal\lib\site-packages\django\db\models\query.py:1324, in QuerySet._fetch_all(self)
1322 def _fetch_all(self):
1323 if self._result_cache is None:
-> 1324 self._result_cache = list(self._iterable_class(self))
1325 if self._prefetch_related_lookups and not self._prefetch_done:
1326 self._prefetch_related_objects()
File ~\OneDrive\Desktop\dev-2023\signal\lib\site-packages\django\db\models\query.py:68, in ModelIterable.__iter__(self)
59 related_populators = get_related_populators(klass_info, select, db)
60 known_related_objects = [
61 (field, related_objs, operator.attrgetter(*[
62 field.attname
(...)
66 ])) for field, related_objs in queryset._known_related_objects.items()
67 ]
---> 68 for row in compiler.results_iter(results):
69 obj = model_cls.from_db(db, init_list, row[model_fields_start:model_fields_end])
70 for rel_populator in related_populators:
File ~\OneDrive\Desktop\dev-2023\signal\lib\site-packages\django\db\models\sql\compiler.py:1122, in SQLCompiler.apply_converters(self, rows, converters)
1120 value = row[pos]
1121 for converter in convs:
-> 1122 value = converter(value, expression, connection)
1123 row[pos] = value
1124 yield row
File ~\OneDrive\Desktop\dev-2023\signal\lib\site-packages\django\db\backends\sqlite3\operations.py:313, in DatabaseOperations.get_decimalfield_converter.<locals>.converter(value, expression, connection)
311 def converter(value, expression, connection):
312 if value is not None:
--> 313 return create_decimal(value).quantize(quantize_value, context=expression.output_field.context)
TypeError: argument must be int or float
Any idea what is happening, and how can I fix it?
QuerySet objects have their own counting method. Use it:
qs = SignalSma.objects.all()
qs.count() # returns number of objects inside the queryset
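.count() issues a SELECT COUNT(*) and never deserializes row values, so the DecimalField converter that fails in the traceback is never run. If you also want to locate the malformed stored value, here is a hedged diagnostic sketch; the model name follows the question, and the corrupted-row diagnosis is an assumption based on the traceback:
# Fetching only the pk skips the decimal converter entirely; re-loading rows
# one at a time then isolates the record whose stored value cannot be
# converted. Assumes the TypeError comes from bad data in the table.
bad_rows = []
for pk in SignalSma.objects.values_list('pk', flat=True):
    try:
        SignalSma.objects.get(pk=pk)  # full-row fetch runs the converter
    except TypeError:
        bad_rows.append(pk)
print(bad_rows)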

AttributeError: 'list' object has no attribute 'lower' in TF IDF modeling

Can anyone please help me move forward with my modeling? I have no idea where that .lower attribute is being called or how to fix it. I appreciate any help.
Here is the only part where I applied .lower:
wordnet_lemmatizer = WordNetLemmatizer()

def create_tokens(df2):
    df2['low'] = df2['Movie'].str.lower()
    df2['stopwords_out'] = df2['low'].apply(lambda x: " ".join([word for word in x.split() if word not in stops]))
    df2['tokenized'] = df2.apply(lambda row: nltk.word_tokenize(row['stopwords_out']), axis=1)
    df2['eng_only'] = df2['tokenized'].apply(lambda x: [word for word in x if word.isalpha()])
    df2['lemmatized'] = df2['eng_only'].apply(lambda x: [wordnet_lemmatizer.lemmatize(word) for word in x])
Here is where I changed my lemmatized column to a list:
a = df2.lemmatized.to_list()
b = list(itertools.chain.from_iterable(a))
bow = Counter(b)
Here is where I try to create the TF-IDF, and where the error appears:
cv = CountVectorizer(min_df=0, max_df=1)
tf = cv.fit_transform(df2.lemmatized)
The error:
AttributeError Traceback (most recent call last)
C:\AppData\Local\Temp/ipykernel_24552/1530549768.py in
2
3 cv = CountVectorizer(min_df=0, max_df=1)
----> 4 tf = cv.fit_transform(df2.lemmatized)
5
6 print(df2.lemmatized)
~\Anaconda3\lib\site-packages\sklearn\feature_extraction\text.py in fit_transform(self, raw_documents, y)
1200 max_features = self.max_features
1201
-> 1202 vocabulary, X = self.count_vocab(raw_documents,
1203 self.fixed_vocabulary)
1204
~\Anaconda3\lib\site-packages\sklearn\feature_extraction\text.py in _count_vocab(self, raw_documents, fixed_vocab)
1112 for doc in raw_documents:
1113 feature_counter = {}
-> 1114 for feature in analyze(doc):
1115 try:
1116 feature_idx = vocabulary[feature]
~\Anaconda3\lib\site-packages\sklearn\feature_extraction\text.py in _analyze(doc, analyzer, tokenizer, ngrams, preprocessor, decoder, stop_words)
102 else:
103 if preprocessor is not None:
--> 104 doc = preprocessor(doc)
105 if tokenizer is not None:
106 doc = tokenizer(doc)
~\Anaconda3\lib\site-packages\sklearn\feature_extraction\text.py in _preprocess(doc, accent_function, lower)
67 """
68 if lower:
---> 69 doc = doc.lower()
70 if accent_function is not None:
71 doc = accent_function(doc)
AttributeError: 'list' object has no attribute 'lower'
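The traceback shows CountVectorizer's default preprocessor calling doc.lower() on each document, so each document must be a string, while df2.lemmatized holds lists of tokens. Here is a hedged sketch of one common fix, using toy stand-in data rather than the question's DataFrame:
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer

# toy stand-in for the question's DataFrame
df2 = pd.DataFrame({'lemmatized': [['good', 'movie'], ['bad', 'plot']]})

cv = CountVectorizer()
tf = cv.fit_transform(df2['lemmatized'].apply(" ".join))  # join tokens back into strings
print(tf.shape)

# Alternative: keep the lists and bypass preprocessing/tokenization entirely:
# cv = CountVectorizer(analyzer=lambda tokens: tokens)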

error: look-behind requires fixed-width pattern (spaCy)

I created a custom spaCy model using https://github.com/explosion/spaCy/blob/master/examples/training/train_ner.py, and when I load this model it shows an error: look-behind requires fixed-width pattern. I am confused about how to solve this issue. Any help will be appreciated. Thanks in advance.
output_dir = 'NLP_entity/model'
print("Loading from", output_dir)
nlp2 = spacy.load(output_dir)
test_text = "Remove from account"
doc2 = nlp2(test_text)
print(test_text)
if doc2.ents:
    for ent in doc2.ents:
        print("entity = {}, text = {}".format(ent.label_, ent.text))
else:
    print("Entities is None")
Error:
('Loading from', 'NLP_entity/model')
errorTraceback (most recent call last)
<ipython-input-1-94981b2ca322> in <module>()
2 output_dir = 'NLP_entity/model'
3 print("Loading from", output_dir)
----> 4 nlp2 = spacy.load("NLP_entity/model")
5 test_text = "Remove from account".decode("utf-8")
6 #print()
/home/ubuntu/anaconda3/envs/python2/lib/python2.7/site-packages/spacy/__init__.pyc in load(name, **overrides)
25 if depr_path not in (True, False, None):
26 deprecation_warning(Warnings.W001.format(path=depr_path))
---> 27 return util.load_model(name, **overrides)
28
29
/home/ubuntu/anaconda3/envs/python2/lib/python2.7/site-packages/spacy/util.pyc in load_model(name, **overrides)
131 return load_model_from_package(name, **overrides)
132 if Path(name).exists(): # path to model data directory
--> 133 return load_model_from_path(Path(name), **overrides)
134 elif hasattr(name, "exists"): # Path or Path-like to model data
135 return load_model_from_path(name, **overrides)
/home/ubuntu/anaconda3/envs/python2/lib/python2.7/site-packages/spacy/util.pyc in load_model_from_path(model_path, meta, **overrides)
171 component = nlp.create_pipe(name, config=config)
172 nlp.add_pipe(component, name=name)
--> 173 return nlp.from_disk(model_path)
174
175
/home/ubuntu/anaconda3/envs/python2/lib/python2.7/site-packages/spacy/language.pyc in from_disk(self, path, exclude, disable)
784 # Convert to list here in case exclude is (default) tuple
785 exclude = list(exclude) + ["vocab"]
--> 786 util.from_disk(path, deserializers, exclude)
787 self._path = path
788 return self
/home/ubuntu/anaconda3/envs/python2/lib/python2.7/site-packages/spacy/util.pyc in from_disk(path, readers, exclude)
609 # Split to support file names like meta.json
610 if key.split(".")[0] not in exclude:
--> 611 reader(path / key)
612 return path
613
/home/ubuntu/anaconda3/envs/python2/lib/python2.7/site-packages/spacy/language.pyc in <lambda>(p)
774 deserializers["meta.json"] = lambda p: self.meta.update(srsly.read_json(p))
775 deserializers["vocab"] = lambda p: self.vocab.from_disk(p) and _fix_pretrained_vectors_name(self)
--> 776 deserializers["tokenizer"] = lambda p: self.tokenizer.from_disk(p, exclude=["vocab"])
777 for name, proc in self.pipeline:
778 if name in exclude:
tokenizer.pyx in spacy.tokenizer.Tokenizer.from_disk()
tokenizer.pyx in spacy.tokenizer.Tokenizer.from_bytes()
/home/ubuntu/anaconda3/envs/python2/lib/python2.7/re.pyc in compile(pattern, flags)
192 def compile(pattern, flags=0):
193 "Compile a regular expression pattern, returning a pattern object."
--> 194 return _compile(pattern, flags)
195
196 def purge():
/home/ubuntu/anaconda3/envs/python2/lib/python2.7/re.pyc in _compile(*key)
249 p = sre_compile.compile(pattern, flags)
250 except error, v:
--> 251 raise error, v # invalid expression
252 if not bypass_cache:
253 if len(_cache) >= _MAXCACHE:
error: look-behind requires fixed-width pattern
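For context, here is a hedged illustration of the error class itself, independent of spaCy: Python's standard-library re module only accepts fixed-width look-behinds, and the traceback shows the failure happening while re-compiling the tokenizer's serialized patterns under Python 2.7's re.
import re

re.compile(r"(?<=abc)x")      # fixed-width look-behind: compiles fine
try:
    re.compile(r"(?<=ab+)x")  # variable-width look-behind: rejected by re
except re.error as exc:
    print(exc)                # -> look-behind requires fixed-width pattern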

Bokeh - Grouped Bar chart

Maintainer note: This question as-is is obsolete, since the bokeh.charts API was deprecated and removed years ago. But see the answer below for how to create grouped bar charts with the stable bokeh.plotting API in newer versions of Bokeh
I want to create a simple bar chart (like the one on the official example page).
I tried executing the code in this old answer, Plotting Bar Charts with Bokeh,
but it shows this error:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-2-ba53ce344126> in <module>()
11
12 bar = Bar(xyvalues, cat, title="Stacked bars",
---> 13 xlabel="category", ylabel="language")
14
15 output_file("stacked_bar.html")
/usr/local/lib/python2.7/dist-packages/bokeh/charts/builders/bar_builder.pyc in Bar(data, label, values, color, stack, group, agg, xscale, yscale, xgrid, ygrid, continuous_range, **kw)
318 kw['y_range'] = y_range
319
--> 320 chart = create_and_build(BarBuilder, data, **kw)
321
322 # hide x labels if there is a single value, implying stacking only
/usr/local/lib/python2.7/dist-packages/bokeh/charts/builder.pyc in create_and_build(builder_class, *data, **kws)
60 # create the new builder
61 builder_kws = {k: v for k, v in kws.items() if k in builder_props}
---> 62 builder = builder_class(*data, **builder_kws)
63
64 # create a chart to return, since there isn't one already
/usr/local/lib/python2.7/dist-packages/bokeh/charts/builder.pyc in __init__(self, *args, **kws)
280
281 # handle input attrs and ensure attrs have access to data
--> 282 attributes = self._setup_attrs(data, kws)
283
284 # remove inputs handled by dimensions and chart attributes
/usr/local/lib/python2.7/dist-packages/bokeh/charts/builder.pyc in _setup_attrs(self, data, kws)
331 attributes[attr_name].iterable = custom_palette
332
--> 333 attributes[attr_name].setup(data=source, columns=attr)
334
335 else:
/usr/local/lib/python2.7/dist-packages/bokeh/charts/attributes.pyc in setup(self, data, columns)
193
194 if columns is not None and self.data is not None:
--> 195 self.set_columns(columns)
196
197 if self.columns is not None and self.data is not None:
/usr/local/lib/python2.7/dist-packages/bokeh/charts/attributes.pyc in set_columns(self, columns)
185 # assume this is now the iterable at this point
186 self.iterable = columns
--> 187 self._setup_default()
188
189 def setup(self, data=None, columns=None):
/usr/local/lib/python2.7/dist-packages/bokeh/charts/attributes.pyc in _setup_default(self)
142 def _setup_default(self):
143 """Stores the first value of iterable into `default` property."""
--> 144 self.default = next(self._setup_iterable())
145
146 def _setup_iterable(self):
/usr/local/lib/python2.7/dist-packages/bokeh/charts/attributes.pyc in _setup_iterable(self)
320
321 def _setup_iterable(self):
--> 322 return iter(self.items)
323
324 def get_levels(self, columns):
TypeError: 'NoneType' object is not iterable
The official example did work:
URL: http://docs.bokeh.org/en/0.11.0/docs/user_guide/charts.html#userguide-charts-data-types
from bokeh.charts import Bar, output_file, show
from bokeh.sampledata.autompg import autompg as df

p = Bar(df, label='yr', values='mpg', agg='median', group='origin',
        title="Median MPG by YR, grouped by ORIGIN", legend='top_right')
output_file("bar.html")
show(p)
BUT, I don't want to use pandas; I want to use a simple Python dictionary like this:
my_simple_dict = {
    'Group 1': [22, 33, 44, 55],
    'Group 2': [44, 66, 0, 24],
    'Group 3': [2, 99, 33, 51]
}
How can I achieve a bar chart that shows the three groups (Group 1, Group 2, Group 3) with the x-axis going from 1 to 4?
NOTE: I am working with Python 2.7.
The question and other answers are obsolete, as bokeh.charts was deprecated and removed several years ago. However, support for grouped and stacked bar charts using the stable bokeh.plotting API has improved greatly since then:
https://docs.bokeh.org/en/latest/docs/user_guide/categorical.html
Here is a full example:
from bokeh.io import show
from bokeh.models import ColumnDataSource, FactorRange
from bokeh.plotting import figure

fruits = ['Apples', 'Pears', 'Nectarines', 'Plums', 'Grapes', 'Strawberries']
years = ['2015', '2016', '2017']

data = {'fruits' : fruits,
        '2015'   : [2, 1, 4, 3, 2, 4],
        '2016'   : [5, 3, 3, 2, 4, 6],
        '2017'   : [3, 2, 4, 4, 5, 3]}

# this creates [ ("Apples", "2015"), ("Apples", "2016"), ("Apples", "2017"), ("Pears", "2015"), ... ]
x = [ (fruit, year) for fruit in fruits for year in years ]
counts = sum(zip(data['2015'], data['2016'], data['2017']), ())  # like an hstack

source = ColumnDataSource(data=dict(x=x, counts=counts))

p = figure(x_range=FactorRange(*x), plot_height=250, title="Fruit Counts by Year",
           toolbar_location=None, tools="")
p.vbar(x='x', top='counts', width=0.9, source=source)

p.y_range.start = 0
p.x_range.range_padding = 0.1
p.xaxis.major_label_orientation = 1
p.xgrid.grid_line_color = None

show(p)
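If the goal is the exact dictionary from the question, with positions 1 to 4 on the x-axis and the three groups side by side, the same pattern adapts directly. Here is a sketch under the same Bokeh version assumptions; the variable names are mine:
from bokeh.io import show
from bokeh.models import ColumnDataSource, FactorRange
from bokeh.plotting import figure

my_simple_dict = {
    'Group 1': [22, 33, 44, 55],
    'Group 2': [44, 66, 0, 24],
    'Group 3': [2, 99, 33, 51]
}

positions = ['1', '2', '3', '4']  # categorical x-axis labels
groups = sorted(my_simple_dict)   # ['Group 1', 'Group 2', 'Group 3']
x = [(pos, grp) for pos in positions for grp in groups]
counts = [my_simple_dict[grp][i] for i, _ in enumerate(positions) for grp in groups]

source = ColumnDataSource(data=dict(x=x, counts=counts))
p = figure(x_range=FactorRange(*x), plot_height=250, title="Groups by position")
p.vbar(x='x', top='counts', width=0.9, source=source)
p.y_range.start = 0
show(p)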
For now, the solution I found is changing the dict structure:
from bokeh.charts import Bar, output_file, show, hplot
import pandas as pd

my_simple_dict = {
    'Group 1': [22, 33, 44, 55],
    'Group 2': [44, 66, 0, 24],
    'Group 3': [2, 99, 33, 51]
}

my_data_transformed_dict = {}
my_data_transformed_dict['x-axis'] = []
my_data_transformed_dict['value'] = []
my_data_transformed_dict['group-name'] = []

for group, group_list in my_simple_dict.iteritems():
    x_axis = 0
    for item in group_list:
        x_axis += 1
        my_data_transformed_dict['x-axis'].append(x_axis)
        my_data_transformed_dict['value'].append(item)
        my_data_transformed_dict['group-name'].append(group)

my_bar = Bar(my_data_transformed_dict, values='value', label='x-axis',
             group='group-name', legend='top_right')
output_file("grouped_bar.html")
show(my_bar)
If someone knows a better way, please tell me.

Why I am getting the following AttributeError in Python?

I am using sklearn's GradientBoostingRegressor. After fitting it with 2000 estimators, I wanted to add more estimators to it. Since rerunning the entire fitting process takes too long, I used the set_params() method. Note that it is a multi-target problem, meaning I have 3 targets to fit. So I am using the following code to add more estimators.
def addMoreEstimators(train_X, train_y, test, models, n_estimators=500, warm_start=True):
    '''parameters: models (list of length 3 in our case)
       train_X, train_y [n_samples x 3], test
       n_estimators : previous + 500 (default) [additional estimators]
       warm_start : True (default)
    '''
    params = {'n_estimators': n_estimators, 'warm_start': warm_start}
    gbm_pred = pd.DataFrame()
    for (i, stars), clf in zip(enumerate(['*', '**', '***']), models):
        clf.set_params(**params)
        %time clf.fit(train_X.todense(), train_y[stars])
        %time gbm_pred[stars] = clf.predict(test.todense())
    gbm_pred = gbm_pred.as_matrix()
    gbm_dict = {'model': models, 'prediction': gbm_pred}
    return gbm_dict
Note: the models parameter is a list of 3 fitted models for the 3 targets.
When I ran it the first time with 2500 estimators (originally I had 2000), it ran fine and gave me an output.
When I run the same function with 3000 estimators, I get an AttributeError. Here models contained the 3 fitted models. Below is the traceback of the error (it's kinda long):
AttributeError Traceback (most recent call last)
<ipython-input-104-9418ada3b36f> in <module>()
7 test = val_X_tfidf[:,shortened_col_index],
8 models = models,
----> 9 n_estimators = 3000)
10
11 reduced_features_gbm_pred_3000_2_lr_1_msp_2 = reduced_features_gbm_model_3000_2_lr_1_msp_2['prediction']
<ipython-input-103-e15a4fb70b50> in addMoreEstimators(train_X, train_y, test, models, n_estimators, warm_start)
15
16 clf.set_params(**params)
---> 17 get_ipython().magic(u'time clf.fit(train_X.todense(),train_y[stars])')
18 print 'starting prediction'
19
//anaconda/lib/python2.7/site-packages/IPython/core/interactiveshell.pyc in magic(self, arg_s)
2305 magic_name, _, magic_arg_s = arg_s.partition(' ')
2306 magic_name = magic_name.lstrip(prefilter.ESC_MAGIC)
-> 2307 return self.run_line_magic(magic_name, magic_arg_s)
2308
2309 #-------------------------------------------------------------------------
//anaconda/lib/python2.7/site-packages/IPython/core/interactiveshell.pyc in run_line_magic(self, magic_name, line)
2226 kwargs['local_ns'] = sys._getframe(stack_depth).f_locals
2227 with self.builtin_trap:
-> 2228 result = fn(*args,**kwargs)
2229 return result
2230
//anaconda/lib/python2.7/site-packages/IPython/core/magics/execution.pyc in time(self, line, cell, local_ns)
//anaconda/lib/python2.7/site-packages/IPython/core/magic.pyc in <lambda>(f, *a, **k)
191 # but it's overkill for just that one bit of state.
192 def magic_deco(arg):
--> 193 call = lambda f, *a, **k: f(*a, **k)
194
195 if callable(arg):
//anaconda/lib/python2.7/site-packages/IPython/core/magics/execution.pyc in time(self, line, cell, local_ns)
1160 if mode=='eval':
1161 st = clock2()
-> 1162 out = eval(code, glob, local_ns)
1163 end = clock2()
1164 else:
<timed eval> in <module>()
//anaconda/lib/python2.7/site-packages/sklearn/ensemble/gradient_boosting.pyc in fit(self, X, y, sample_weight, monitor)
973 self.estimators_.shape[0]))
974 begin_at_stage = self.estimators_.shape[0]
--> 975 y_pred = self._decision_function(X)
976 self._resize_state()
977
//anaconda/lib/python2.7/site-packages/sklearn/ensemble/gradient_boosting.pyc in _decision_function(self, X)
1080 # not doing input validation.
1081 score = self._init_decision_function(X)
-> 1082 predict_stages(self.estimators_, X, self.learning_rate, score)
1083 return score
1084
sklearn/ensemble/_gradient_boosting.pyx in sklearn.ensemble._gradient_boosting.predict_stages (sklearn/ensemble/_gradient_boosting.c:2502)()
AttributeError: 'int' object has no attribute 'tree_'
Sorry for the long traceback, but I don't think meaningful feedback would be possible without it.
Again, why am I getting this error?
Any help would be greatly appreciated.
Thanks
Edit
Below is the code that generates the models that were passed as one of the inputs to the function above.
from sklearn import ensemble

def updated_runGBM(train_X, train_y, test,
                   n_estimators=100,
                   max_depth=1,
                   min_samples_split=1,
                   learning_rate=0.01,
                   loss='ls',
                   warm_start=True):
    '''train_X : n_samples x m_features
       train_y : n_samples x k_targets (multiple targets allowed)
       test : n_samples x m_features
       warm_start : True (originally the default is False, but I want to add trees)
    '''
    params = {'n_estimators': n_estimators, 'max_depth': max_depth,
              'min_samples_split': min_samples_split,
              'learning_rate': learning_rate, 'loss': loss, 'warm_start': warm_start}
    gbm1 = ensemble.GradientBoostingRegressor(**params)
    gbm2 = ensemble.GradientBoostingRegressor(**params)
    gbm3 = ensemble.GradientBoostingRegressor(**params)
    gbm = [gbm1, gbm2, gbm3]
    gbm_pred = pd.DataFrame()
    for (i, stars), clf in zip(enumerate(['*', '**', '***']), gbm):
        %time clf.fit(train_X.todense(), train_y[stars])
        %time gbm_pred[stars] = clf.predict(test.todense())
    gbm_pred = gbm_pred.as_matrix()
    gbm_pred = np.clip(gbm_pred, 0, np.inf)
    gbm_dict = {'model': gbm, 'prediction': gbm_pred}
    return gbm_dict
NOTE: In the code above, I have removed some of the print statements to reduce clutter.
These are the two functions I am using, nothing else (apart from the code to split up the data).
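For what it's worth, here is a minimal, self-contained sanity check of the warm-start pattern on toy data (the data and names are mine, not from the question). If this runs cleanly in your environment, the problem likely lives in the state of the models list rather than in set_params/warm_start themselves:
import numpy as np
from sklearn.ensemble import GradientBoostingRegressor

rng = np.random.RandomState(0)
X = rng.rand(200, 5)
y = X.dot(np.arange(1, 6)) + 0.1 * rng.randn(200)

gbm = GradientBoostingRegressor(n_estimators=100, warm_start=True)
gbm.fit(X, y)
gbm.set_params(n_estimators=150)  # must be strictly larger than the previous fit
gbm.fit(X, y)                     # trains only the 50 additional trees
print(gbm.estimators_.shape[0])   # -> 150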