AttributeError: Can't pickle local object 'train.<locals>.create_model' - amazon-web-services

I am trying to use my own ML models to create training jobs in AWS SageMaker. When I start the training process everything goes well, but at the end it fails with "AttributeError: Can't pickle local object 'train.<locals>.create_model'". I am new to this. I did the same thing for MLP, KNN, CART and SVR models and never ran into this issue. I know that the LSTM setup is quite different from the others, but I cannot figure out how to solve the problem.
Here is my train.py file where I get the error:
from __future__ import print_function
import json
import os
import pickle
import sys
import traceback
import pandas as pd
import numpy as np
from pandas import DataFrame
from pandas import concat
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
# These are the paths to where SageMaker mounts interesting things in your container.
prefix = "/opt/ml/"
input_path = prefix + "input/data"
output_path = os.path.join(prefix, "output")
model_path = os.path.join(prefix, "model")
# This algorithm has a single channel of input data called 'training'. Since we run in
# File mode, the input files are copied to the directory specified here.
channel_name = "training"
training_path = os.path.join(input_path, channel_name)
# The function to execute training.
def train():
print("Starting the training")
print(training_path)
try:
# Take the set of files and read them all into a single pandas dataframe
input_files = [ os.path.join(training_path, file) for file in os.listdir(training_path) ]
if len(input_files) == 0:
raise ValueError(('There are no files in {}.\n' +
'This usually indicates that the channel ({}) was incorrectly specified,\n' +
'the data specification in S3 was incorrectly specified or the role specified\n' +
'does not have permission to access the data.').format(training_path, channel_name))
raw_data = [ pd.read_csv(file, header=0, index_col=0) for file in input_files ]
data = pd.concat(raw_data)
print(data)
# convert series to supervised learning
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
# put it all together
agg = concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg
values = data.values
# ensure all data is float
values = values.astype('float32')
# normalize features
scaler = MinMaxScaler()
scaled = scaler.fit_transform(values)
# specify the number of lag time steps
n_timesteps = 3
n_features = 4
# frame as supervised learning
reframed = series_to_supervised(scaled, n_timesteps, 1)
print(reframed.shape)
# drop columns we don't want to predict
reframed.drop(reframed.columns[[4,9,14,15,16,17,18]], axis=1, inplace=True)
print(reframed.head())
# split into train and test sets
values = reframed.values
n_train_size = 403
train = values[:n_train_size, :]
test = values[n_train_size:, :]
# split into input and outputs
n_obs = n_timesteps * n_features
train_X, train_y = train[:, :n_obs], train[:, -1]
test_X, test_y = test[:, :n_obs], test[:, -1]
print(train_X.shape, len(train_X), train_y.shape)
# reshape input to be 3D [samples, timesteps, features]
train_X = train_X.reshape((train_X.shape[0], n_timesteps, n_features))
test_X = test_X.reshape((test_X.shape[0], n_timesteps, n_features))
print(train_X.shape, train_y.shape, test_X.shape, test_y.shape)
# Function to create model
def create_model():
# create model
model = Sequential()
model.add(LSTM(50, input_shape=(train_X.shape[1], train_X.shape[2])))
model.add(Dense(1))
# Compile model
# optimizer = SGD(lr=learn_rate, momentum=momentum)
model.compile(loss='mae',optimizer='adam')
return model
from scikeras.wrappers import KerasRegressor
# create model
model = KerasRegressor(model=create_model, verbose=0)
from sklearn.model_selection import GridSearchCV
# define the grid search parameters
batch_size = [2,4,8,16,32]
epochs = [10, 50, 100]
#learn_rate = [0.001, 0.01, 0.1, 0.2, 0.3]
#momentum = [0.0, 0.2, 0.4, 0.6, 0.8, 0.9]
param_grid = dict(batch_size=batch_size, epochs=epochs)
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1, cv=3)
grid_result = grid.fit(train_X, train_y)
# summarize results
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_['mean_test_score']
stds = grid_result.cv_results_['std_test_score']
params = grid_result.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
print("%f (%f) with: %r" % (mean, stdev, param))
# save the model
with open(os.path.join(model_path, "snop-lstm.pkl"), "wb") as out:
pickle.dump(grid, out)
print("Training complete.")
except Exception as e:
# Write out an error file. This will be returned as the failureReason in the
# DescribeTrainingJob result.
trc = traceback.format_exc()
with open(os.path.join(output_path, "failure"), "w") as s:
s.write("Exception during training: " + str(e) + "\n" + trc)
# Printing this causes the exception to be in the training job logs, as well.
print("Exception during training: " + str(e) + "\n" + trc, file=sys.stderr)
# A non-zero exit code causes the training job to be marked as Failed.
sys.exit(255)
if __name__ == "__main__":
train()
# A zero exit code causes the job to be marked a Succeeded.
sys.exit(0)
And this is the log:
2022-02-25T10:28:16.751+03:00
Exception during training: Can't pickle local object 'train.<locals>.create_model'
Traceback (most recent call last):
  File "/opt/program/train", line 154, in train
    pickle.dump(grid, out)
AttributeError: Can't pickle local object 'train.<locals>.create_model'

It seems that you are trying to pickle an object of class GridSearchCV instead of the model itself:
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1, cv=3)
...
...
pickle.dump(grid, out)
I think what you want instead is to retrieve the best model (via best_estimator_, see here: https://github.com/scikit-learn/scikit-learn/blob/37ac6788c/sklearn/model_selection/_search.py#L1247) and then pickle that model.
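For illustration, a minimal sketch of that approach. It rests on two assumptions of mine that go beyond the answer above: create_model is moved to module scope (pickle cannot serialize a function defined inside train(), which is what the error message is about), and the fitted scikeras wrapper exposes the underlying Keras network as model_:
# Sketch only: pickle the best estimator rather than the whole GridSearchCV object.
# Assumes create_model is defined at module level so it can be pickled.
best = grid.best_estimator_  # the refitted KerasRegressor with the best params

with open(os.path.join(model_path, "snop-lstm.pkl"), "wb") as out:
    pickle.dump(best, out)

# Alternatively, save only the underlying Keras network in Keras' native format
# (model_ is assumed to be the fitted network exposed by the scikeras wrapper):
best.model_.save(os.path.join(model_path, "snop-lstm.h5"))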

Related

Getting a 'ValueError: too many values to unpack' from a method that lists only one required arg. I'm not sure of the best way to unpack it

I am calling a method from a predefined class (L2Interface) in the acitoolkit module that lists only one required argument. The method returns two strings, 'encap-type' and 'encap-id', and I am not sure of the best way to unpack these values. Here is my script; the method in question is: vlans = aci.L2Interface.parse_encap(encap)
import sys
import acitoolkit.acitoolkit as aci
import requests
import re

def __init__(self, name, encap_type, encap_id, encap_mode=None):
    self.name = None
    self.encap_type = VLAN
    self.encap_id = None

def main():
    """
    Main Show Endpoints Routine
    :return: None
    """
    # Take login credentials from the command line if provided
    # Otherwise, take them from your environment variables file ~/.profile
    description = ('Simple application that logs on to the APIC'
                   ' and displays all of the Endpoints.')
    creds = aci.Credentials('apic', description)
    args = creds.get()
    # Login to APIC
    session = aci.Session(args.url, args.login, args.password, verify_ssl=False)
    resp = session.login()
    if not resp.ok:
        print('%% Could not login to APIC')
        sys.exit(0)
    # Get encap per interface
    # and store the data as tuples in a List
    data = []
    encap = 'vlan-[0-9].*'
    #vxtype = 'vxlan\-[0-9|a-z].*'
    vlans = aci.L2Interface.parse_encap(encap)
    for vlan in vlans:
        data.append((vlan.attributes['encap_type'],
                     vlan.attributes['encap_id']))
    # Display the data downloaded
    col_widths = [19, 17, 15, 15, 15]
    template = ''
    for idx, width in enumerate(col_widths):
        template += '{%s:%s} ' % (idx, width)
    print(template.format("ENDCAP_TYPE", "ENCAP_ID"))
    fmt_string = []
    for i in range(0, len(col_widths)):
        fmt_string.append('-' * (col_widths[i] - 2))
    print(template.format(*fmt_string))
    for rec in data:
        print(template.format(*rec))

if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        pass
I am trying to connect to an APIC, grab L2 interfaces with encapsulation (encap) assigned and return them in a list.
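A minimal sketch of the unpacking itself, assuming parse_encap returns a two-element tuple (encap_type, encap_id) for a single encap string, as described above ('vlan-100' here is just a hypothetical value):
# Two returned values can be unpacked directly into two names
encap_type, encap_id = aci.L2Interface.parse_encap('vlan-100')
data.append((encap_type, encap_id))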

How to deploy a custom model in Amazon SageMaker

I am a newbie to AWS SageMaker and I am trying to deploy a custom time series LSTM model in SageMaker. Please help me out with how to prepare the script for script mode.
This is my script file timer_series.py:
import sagemaker
import boto3
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator
from sklearn.metrics import mean_squared_error

if __name__ == '__main__':
    parser.add_argument('--epochs', type=int, default=50)
    parser.add_argument('--batch_size', type=int, default=72)
    parser.add_argument('--n_train_hours', type=int, default=24*365*2)
    parser.add_argument('--n_validation_hours', type=int, default=24*365*4)
    # input data and model directories
    parser.add_argument('--model_dir', type=str)
    args, _ = parser.parse_known_args()

    train_dataset_dir = os.environ.get('SM_INPUT_DIR') + '/data/training/'
    output_model_dir = os.environ.get('SM_MODEL_DIR')
    output_object_dir = os.environ.get('SM_OUTPUT_DATA_DIR')
    epochs = args.epochs
    batch_size = args.batch_size
    input_data = {args.input_data}

    dataset = read_csv(train_dataset_dir + 'dataset.csv', header=0, index_col='Date')
    dataset.sort_index(inplace=True)
    train = dataset.iloc[:109]
    test = dataset.iloc[109:]
    scaler = MinMaxScaler()
    scaled_train = scaler.fit_transform(train)
    scaled_test = scaler.fit_transform(test)
    n_input = 12
    n_feature = 1
    train_generator = TimeseriesGenerator(scaled_train, scaled_train, length=n_input, batch_size=1)
    model = Sequential()
    model.add(LSTM(128, activation='relu', input_shape=(n_input, n_feature), return_sequences=True))
    model.add(LSTM(128, activation='relu', return_sequences=True))
    model.add(LSTM(128, activation='relu', return_sequences=False))
    model.add(Dense(1))
    model.compile(optimizer='adam', loss='mse')
    history = model.fit_generator(train_generator, epochs=50, batch_size=1, verbose=1)
    # Get a SageMaker-compatible role used by this Notebook Instance.
    role = get_execution_role()
    with open(output_model_dir + '/history.json', 'w') as f:
        json.dump(history.history, f)
    # Save the Scaler
    dump(scaler, output_model_dir + '/scaler.model', protocol=2)
    # Save the trained model and weights
    model_json = model.to_json()
    with open(output_model_dir + "/model.json", "w") as json_file:
        json_file.write(model_json)
    model.save_weights(output_model_dir + "/model.h5")
This is the code that launches the training job:
train_instance_type = "ml.m4.xlarge"
tf_estimator = TensorFlow(entry_point='time_series.py', role=get_execution_role(),
                          train_instance_count=1, train_instance_type=train_instance_type,
                          framework_version='1.12', py_version='py3', script_mode=True,
                          output_path='s3://' + s3Bucket, base_job_name="sales-forecasting-lstm",
                          hyperparameters={'batch_size': 2,
                                           'epochs': 50})
tf_estimator.fit(uploaded_data_path)
Here is the error I got; I do not understand what it means:
UnexpectedStatusException: Error for Training job sales-forecasting-lstm-2020-04-13-10-17-34-919: Failed. Reason: AlgorithmError: ExecuteUserScriptError:
Command "/usr/bin/python time_series.py --batch_size 2 --epochs 50 --model_dir s3://sagemaker12/sales-forecasting-lstm-2020-04-13-10-17-34-919/model"
I would recommend you change the timer_series.py channels to your S3 paths; you do not have to add the plus sign to your train_dataset_dir. Also add SageMaker-specific arguments such as:
parser.add_argument('--output-data-dir', type=str, default='s3://bucket_name/folder_name/output')
The line above gives directions on where your output data should be stored. However, that is something you have to specify when calling the batch transform function; otherwise it is stored in the default bucket.
Second, in order to debug, and for us to be able to help you, you have to take a look at CloudWatch for that specific training job to better understand what is failing in your script, time_series.py. I suppose it is a problem with how your training data is specified and read.
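As a rough sketch of what script-mode argument handling usually looks like (an assumption on my part, not your exact configuration; SM_CHANNEL_TRAINING, SM_MODEL_DIR and SM_OUTPUT_DATA_DIR are environment variables SageMaker sets inside the training container):
import argparse
import os

if __name__ == '__main__':
    # Build the parser explicitly before adding arguments to it
    parser = argparse.ArgumentParser()
    parser.add_argument('--epochs', type=int, default=50)
    parser.add_argument('--batch_size', type=int, default=72)
    parser.add_argument('--model_dir', type=str)  # S3 location passed by the SDK
    parser.add_argument('--train', type=str, default=os.environ.get('SM_CHANNEL_TRAINING'))
    parser.add_argument('--sm-model-dir', type=str, default=os.environ.get('SM_MODEL_DIR'))
    parser.add_argument('--output-data-dir', type=str, default=os.environ.get('SM_OUTPUT_DATA_DIR'))
    args, _ = parser.parse_known_args()

    # The training channel is mounted locally, so the CSV can be read from there
    dataset_path = os.path.join(args.train, 'dataset.csv')
    # Anything written under args.sm_model_dir is packaged as model.tar.gz after training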

'no SavedModel bundles found!' on tensorflow_hub model deployment to AWS SageMaker

I am attempting to deploy the universal-sentence-encoder model to an AWS SageMaker endpoint and am getting the error raise ValueError('no SavedModel bundles found!')
My code is shown below; I have a feeling that one of my paths is incorrect.
import tensorflow as tf
import tensorflow_hub as hub
import numpy as np
from sagemaker import get_execution_role
from sagemaker.tensorflow.serving import Model

def tfhub_to_savedmodel(model_name, uri):
    tfhub_uri = uri
    model_path = 'encoder_model/' + model_name
    with tf.Session(graph=tf.Graph()) as sess:
        module = hub.Module(tfhub_uri)
        input_params = module.get_input_info_dict()
        dtype = input_params['text'].dtype
        shape = input_params['text'].get_shape()
        # define the model inputs
        inputs = {'text': tf.placeholder(dtype, shape, 'text')}
        # define the model outputs
        # we want the class ids and probabilities for the top 3 classes
        logits = module(inputs['text'])
        outputs = {
            'vector': logits,
        }
        # export the model
        sess.run([tf.global_variables_initializer(), tf.tables_initializer()])
        tf.saved_model.simple_save(
            sess,
            model_path,
            inputs=inputs,
            outputs=outputs)
    return model_path

sagemaker_role = get_execution_role()
!tar -C "$PWD" -czf encoder.tar.gz encoder_model/
model_data = Session().upload_data(path='encoder.tar.gz', key_prefix='model')
env = {'SAGEMAKER_TFS_DEFAULT_MODEL_NAME': 'universal-sentence-encoder-large'}
model = Model(model_data=model_data, role=sagemaker_role, framework_version=1.12, env=env)
predictor = model.deploy(initial_instance_count=1, instance_type='ml.t2.medium')
I suppose you started from this example? https://github.com/awslabs/amazon-sagemaker-examples/tree/master/sagemaker-python-sdk/tensorflow_serving_container
It looks like you're not saving the TF Serving bundle properly: the model version number is missing, because of this line:
model_path = 'encoder_model/' + model_name
Replacing it with this should fix your problem:
model_path = '{}/{}/00000001'.format('encoder_model/', model_name)
Your model artefact should look like this (I used the model in the notebook above):
mobilenet/
mobilenet/mobilenet_v2_140_224/
mobilenet/mobilenet_v2_140_224/00000001/
mobilenet/mobilenet_v2_140_224/00000001/saved_model.pb
mobilenet/mobilenet_v2_140_224/00000001/variables/
mobilenet/mobilenet_v2_140_224/00000001/variables/variables.data-00000-of-00001
mobilenet/mobilenet_v2_140_224/00000001/variables/variables.index
Then, upload to S3 and deploy.
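For completeness, a sketch of the packaging, upload and deploy steps under that layout, mirroring the code already in the question (the paths and model name are placeholders, not a prescription):
import tarfile
from sagemaker import Session, get_execution_role
from sagemaker.tensorflow.serving import Model

# The archive root must contain <model_name>/00000001/saved_model.pb (+ variables/)
with tarfile.open('encoder.tar.gz', 'w:gz') as tar:
    tar.add('encoder_model')

model_data = Session().upload_data(path='encoder.tar.gz', key_prefix='model')
model = Model(model_data=model_data,
              role=get_execution_role(),
              framework_version='1.12',
              env={'SAGEMAKER_TFS_DEFAULT_MODEL_NAME': 'universal-sentence-encoder-large'})
predictor = model.deploy(initial_instance_count=1, instance_type='ml.t2.medium')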

How to order NDB query by the key?

I am trying to use task queues on Google App Engine. I want to utilize the Mapper class shown in the App Engine documentation "Background work with the deferred library".
I get an exception on the ordering of the query result by the key
def get_query(self):
...
q = q.order("__key__")
...
Exception:
File "C:... mapper.py", line 41, in get_query
q = q.order("__key__")
File "C:\Program Files (x86)\Google\google_appengine\google\appengine\ext\ndb\query.py", line 1124, in order
'received %r' % arg)
TypeError: order() expects a Property or query Order; received '__key__'
INFO 2017-03-09 11:56:32,448 module.py:806] default: "POST /_ah/queue/deferred HTTP/1.1" 500 114
The article is from 2009, so I guess something might have changed.
My environment: Windows 7, Python 2.7.9, Google App Engine SDK 1.9.50
There are somewhat similar questions about ordering in NDB on SO.
What bugs me is that this code is from the official doc, presumably updated in Feb 2017 (recently) and posted by someone within the top 0.1% of SO users by reputation.
So I must be doing something wrong. What is the solution?
Bingo.
Avinash Raj is correct. If it were an answer I'd accept it.
Here is the full class code
#!/usr/bin/python2.7
# -*- coding: utf-8 -*-
from google.appengine.ext import deferred
from google.appengine.ext import ndb
from google.appengine.runtime import DeadlineExceededError
import logging

class Mapper(object):
    """
    from https://cloud.google.com/appengine/docs/standard/python/ndb/queries
    corrected with suggestions from Stack Overflow
    http://stackoverflow.com/questions/42692319/how-to-order-ndb-query-by-the-key
    """
    # Subclasses should replace this with a model class (eg, model.Person).
    KIND = None
    # Subclasses can replace this with a list of (property, value) tuples to filter by.
    FILTERS = []

    def __init__(self):
        logging.info("Mapper.__init__: {}")
        self.to_put = []
        self.to_delete = []

    def map(self, entity):
        """Updates a single entity.
        Implementers should return a tuple containing two iterables (to_update, to_delete).
        """
        return ([], [])

    def finish(self):
        """Called when the mapper has finished, to allow for any final work to be done."""
        pass

    def get_query(self):
        """Returns a query over the specified kind, with any appropriate filters applied."""
        q = self.KIND.query()
        for prop, value in self.FILTERS:
            q = q.filter(prop == value)
        q = q.order(self.KIND.key)  # the fixed version. The original q.order('__key__') failed
        # see http://stackoverflow.com/questions/42692319/how-to-order-ndb-query-by-the-key
        return q

    def run(self, batch_size=100):
        """Starts the mapper running."""
        logging.info("Mapper.run: batch_size: {}".format(batch_size))
        self._continue(None, batch_size)

    def _batch_write(self):
        """Writes updates and deletes entities in a batch."""
        if self.to_put:
            ndb.put_multi(self.to_put)
            self.to_put = []
        if self.to_delete:
            ndb.delete_multi(self.to_delete)
            self.to_delete = []

    def _continue(self, start_key, batch_size):
        q = self.get_query()
        # If we're resuming, pick up where we left off last time.
        if start_key:
            key_prop = getattr(self.KIND, '_key')
            q = q.filter(key_prop > start_key)
        # Keep updating records until we run out of time.
        try:
            # Steps over the results, returning each entity and its index.
            for i, entity in enumerate(q):
                map_updates, map_deletes = self.map(entity)
                self.to_put.extend(map_updates)
                self.to_delete.extend(map_deletes)
                # Do updates and deletes in batches.
                if (i + 1) % batch_size == 0:
                    self._batch_write()
                # Record the last entity we processed.
                start_key = entity.key
            self._batch_write()
        except DeadlineExceededError:
            # Write any unfinished updates to the datastore.
            self._batch_write()
            # Queue a new task to pick up where we left off.
            deferred.defer(self._continue, start_key, batch_size)
            return
        self.finish()
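As a usage illustration (my own sketch; Person is a hypothetical ndb model, not part of the post), a subclass only needs to set KIND, optionally FILTERS, and implement map():
class Person(ndb.Model):
    # Hypothetical model used only for this illustration
    name = ndb.StringProperty()
    active = ndb.BooleanProperty(default=True)

class PersonNameFixer(Mapper):
    KIND = Person
    FILTERS = [(Person.active, True)]  # optional (property, value) filters

    def map(self, entity):
        # Return entities to put and entities to delete
        entity.name = (entity.name or '').title()
        return ([entity], [])

# Typically kicked off from a handler, or deferred like the batches themselves:
# deferred.defer(PersonNameFixer().run, 100)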

How to classify image in real time using tensorflow?

I'm trying to use a Raspberry Pi camera to capture an image and classify it in real time into three classes, using the code below. It can predict in the first iteration, but the problem is that it runs out of memory in the second iteration. Is there any way to fix this?
import numpy as np
import tensorflow as tf
import argparse
import os
import sys

def create_graph(model_file):
    """Creates a graph from saved GraphDef file and returns a saver."""
    # Creates graph from saved graph_def.pb.
    with tf.gfile.FastGFile(model_file, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        _ = tf.import_graph_def(graph_def, name='')

def run_inference(images, out_file, labels, model_file, k=5):
    # Creates graph from saved GraphDef.
    create_graph(model_file)
    if out_file:
        out_file = open(out_file, 'wb', 1)
    with tf.Session() as sess:
        softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
        for img in images:
            if not tf.gfile.Exists(img):
                tf.logging.fatal('File does not exist %s', img)
                continue
            image_data = tf.gfile.FastGFile(img, 'rb').read()
            predictions = sess.run(softmax_tensor,
                                   {'DecodeJpeg/contents:0': image_data})
            predictions = np.squeeze(predictions)
            top_k = predictions.argsort()[-k:][::-1]  # Getting top k predictions
            vals = []
            for node_id in top_k:
                human_string = labels[node_id]
                score = predictions[node_id]
                vals.append('%s=%.5f' % (human_string, score))
            rec = "%s\t %s" % (img, ", ".join(vals))
            if out_file:
                out_file.write(rec)
                out_file.write("\n")
            else:
                print(rec)
    if out_file:
        print("Output stored to a file")
        out_file.close()

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Classify Image(s)')
    parser.add_argument('-i', '--in', help='Input Image file ')
    parser.add_argument('-li', '--list', help='List File having input image paths')
    parser.add_argument('-o', '--out', help='Output file for storing the content')
    parser.add_argument('-m', '--model', help='model file path (protobuf)', required=True)
    parser.add_argument('-l', '--labels', help='labels text file', required=True)
    parser.add_argument('-r', '--root', help='path to root directory of input data')
    args = vars(parser.parse_args())
    # Read input
    if not args['in'] and not args['list']:
        print("Either -in or -list option is required.")
        sys.exit(1)
    if args['in']:
        images = [args['in']]
    else:  # list must be given
        with open(args['list']) as ff:
            images = filter(lambda x: x, map(lambda y: y.strip(), ff.readlines()))
    # if a separate root directory given then make a new path
    if args['root']:
        print("Input data from : %s" % args['root'])
        images = map(lambda p: os.path.join(args['root'], p), images)
    with open(args['labels'], 'rb') as f:
        labels = [str(w).replace("\n", "") for w in f.readlines()]
    while True:
        imagename = '/home/pi/Desktop/camerasnap.jpg'
        images = raspi.capture(imagename)
        run_inference(images=images, out_file=args['out'], labels=labels, model_file=args['model'])
The problem is that you are creating the graph in every run_inference method call:
while True:
    imagename = '/home/pi/Desktop/camerasnap.jpg'
    images = raspi.capture(imagename)
    run_inference(images=images, out_file=args['out'], labels=labels, model_file=args['model'])

def run_inference(images, out_file, labels, model_file, k=5):
    # Creates graph from saved GraphDef.
    create_graph(model_file)
    ...
As the graph probably uses almost all of the memory in your GPU, it fails in the second iteration when the code tries to create a new graph. You should create only one graph for the entire life of the program.
Try this:
create_graph(model_file)
while True:
    imagename = '/home/pi/Desktop/camerasnap.jpg'
    images = raspi.capture(imagename)
    run_inference(images=images, out_file=args['out'], labels=labels, model_file=args['model'])