How to classify an image in real time using TensorFlow? - python-2.7

I'm trying to use the Raspberry Pi camera to capture images and classify each one in real time into three classes. What I did is use the code below. It can predict in the first iteration. The problem is that it runs out of memory after the second iteration. Is there any way to fix this?
import numpy as np
import tensorflow as tf
import argparse
import os
import sys

def create_graph(model_file):
    """Creates a graph from saved GraphDef file and returns a saver."""
    # Creates graph from saved graph_def.pb.
    with tf.gfile.FastGFile(model_file, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        _ = tf.import_graph_def(graph_def, name='')

def run_inference(images, out_file, labels, model_file, k=5):
    # Creates graph from saved GraphDef.
    create_graph(model_file)
    if out_file:
        out_file = open(out_file, 'wb', 1)
    with tf.Session() as sess:
        softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
        for img in images:
            if not tf.gfile.Exists(img):
                tf.logging.fatal('File does not exist %s', img)
                continue
            image_data = tf.gfile.FastGFile(img, 'rb').read()
            predictions = sess.run(softmax_tensor,
                                   {'DecodeJpeg/contents:0': image_data})
            predictions = np.squeeze(predictions)
            top_k = predictions.argsort()[-k:][::-1]  # Getting top k predictions
            vals = []
            for node_id in top_k:
                human_string = labels[node_id]
                score = predictions[node_id]
                vals.append('%s=%.5f' % (human_string, score))
            rec = "%s\t %s" % (img, ", ".join(vals))
            if out_file:
                out_file.write(rec)
                out_file.write("\n")
            else:
                print(rec)
    if out_file:
        print("Output stored to a file")
        out_file.close()

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Classify Image(s)')
    parser.add_argument('-i', '--in', help='Input Image file ')
    parser.add_argument('-li', '--list', help='List File having input image paths')
    parser.add_argument('-o', '--out', help='Output file for storing the content')
    parser.add_argument('-m', '--model', help='model file path (protobuf)', required=True)
    parser.add_argument('-l', '--labels', help='labels text file', required=True)
    parser.add_argument('-r', '--root', help='path to root directory of input data')
    args = vars(parser.parse_args())
    # Read input
    if not args['in'] and not args['list']:
        print("Either -in or -list option is required.")
        sys.exit(1)
    if args['in']:
        images = [args['in']]
    else:  # list must be given
        with open(args['list']) as ff:
            images = filter(lambda x: x, map(lambda y: y.strip(), ff.readlines()))
    # if a separate root directory given then make a new path
    if args['root']:
        print("Input data from : %s" % args['root'])
        images = map(lambda p: os.path.join(args['root'], p), images)
    with open(args['labels'], 'rb') as f:
        labels = [str(w).replace("\n", "") for w in f.readlines()]
    while True:
        imagename = '/home/pi/Desktop/camerasnap.jpg'
        images = raspi.capture(imagename)
        run_inference(images=images, out_file=args['out'], labels=labels, model_file=args['model'])

The problem is that you are creating the graph in every run_inference method call:
while True:
    imagename = '/home/pi/Desktop/camerasnap.jpg'
    images = raspi.capture(imagename)
    run_inference(images=images, out_file=args['out'], labels=labels, model_file=args['model'])

def run_inference(images, out_file, labels, model_file, k=5):
    # Creates graph from saved GraphDef.
    create_graph(model_file)
    ...
As the graph probably consumes almost all of the available memory, the second iteration fails when the code tries to create a new graph. You should create only one graph for the entire life of the program.
Try this:
create_graph(model_file)
while True:
    imagename = '/home/pi/Desktop/camerasnap.jpg'
    images = raspi.capture(imagename)
    run_inference(images=images, out_file=args['out'], labels=labels, model_file=args['model'])
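For this to work, run_inference itself must stop calling create_graph, otherwise a new graph is still imported on every loop. A minimal sketch of that change (the rest of the function stays exactly as posted):

def run_inference(images, out_file, labels, model_file, k=5):
    # The graph is now built once, outside this function, so the
    # create_graph(model_file) call is removed from here.
    if out_file:
        out_file = open(out_file, 'wb', 1)
    with tf.Session() as sess:
        softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
        # ... the loop over images is unchanged ...

Creating the tf.Session once next to the graph and passing it in would save further per-frame setup cost as well.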

Related

How to save or upload an image from LOCAL directory to ImageField of database object in DJANGO

I was trying to create some products in an ecommerce project in Django. I had the data file ready and just wanted to loop through the data and save it to the database with Product.objects.create(image='', ...), but I couldn't upload the images from the local directory to the database.
I tried these ways:

1.

with open('IMAGE_PATH', 'rb') as f:
    image = f.read()
Product.objects.create(image=image)

2.

image = open('IMAGE_PATH', 'rb')
Product.objects.create(image=image)

3.

module_dir = dir_path = os.path.dirname(os.path.realpath(__file__))
for p in products:
    file_path = os.path.join(module_dir, p['image'])
    product = Product.objects.create()
    product.image.save(
        file_path,
        File(open(file_path, 'rb'))
    )
    product.save()

None of them worked for me.
After some searching, I got the answer. The code to use would be like this:
from django.core.files import File

for p in products:
    product = Product.objects.create()
    FILE_PATH = p['image']
    local_file = open(f'./APP_NAME/{FILE_PATH}', "rb")
    djangofile = File(local_file)
    product.image.save('FILE_NAME.jpg', djangofile)
    local_file.close()
The source answer shows the same pattern for an image fetched from a URL:

from django.core.files import File
import urllib

result = urllib.urlretrieve(image_url)  # image_url is a URL to an image
model_instance.photo.save(
    os.path.basename(self.url),
    File(open(result[0], 'rb'))
)
self.save()
Got the answer from here
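As a small variation (an untested sketch on the same assumptions: a Product model with an ImageField and a products list of dicts holding local paths), a with block releases the file handle even if save() raises:

from django.core.files import File

for p in products:
    product = Product.objects.create()
    with open(f"./APP_NAME/{p['image']}", 'rb') as local_file:
        product.image.save('FILE_NAME.jpg', File(local_file))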

Getting a 'ValueError: too many values to unpack' from a method that lists only one required arg. I'm not sure of the best way to unpack it

I am calling a method from a predefined class (L2Interface) in the acitoolkit module that lists only one required argument. The method returns two strings, 'encap-type' and 'encap-id', and I am floundering over the best way to unpack these values. Here is my script; the method in question is: vlans = aci.L2Interface.parse_encap(encap)
import sys
import acitoolkit.acitoolkit as aci
import requests
import re

def __init__(self, name, encap_type, encap_id, encap_mode=None):
    self.name = None
    self.encap_type = VLAN
    self.encap_id = None

def main():
    """
    Main Show Endpoints Routine
    :return: None
    """
    # Take login credentials from the command line if provided
    # Otherwise, take them from your environment variables file ~/.profile
    description = ('Simple application that logs on to the APIC'
                   ' and displays all of the Endpoints.')
    creds = aci.Credentials('apic', description)
    args = creds.get()
    # Login to APIC
    session = aci.Session(args.url, args.login, args.password, verify_ssl=False)
    resp = session.login()
    if not resp.ok:
        print('%% Could not login to APIC')
        sys.exit(0)
    # Get encap per interface
    # and store the data as tuples in a List
    data = []
    encap = 'vlan-[0-9].*'
    #vxtype = 'vxlan\-[0-9|a-z].*'
    vlans = aci.L2Interface.parse_encap(encap)
    for vlan in vlans:
        data.append((vlan.attributes['encap_type'],
                     vlan.attributes['encap_id']))
    # Display the data downloaded
    col_widths = [19, 17, 15, 15, 15]
    template = ''
    for idx, width in enumerate(col_widths):
        template += '{%s:%s} ' % (idx, width)
    print(template.format("ENDCAP_TYPE", "ENCAP_ID"))
    fmt_string = []
    for i in range(0, len(col_widths)):
        fmt_string.append('-' * (col_widths[i] - 2))
    print(template.format(*fmt_string))
    for rec in data:
        print(template.format(*rec))

if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        pass
I am trying to connect to an APIC, grab L2 interfaces with encapsulation (encap) assigned and return them in a list.
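Since the issue is really about taking two return values, the idiomatic approach is plain tuple unpacking (a sketch, assuming parse_encap really does return a two-item (encap_type, encap_id) sequence as described):

# Unpack the two returned strings into two names in one assignment.
encap_type, encap_id = aci.L2Interface.parse_encap(encap)
data.append((encap_type, encap_id))

If the call actually returns more than two items, unpacking into exactly two names is what produces 'too many values to unpack'; printing repr(aci.L2Interface.parse_encap(encap)) first will show how many values there really are.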

AttributeError: Can't pickle local object 'train.<locals>.create_model'

I am trying to use my own ML models to create training jobs in AWS SageMaker. When I start the training process everything goes well, but at the end it fails with "AttributeError: Can't pickle local object 'train.<locals>.create_model'". I am new to this. I did the same thing for MLP, KNN, CART, and SVR models and never encountered this issue. I know that an LSTM needs a rather different setup to create the model, but I cannot figure out how to solve this.
Here is my train.py file where I get the error:
from __future__ import print_function

import json
import os
import pickle
import sys
import traceback

import pandas as pd
import numpy as np
from pandas import DataFrame
from pandas import concat
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM

# These are the paths to where SageMaker mounts interesting things in your container.
prefix = "/opt/ml/"
input_path = prefix + "input/data"
output_path = os.path.join(prefix, "output")
model_path = os.path.join(prefix, "model")

# This algorithm has a single channel of input data called 'training'. Since we run in
# File mode, the input files are copied to the directory specified here.
channel_name = "training"
training_path = os.path.join(input_path, channel_name)

# The function to execute training.
def train():
    print("Starting the training")
    print(training_path)
    try:
        # Take the set of files and read them all into a single pandas dataframe
        input_files = [os.path.join(training_path, file) for file in os.listdir(training_path)]
        if len(input_files) == 0:
            raise ValueError(('There are no files in {}.\n' +
                              'This usually indicates that the channel ({}) was incorrectly specified,\n' +
                              'the data specification in S3 was incorrectly specified or the role specified\n' +
                              'does not have permission to access the data.').format(training_path, channel_name))
        raw_data = [pd.read_csv(file, header=0, index_col=0) for file in input_files]
        data = pd.concat(raw_data)
        print(data)

        # convert series to supervised learning
        def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
            n_vars = 1 if type(data) is list else data.shape[1]
            df = DataFrame(data)
            cols, names = list(), list()
            # input sequence (t-n, ... t-1)
            for i in range(n_in, 0, -1):
                cols.append(df.shift(i))
                names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
            # forecast sequence (t, t+1, ... t+n)
            for i in range(0, n_out):
                cols.append(df.shift(-i))
                if i == 0:
                    names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
                else:
                    names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
            # put it all together
            agg = concat(cols, axis=1)
            agg.columns = names
            # drop rows with NaN values
            if dropnan:
                agg.dropna(inplace=True)
            return agg

        values = data.values
        # ensure all data is float
        values = values.astype('float32')
        # normalize features
        scaler = MinMaxScaler()
        scaled = scaler.fit_transform(values)
        # specify the number of lag time steps
        n_timesteps = 3
        n_features = 4
        # frame as supervised learning
        reframed = series_to_supervised(scaled, n_timesteps, 1)
        print(reframed.shape)
        # drop columns we don't want to predict
        reframed.drop(reframed.columns[[4, 9, 14, 15, 16, 17, 18]], axis=1, inplace=True)
        print(reframed.head())
        # split into train and test sets
        values = reframed.values
        n_train_size = 403
        train = values[:n_train_size, :]
        test = values[n_train_size:, :]
        # split into input and outputs
        n_obs = n_timesteps * n_features
        train_X, train_y = train[:, :n_obs], train[:, -1]
        test_X, test_y = test[:, :n_obs], test[:, -1]
        print(train_X.shape, len(train_X), train_y.shape)
        # reshape input to be 3D [samples, timesteps, features]
        train_X = train_X.reshape((train_X.shape[0], n_timesteps, n_features))
        test_X = test_X.reshape((test_X.shape[0], n_timesteps, n_features))
        print(train_X.shape, train_y.shape, test_X.shape, test_y.shape)

        # Function to create model
        def create_model():
            # create model
            model = Sequential()
            model.add(LSTM(50, input_shape=(train_X.shape[1], train_X.shape[2])))
            model.add(Dense(1))
            # Compile model
            # optimizer = SGD(lr=learn_rate, momentum=momentum)
            model.compile(loss='mae', optimizer='adam')
            return model

        from scikeras.wrappers import KerasRegressor
        # create model
        model = KerasRegressor(model=create_model, verbose=0)

        from sklearn.model_selection import GridSearchCV
        # define the grid search parameters
        batch_size = [2, 4, 8, 16, 32]
        epochs = [10, 50, 100]
        #learn_rate = [0.001, 0.01, 0.1, 0.2, 0.3]
        #momentum = [0.0, 0.2, 0.4, 0.6, 0.8, 0.9]
        param_grid = dict(batch_size=batch_size, epochs=epochs)
        grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1, cv=3)
        grid_result = grid.fit(train_X, train_y)
        # summarize results
        print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
        means = grid_result.cv_results_['mean_test_score']
        stds = grid_result.cv_results_['std_test_score']
        params = grid_result.cv_results_['params']
        for mean, stdev, param in zip(means, stds, params):
            print("%f (%f) with: %r" % (mean, stdev, param))

        # save the model
        with open(os.path.join(model_path, "snop-lstm.pkl"), "wb") as out:
            pickle.dump(grid, out)
        print("Training complete.")
    except Exception as e:
        # Write out an error file. This will be returned as the failureReason in the
        # DescribeTrainingJob result.
        trc = traceback.format_exc()
        with open(os.path.join(output_path, "failure"), "w") as s:
            s.write("Exception during training: " + str(e) + "\n" + trc)
        # Printing this causes the exception to be in the training job logs, as well.
        print("Exception during training: " + str(e) + "\n" + trc, file=sys.stderr)
        # A non-zero exit code causes the training job to be marked as Failed.
        sys.exit(255)

if __name__ == "__main__":
    train()
    # A zero exit code causes the job to be marked a Succeeded.
    sys.exit(0)
And this is the log:
2022-02-25T10:28:16.751+03:00
Exception during training: Can't pickle local object 'train.<locals>.create_model'
Traceback (most recent call last):
  File "/opt/program/train", line 154, in train
    pickle.dump(grid, out)
AttributeError: Can't pickle local object 'train.<locals>.create_model'
It seems that you are trying to pickle an object of class GridSearchCV instead of the model itself:

grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1, cv=3)
...
pickle.dump(grid, out)

I think what you want instead is to retrieve the best model (via best_estimator_, see here: https://github.com/scikit-learn/scikit-learn/blob/37ac6788c/sklearn/model_selection/_search.py#L1247) and then pickle that model.
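Note also that the error message itself points at the nested function: pickle cannot serialize train.<locals>.create_model because it is defined inside train(). A sketch of both fixes (untested; moving create_model to module level with the input shape passed explicitly is an assumption about how you would restructure it):

# Module level, outside train(): pickle can only serialize functions
# that are importable by name, not closures defined inside train().
def create_model(n_timesteps=3, n_features=4):
    model = Sequential()
    model.add(LSTM(50, input_shape=(n_timesteps, n_features)))
    model.add(Dense(1))
    model.compile(loss='mae', optimizer='adam')
    return model

# Inside train(), after the grid search, pickle the winning estimator:
with open(os.path.join(model_path, "snop-lstm.pkl"), "wb") as out:
    pickle.dump(grid_result.best_estimator_, out)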

CelebA dataset identities or classes

I want to use the CelebA dataset in a face recognition project, but when I downloaded the dataset I found that all images are in one folder, without any identities or subfolders assigned to classes. Also, the downloaded information files contain nothing about the classes or identities. Is there any information about the 10,000 identities mentioned in the paper and on the website?
import os
import numpy as np
import cv2
import shutil

def read_identity(identities_filename):
    identities = []
    with open(identities_filename, 'r') as f:
        for line in f.readlines()[1:]:
            pair = line.strip().split()
            identities.append(pair)
    #print(identities)
    return np.array(identities)

def create_identity(celebA_dir, identity_path, output_dir):
    img_identity = read_identity(os.path.expanduser(identity_path))
    #print(img_identity[0][1])
    i = 0
    for filename in os.listdir(celebA_dir):
        if filename == img_identity[i][0]:
            # print("Good ", filename, img_identity)
            # # ----create the sub folder in the output folder
            # save_dir = os.path.join(output_dir, img_identity[i][1])
            # i += 1
            # if not os.path.exists(save_dir):
            #     os.makedirs(save_dir)
            # # ----copy image source to its identity destination
            destination = os.path.join(output_dir, img_identity[i][1])
            source = os.path.join(celebA_dir, img_identity[i][0])
            i += 1
            shutil.copy2(source, destination)

if __name__ == '__main__':
    celebA_dir = r"...."  # Add your directory where dataset exist
    output_dir = r"...."  # Add directory where you want to save datasets with identities
    identity_path = r"identity_CelebA.txt"
    create_identity(celebA_dir, identity_path, output_dir)
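One caveat worth noting, with a hedged alternative sketch (untested): the loop above only works if os.listdir() happens to return files in the same order as the rows of identity_CelebA.txt, which is not guaranteed. Building a filename-to-identity dict avoids that assumption and also creates the per-identity subfolders:

import os
import shutil

def create_identity_by_lookup(celebA_dir, identity_path, output_dir):
    # identity_CelebA.txt has lines like '000001.jpg 2880'; map name -> id.
    with open(identity_path, 'r') as f:
        identity_of = dict(line.split() for line in f if line.strip())
    for filename in os.listdir(celebA_dir):
        identity = identity_of.get(filename)
        if identity is None:
            continue  # image not listed in the identity file
        save_dir = os.path.join(output_dir, identity)
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        shutil.copy2(os.path.join(celebA_dir, filename), save_dir)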

Use global variables in an AWS SageMaker script

After having correctly deployed our model, I need to invoke it via a Lambda function. The script features two cleaning functions: the first one (cleaning()) gives us five things, the cleaned dataset plus four other objects (scaler, monthdummies, compadummies, parceldummies) that we need to use in the second cleaning function (cleaning_test()).
The reason behind this is that in the use case I'll have only one instance at a time to perform predictions on, not an entire dataset. This means I can't pass that single row to the first cleaning() function, since some of its commands won't work: I can't fit a scaler or create dummy variables from one row. So the aim is to import the scaler and the dummies produced inside cleaning(), since they come from the whole dataset that I used to train the model.
Hence, in the input_fn() function the input needs to be cleaned using the cleaning_test() function, which requires the scaler and the three lists of dummies from cleaning().
When I train the model, the cleaning() function works fine, but after the deployment, invoking the endpoint raises an error that the variable "scaler" is not defined.
Below is the script.py:
Note that the test part is commented out since I've already tested it; now I'm training on the whole dataset and want to predict completely new instances.
# (imports implied by the snippets below)
import argparse
import json
import os
import pandas as pd
import joblib
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

def cleaning(data):
    # ... some cleaning on data stored in S3 ...
    return cleaned_data, scaler, monthdummies, compadummies, parceldummies

def cleaning_test(data, scaler, monthdummies, compadummies, parceldummies):
    # ... cleaning on data without labels ...
    return cleaned_data

def model_fn(model_dir):
    clf = joblib.load(os.path.join(model_dir, "model.joblib"))
    return clf

def input_fn(request_body, request_content_type):
    if request_content_type == "application/json":
        data = json.loads(request_body)
        df = pd.DataFrame(data, index=[0])
        input_data = cleaning_test(df, scaler, monthdummies, compadummies, parceldummies)
    else:
        pass
    return input_data

def predict_fn(input_data, model):
    return model.predict_proba(input_data)

if __name__ == '__main__':
    print('extracting arguments')
    parser = argparse.ArgumentParser()
    # hyperparameters sent by the client are passed as command-line arguments to the script.
    parser.add_argument('--n_estimators', type=int, default=10)
    parser.add_argument('--min-samples-leaf', type=int, default=3)
    # Data, model, and output directories
    parser.add_argument('--model-dir', type=str, default=os.environ.get('SM_MODEL_DIR'))
    parser.add_argument('--train', type=str, default=os.environ.get('SM_CHANNEL_TRAIN'))
    #parser.add_argument('--test', type=str, default=os.environ.get('SM_CHANNEL_TEST'))
    parser.add_argument('--train-file', type=str, default='fp_train.csv')
    #parser.add_argument('--test-file', type=str, default='fp_test.csv')
    args, _ = parser.parse_known_args()

    print('reading data')
    train_df = pd.read_csv(os.path.join(args.train, args.train_file))
    #test_df = pd.read_csv(os.path.join(args.test, args.test_file))

    print("cleaning")
    train_df, scaler, monthdummies, compadummies, parceldummies = cleaning(train_df)
    #test_df, scaler1, monthdummies1, compadummies1, parceldummies1 = cleaning(test_df)

    print("splitting")
    y = train_df.loc[:, "event"]
    X = train_df.loc[:, train_df.columns != 'event']
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
    """print('building training and testing datasets')
    X_train = train_df.loc[:, train_df.columns != 'event']
    X_test = test_df.loc[:, test_df.columns != 'event']
    y_train = train_df.loc[:,"event"]
    y_test = test_df.loc[:,"event"]"""
    print(X_train.columns)
    print(X_test.columns)

    # train
    print('training model')
    model = RandomForestClassifier(
        n_estimators=args.n_estimators,
        min_samples_leaf=args.min_samples_leaf,
        n_jobs=-1)
    model.fit(X_train, y_train)

    # print abs error
    print('validating model')
    proba = model.predict_proba(X_test)

    # persist model
    path = os.path.join(args.model_dir, "model.joblib")
    joblib.dump(model, path)
    print('model persisted at ' + path)
I run this through:

sklearn_estimator = SKLearn(
    entry_point='script.py',
    role=get_execution_role(),
    train_instance_count=1,
    train_instance_type='ml.c5.xlarge',
    framework_version='0.20.0',
    base_job_name='rf-scikit',
    hyperparameters={'n_estimators': 15})

sklearn_estimator.fit({'train': trainpath})
sklearn_estimator.latest_training_job.wait(logs='None')

artifact = sm_boto3.describe_training_job(
    TrainingJobName=sklearn_estimator.latest_training_job.name)['ModelArtifacts']['S3ModelArtifacts']

predictor = sklearn_estimator.deploy(
    instance_type='ml.c5.large',
    initial_instance_count=1)
The question is: how can I "store" the variables produced by the cleaning() function during the training process, so that I can use them in the input_fn() function and make cleaning_test() work fine?
Thanks!
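One common pattern (sketched below, untested; the preprocessing.joblib file name and the tuple packing are assumptions) is to persist the preprocessing objects next to the model in model_dir during training, then reload them in model_fn() and do the cleaning in predict_fn(), which, unlike input_fn(), receives whatever model_fn() returned:

import os
import joblib

# Training side: save the preprocessing artifacts alongside the model.
joblib.dump(model, os.path.join(args.model_dir, "model.joblib"))
joblib.dump(
    (scaler, monthdummies, compadummies, parceldummies),
    os.path.join(args.model_dir, "preprocessing.joblib"))

# Serving side: load both artifacts once per container.
def model_fn(model_dir):
    clf = joblib.load(os.path.join(model_dir, "model.joblib"))
    prep = joblib.load(os.path.join(model_dir, "preprocessing.joblib"))
    return clf, prep

def predict_fn(input_data, model_and_prep):
    clf, (scaler, monthdummies, compadummies, parceldummies) = model_and_prep
    cleaned = cleaning_test(input_data, scaler, monthdummies, compadummies, parceldummies)
    return clf.predict_proba(cleaned)

With this layout, input_fn() just parses the JSON into a DataFrame and returns it; no training-time globals are needed at serving time.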