How to restore a TensorFlow model and predict an input image - python-2.7

I have restored my trained TensorFlow model and now I want to run prediction on an input image.
Here is the restore code:
import tensorflow as tf
import cv2
sess=tf.Session()
image_size=128
saver = tf.train.import_meta_graph('my_test_model-1000.meta')
saver.restore(sess,tf.train.latest_checkpoint('./'))
sess.run(tf.global_variables_initializer())
How can I then predict a 128x128x3 RGB input image with this model?

import tensorflow as tf
import cv2
import numpy as np

def model(inputs):
    # You define your model over here
    ...
    ...
    return logits

# read and preprocess the image to match the placeholder shape
image = cv2.imread("image_path.jpg")
image = cv2.resize(image, (128, 128)).astype(np.float32)
inputs = np.expand_dims(image, axis=0)  # shape (1, 128, 128, 3)

ip_tensor = tf.placeholder(tf.float32, (None, 128, 128, 3))
logits = model(ip_tensor)
saver = tf.train.Saver()

with tf.Session() as sess:
    saver.restore(sess, tf.train.latest_checkpoint('./'))  # restore your model
    feed = {ip_tensor: inputs}  # prepare your feed to the saved model
    predictions = sess.run(tf.argmax(logits, 1), feed_dict=feed)  # make prediction
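If you keep the question's approach of importing the graph from the .meta file instead of redefining the model in code, you can fetch the input placeholder and output tensor by name. This is only a sketch: the tensor names 'x:0' and 'logits:0' are assumptions and must match whatever names were used when the model was built (also note that running tf.global_variables_initializer() after saver.restore, as in the question, would overwrite the restored weights).
import tensorflow as tf
import cv2
import numpy as np

sess = tf.Session()
saver = tf.train.import_meta_graph('my_test_model-1000.meta')
saver.restore(sess, tf.train.latest_checkpoint('./'))

graph = tf.get_default_graph()
# Assumed tensor names -- replace with the names used in your training graph.
x = graph.get_tensor_by_name('x:0')
logits = graph.get_tensor_by_name('logits:0')

image = cv2.resize(cv2.imread("image_path.jpg"), (128, 128)).astype(np.float32)
prediction = sess.run(tf.argmax(logits, 1), feed_dict={x: np.expand_dims(image, axis=0)})
print(prediction)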

How to save a Pillow-processed image in an already existing Django object

I have created a Django model as below:
from django.db import models

# Create your models here.
class ImageModel(models.Model):
    image = models.ImageField(upload_to='images/')
    editedImg = models.ImageField(upload_to='images/')

    def delete(self, *args, **kwargs):
        self.image.delete()
        self.editedImg.delete()
        super().delete(*args, **kwargs)
And here is what I am trying to do in a view function:
from django.shortcuts import render
from EditorApp.forms import ImageForm
from EditorApp.models import ImageModel
from django.http import HttpResponseRedirect
from PIL import Image

def edit_column(request):
    codArr = request.POST.getlist('codArr[]')
    imgs = ImageModel.objects.first()
    orgImage = ImageModel.objects.first().image
    orgImage = Image.open(orgImage)
    croppedImg = orgImage.crop((int(codArr[0]), int(codArr[1]), int(codArr[2]), int(codArr[3])))
    # croppedImg.show()
    # imgs.editedImg = croppedImg
    # imgs.save()
    return HttpResponseRedirect("/editing/")
codArr holds the top (x, y) and bottom (x, y) coordinates as an array; that part is tested and works (croppedImg.show() displays the desired cropped image), so the crop itself is fine. What I am trying to do is save the cropped image into the editedImg field of the model above. The commented-out lines are what I tried, but they throw AttributeError: _committed.
I have not given the image a name in the model, since that is not required.
Any help would be much appreciated.
You should do it like this:
from io import BytesIO
from django.core import files
from PIL import Image

codArr = request.POST.getlist('codArr[]')
img_obj = ImageModel.objects.first()
orgImage = img_obj.image
orgImage = Image.open(orgImage)
croppedImg = orgImage.crop((int(codArr[0]), int(codArr[1]), int(codArr[2]), int(codArr[3])))

thumb_io = BytesIO()  # create a BytesIO object
croppedImg.save(thumb_io, 'png')  # write the cropped image into the buffer
file_name = 'cropped.png'  # any name you want the edited file stored under
editedImg = files.File(thumb_io, name=file_name)
img_obj.editedImg = editedImg
img_obj.save()
You can use Python's context manager to open the image and save it to the desired storage; in this case I'm using the images dir.
Pillow will crop the image, and image.save() will write it to the filesystem; after that you can assign it to Django's ImageField and save it to the DB.
The context manager takes care of opening and closing the file, Pillow takes care of the image, and Django takes care of the DB.
from PIL import Image

with Image.open(orgImage) as image:
    file_name = image.filename  # Can be replaced by orgImage filename
    cropped_path = f"images/croped-{file_name}"
    # The crop method from the Image module takes four coordinates as input.
    # The right can also be represented as (left+width)
    # and lower can be represented as (upper+height).
    (left, upper, right, lower) = (20, 20, 100, 100)
    # Here the image "image" is cropped and assigned to new variable im_crop
    im_crop = image.crop((left, upper, right, lower))
    im_crop.save(cropped_path)

imgs.editedImg = cropped_path
imgs.save()
Pillow's reference

AttributeError: Can't pickle local object 'train.<locals>.create_model'

I am trying to use my own ML models to create training jobs in AWS SageMaker. The training process runs fine, but at the end it fails with "AttributeError: Can't pickle local object 'train.<locals>.create_model'". I am new to this. I did the same thing for MLP, KNN, CART, and SVR models and never ran into this issue. I know the LSTM setup uses quite different pieces to create the model, but I cannot figure out how to solve this.
Here is my train.py file where I get the error:
from __future__ import print_function

import json
import os
import pickle
import sys
import traceback

import pandas as pd
import numpy as np
from pandas import DataFrame
from pandas import concat
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM

# These are the paths to where SageMaker mounts interesting things in your container.
prefix = "/opt/ml/"
input_path = prefix + "input/data"
output_path = os.path.join(prefix, "output")
model_path = os.path.join(prefix, "model")

# This algorithm has a single channel of input data called 'training'. Since we run in
# File mode, the input files are copied to the directory specified here.
channel_name = "training"
training_path = os.path.join(input_path, channel_name)

# The function to execute training.
def train():
    print("Starting the training")
    print(training_path)
    try:
        # Take the set of files and read them all into a single pandas dataframe
        input_files = [os.path.join(training_path, file) for file in os.listdir(training_path)]
        if len(input_files) == 0:
            raise ValueError(('There are no files in {}.\n' +
                              'This usually indicates that the channel ({}) was incorrectly specified,\n' +
                              'the data specification in S3 was incorrectly specified or the role specified\n' +
                              'does not have permission to access the data.').format(training_path, channel_name))
        raw_data = [pd.read_csv(file, header=0, index_col=0) for file in input_files]
        data = pd.concat(raw_data)
        print(data)

        # convert series to supervised learning
        def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
            n_vars = 1 if type(data) is list else data.shape[1]
            df = DataFrame(data)
            cols, names = list(), list()
            # input sequence (t-n, ... t-1)
            for i in range(n_in, 0, -1):
                cols.append(df.shift(i))
                names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
            # forecast sequence (t, t+1, ... t+n)
            for i in range(0, n_out):
                cols.append(df.shift(-i))
                if i == 0:
                    names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
                else:
                    names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
            # put it all together
            agg = concat(cols, axis=1)
            agg.columns = names
            # drop rows with NaN values
            if dropnan:
                agg.dropna(inplace=True)
            return agg

        values = data.values
        # ensure all data is float
        values = values.astype('float32')
        # normalize features
        scaler = MinMaxScaler()
        scaled = scaler.fit_transform(values)
        # specify the number of lag time steps
        n_timesteps = 3
        n_features = 4
        # frame as supervised learning
        reframed = series_to_supervised(scaled, n_timesteps, 1)
        print(reframed.shape)
        # drop columns we don't want to predict
        reframed.drop(reframed.columns[[4, 9, 14, 15, 16, 17, 18]], axis=1, inplace=True)
        print(reframed.head())
        # split into train and test sets
        values = reframed.values
        n_train_size = 403
        train = values[:n_train_size, :]
        test = values[n_train_size:, :]
        # split into input and outputs
        n_obs = n_timesteps * n_features
        train_X, train_y = train[:, :n_obs], train[:, -1]
        test_X, test_y = test[:, :n_obs], test[:, -1]
        print(train_X.shape, len(train_X), train_y.shape)
        # reshape input to be 3D [samples, timesteps, features]
        train_X = train_X.reshape((train_X.shape[0], n_timesteps, n_features))
        test_X = test_X.reshape((test_X.shape[0], n_timesteps, n_features))
        print(train_X.shape, train_y.shape, test_X.shape, test_y.shape)

        # Function to create model
        def create_model():
            # create model
            model = Sequential()
            model.add(LSTM(50, input_shape=(train_X.shape[1], train_X.shape[2])))
            model.add(Dense(1))
            # Compile model
            # optimizer = SGD(lr=learn_rate, momentum=momentum)
            model.compile(loss='mae', optimizer='adam')
            return model

        from scikeras.wrappers import KerasRegressor
        # create model
        model = KerasRegressor(model=create_model, verbose=0)

        from sklearn.model_selection import GridSearchCV
        # define the grid search parameters
        batch_size = [2, 4, 8, 16, 32]
        epochs = [10, 50, 100]
        # learn_rate = [0.001, 0.01, 0.1, 0.2, 0.3]
        # momentum = [0.0, 0.2, 0.4, 0.6, 0.8, 0.9]
        param_grid = dict(batch_size=batch_size, epochs=epochs)
        grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1, cv=3)
        grid_result = grid.fit(train_X, train_y)
        # summarize results
        print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
        means = grid_result.cv_results_['mean_test_score']
        stds = grid_result.cv_results_['std_test_score']
        params = grid_result.cv_results_['params']
        for mean, stdev, param in zip(means, stds, params):
            print("%f (%f) with: %r" % (mean, stdev, param))

        # save the model
        with open(os.path.join(model_path, "snop-lstm.pkl"), "wb") as out:
            pickle.dump(grid, out)
        print("Training complete.")
    except Exception as e:
        # Write out an error file. This will be returned as the failureReason in the
        # DescribeTrainingJob result.
        trc = traceback.format_exc()
        with open(os.path.join(output_path, "failure"), "w") as s:
            s.write("Exception during training: " + str(e) + "\n" + trc)
        # Printing this causes the exception to be in the training job logs, as well.
        print("Exception during training: " + str(e) + "\n" + trc, file=sys.stderr)
        # A non-zero exit code causes the training job to be marked as Failed.
        sys.exit(255)


if __name__ == "__main__":
    train()
    # A zero exit code causes the job to be marked a Succeeded.
    sys.exit(0)
And this is the log:
2022-02-25T10:28:16.751+03:00
Exception during training: Can't pickle local object 'train.<locals>.create_model'
Traceback (most recent call last):
  File "/opt/program/train", line 154, in train
    pickle.dump(grid, out)
AttributeError: Can't pickle local object 'train.<locals>.create_model'
It seems that you are trying to pickle the GridSearchCV object instead of the model itself:
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1, cv=3)
...
...
pickle.dump(grid, out)
I think what you want instead is to retrieve the best model (via best_estimator_, see here: https://github.com/scikit-learn/scikit-learn/blob/37ac6788c/sklearn/model_selection/_search.py#L1247) and then pickle that model.
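A minimal sketch of that suggestion, assuming the scikeras wrapper from the question (its fitted Keras model is exposed as model_). Saving the underlying Keras model with Keras' own serializer also avoids pickling anything that references the locally defined create_model:
# after: grid_result = grid.fit(train_X, train_y)
best_estimator = grid_result.best_estimator_   # fitted KerasRegressor, refit on the full data
best_keras_model = best_estimator.model_       # underlying Keras model built by scikeras
best_keras_model.save(os.path.join(model_path, "snop-lstm.h5"))  # instead of pickle.dump(grid, out)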

Why is layer.get_weights() returning lists of length 1 and 4?

I am trying to get the weights and biases of all convolutional layers of the resnet50 model for one of my assignments. I learned that we can use layer.get_weights() to get the weights and bias. It should return a list with two elements: the layer's weights at layer.get_weights()[0] and the bias at layer.get_weights()[1]. Here is the code I used.
import tensorflow as tf
import source
from source import models
from source.utils.image import read_image_bgr, preprocess_image, resize_image
from source.utils.visualization import draw_box, draw_caption
from source.utils.colors import label_color
from source.models import retinanet
import warnings
warnings.filterwarnings("ignore")
from tensorflow import ConfigProto
import numpy as np
import os
import argparse
import keras
from keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, Activation, Dropout
from keras.models import Model

ap = argparse.ArgumentParser()
ap.add_argument("-weight", "--weight_file", type=str, default="trained_model.h5", help="Path to the weights file")
ap.add_argument("-backbone", "--backbone", type=str, default="resnet50", help="Backbone model name")
args = vars(ap.parse_args())

# fetching a tensorflow session
def get_session():
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    return tf.Session(config=config)

config = tf.ConfigProto()
config.gpu_options.allow_growth = True
tf.keras.backend.set_session(tf.Session(config=config))
keras.backend.tensorflow_backend.set_session(get_session())

model = str(args.get("weight_file", False))
backbone = str(args.get("backbone", False))
model = models.load_model(str(model), backbone_name=str(backbone))

# model is the resnet50 model
for layer in model.layers:
    print('layer name', layer.name)
    we = layer.get_weights()
    print('len(we)', len(we))
But in my case I am getting length 1 for some layers and length 4 for others, which is different from what I expected. I am really confused at this point. Any ideas or suggestions would be really helpful.
Thanks in advance.
The get_weights() function returns both the trainable and non-trainable parameters of a layer. A BatchNormalization layer has 4 parameters, which explains the length-4 outputs (ResNet blocks contain batchnorm). As far as I am aware, ResNet models do not use a bias term in the convolutional layers because of the batchnorm, which explains the length-1 outputs.
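To see which layers produce which lengths, a quick inspection loop (purely illustrative, reusing the model object loaded above) prints the layer class next to the shapes returned by get_weights():
# Illustrative check: print layer type, name, and the shape of each returned array.
for layer in model.layers:
    weights = layer.get_weights()
    print(layer.__class__.__name__, layer.name, [w.shape for w in weights])

# A BatchNormalization layer yields 4 arrays (gamma, beta, moving_mean, moving_variance),
# while a Conv2D layer created with use_bias=False yields only 1 (the kernel).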

How to deploy a custom model in Amazon SageMaker

I am a newbie to AWS SageMaker. I am trying to deploy a custom time-series LSTM model in SageMaker; please help me figure out how to prepare the script for script mode.
This is my script file, time_series.py:
import sagemaker
import boto3
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator
from sklearn.metrics import mean_squared_error

if __name__ == '__main__':
    parser.add_argument('--epochs', type=int, default=50)
    parser.add_argument('--batch_size', type=int, default=72)
    parser.add_argument('--n_train_hours', type=int, default=24*365*2)
    parser.add_argument('--n_validation_hours', type=int, default=24*365*4)
    # input data and model directories
    parser.add_argument('--model_dir', type=str)
    args, _ = parser.parse_known_args()

    train_dataset_dir = os.environ.get('SM_INPUT_DIR') + '/data/training/'
    output_model_dir = os.environ.get('SM_MODEL_DIR')
    output_object_dir = os.environ.get('SM_OUTPUT_DATA_DIR')
    epochs = args.epochs
    batch_size = args.batch_size
    input_data = {args.input_data}

    dataset = read_csv(train_dataset_dir + 'dataset.csv', header=0, index_col='Date')
    dataset.sort_index(inplace=True)
    train = dataset.iloc[:109]
    test = dataset.iloc[109:]

    scaler = MinMaxScaler()
    scaled_train = scaler.fit_transform(train)
    scaled_test = scaler.fit_transform(test)

    n_input = 12
    n_feature = 1
    train_generator = TimeseriesGenerator(scaled_train, scaled_train, length=n_input, batch_size=1)

    model = Sequential()
    model.add(LSTM(128, activation='relu', input_shape=(n_input, n_feature), return_sequences=True))
    model.add(LSTM(128, activation='relu', return_sequences=True))
    model.add(LSTM(128, activation='relu', return_sequences=False))
    model.add(Dense(1))
    model.compile(optimizer='adam', loss='mse')
    history = model.fit_generator(train_generator, epochs=50, batch_size=1, verbose=1)

    # Get a SageMaker-compatible role used by this Notebook Instance.
    role = get_execution_role()

    with open(output_model_dir + '/history.json', 'w') as f:
        json.dump(history.history, f)

    # Save the Scaler
    dump(scaler, output_model_dir + '/scaler.model', protocol=2)

    # Save the trained model and weights
    model_json = model.to_json()
    with open(output_model_dir + "/model.json", "w") as json_file:
        json_file.write(model_json)
    model.save_weights(output_model_dir + "/model.h5")
And here is how I launch the training job, which then fails with an error:
train_instance_type = "ml.m4.xlarg"
tf_estimator = TensorFlow(entry_point='time_series.py', role=get_execution_role(),
                          train_instance_count=1, train_instance_type=train_instance_type,
                          framework_version='1.12', py_version='py3', script_mode=True,
                          output_path='s3://' + s3Bucket, base_job_name="sales-forecasting-lstm",
                          hyperparameters={'batch_size': 2,
                                           'epochs': 50})
tf_estimator.fit(uploaded_data_path)
This is the error I got; I don't understand what it means:
UnexpectedStatusException: Error for Training job sales-forecasting-lstm-2020-04-13-10-17-34-919: Failed. Reason: AlgorithmError: ExecuteUserScriptError:
Command "/usr/bin/python time_series.py --batch_size 2 --epochs 50 --model_dir s3://sagemaker12/sales-forecasting-lstm-2020-04-13-10-17-34-919/model"
I would recommend you change the channels in time_series.py to your S3 paths (you do not need the plus-sign concatenation on train_dataset_dir) and add SageMaker-specific arguments such as:
parser.add_argument('--output-data-dir', type=str, default='s3://bucket_name/folder_name/output')
The line above tells the job where your output data should be stored. However, that is something you have to specify when calling the batch transform function; otherwise it is stored in the default bucket.
Second, in order to debug (and for anyone to be able to help you), take a look at CloudWatch for that specific training job to understand better what is failing in your script, time_series.py. I suspect it is a problem with how the training data paths are specified and read. A rough sketch of the usual pattern follows below.
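As a sketch of the usual script-mode pattern (not taken from the question; the argument names are just the conventional ones), the script would define its own parser and read the training channel directory that SageMaker injects via environment variables rather than hard-coding paths:
import argparse
import os
import pandas as pd

parser = argparse.ArgumentParser()
parser.add_argument('--epochs', type=int, default=50)
parser.add_argument('--batch_size', type=int, default=72)
parser.add_argument('--model_dir', type=str)  # passed automatically by the TensorFlow estimator
# SM_CHANNEL_TRAINING points at the local copy of the data uploaded with estimator.fit(...)
parser.add_argument('--train', type=str, default=os.environ.get('SM_CHANNEL_TRAINING'))
args, _ = parser.parse_known_args()

# dataset.csv is assumed to be the file inside the training channel
dataset = pd.read_csv(os.path.join(args.train, 'dataset.csv'), header=0, index_col='Date')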

'no SavedModel bundles found!' on tensorflow_hub model deployment to AWS SageMaker

I am attempting to deploy the universal-sentence-encoder model to an AWS SageMaker endpoint and am getting the error raise ValueError('no SavedModel bundles found!').
My code is shown below; I have a feeling that one of my paths is incorrect.
import tensorflow as tf
import tensorflow_hub as hub
import numpy as np
from sagemaker import get_execution_role
from sagemaker.tensorflow.serving import Model

def tfhub_to_savedmodel(model_name, uri):
    tfhub_uri = uri
    model_path = 'encoder_model/' + model_name
    with tf.Session(graph=tf.Graph()) as sess:
        module = hub.Module(tfhub_uri)
        input_params = module.get_input_info_dict()
        dtype = input_params['text'].dtype
        shape = input_params['text'].get_shape()
        # define the model inputs
        inputs = {'text': tf.placeholder(dtype, shape, 'text')}
        # define the model outputs
        # we want the class ids and probabilities for the top 3 classes
        logits = module(inputs['text'])
        outputs = {
            'vector': logits,
        }
        # export the model
        sess.run([tf.global_variables_initializer(), tf.tables_initializer()])
        tf.saved_model.simple_save(
            sess,
            model_path,
            inputs=inputs,
            outputs=outputs)
    return model_path

sagemaker_role = get_execution_role()
!tar -C "$PWD" -czf encoder.tar.gz encoder_model/
model_data = Session().upload_data(path='encoder.tar.gz', key_prefix='model')
env = {'SAGEMAKER_TFS_DEFAULT_MODEL_NAME': 'universal-sentence-encoder-large'}
model = Model(model_data=model_data, role=sagemaker_role, framework_version=1.12, env=env)
predictor = model.deploy(initial_instance_count=1, instance_type='ml.t2.medium')
I suppose you started from this example? https://github.com/awslabs/amazon-sagemaker-examples/tree/master/sagemaker-python-sdk/tensorflow_serving_container
It looks like you're not saving the TF Serving bundle properly: the model version number is missing, because of this line:
model_path = 'encoder_model/' + model_name
Replacing it with this should fix your problem:
model_path = '{}/{}/00000001'.format('encoder_model', model_name)
Your model artefact should look like this (I used the model in the notebook above):
mobilenet/
mobilenet/mobilenet_v2_140_224/
mobilenet/mobilenet_v2_140_224/00000001/
mobilenet/mobilenet_v2_140_224/00000001/saved_model.pb
mobilenet/mobilenet_v2_140_224/00000001/variables/
mobilenet/mobilenet_v2_140_224/00000001/variables/variables.data-00000-of-00001
mobilenet/mobilenet_v2_140_224/00000001/variables/variables.index
Then, upload to S3 and deploy.
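For a quick sanity check before tarring and uploading, you can verify that the versioned directory actually contains a SavedModel. A small sketch, assuming TF 1.x and the encoder_model path built above:
import os
import tensorflow as tf

# TF Serving expects <model_name>/<version>/saved_model.pb inside the archive.
export_dir = 'encoder_model/universal-sentence-encoder-large/00000001'
print(tf.saved_model.loader.maybe_saved_model_directory(export_dir))  # should print True
print(os.listdir(export_dir))  # expect 'saved_model.pb' and a 'variables/' directory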