I am newbie to AWS sagemaker, I am trying to deploy the time series custom lstm model in sagemaker , please help me out and how to perpare the script mode.
this my script file timer_series.py code.
import sagemaker
import boto3
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator
from sklearn.metrics import mean_squared_error
if __name__ =='__main__':
parser.add_argument('--epochs', type=int, default=50)
parser.add_argument('--batch_size', type=int, default=72)
parser.add_argument('--n_train_hours', type=int, default=24*365*2)
parser.add_argument('--n_validation_hours', type=int, default=24*365*4)
# input data and model directories
parser.add_argument('--model_dir', type=str)
args, _ = parser.parse_known_args()
train_dataset_dir = os.environ.get('SM_INPUT_DIR') + '/data/training/'
output_model_dir = os.environ.get('SM_MODEL_DIR')
output_object_dir = os.environ.get('SM_OUTPUT_DATA_DIR')
epochs = args.epochs
batch_size = args.batch_size
input_data = {args.input_data}
dataset = read_csv( train_dataset_dir + 'dataset.csv', header=0, index_col='Date')
dataset.sort_index(inplace=True)
train = dataset.iloc[:109]
test= dataset.iloc[109:]
scaler = MinMaxScaler()
scaled_train = scaler.fit_transform(train)
scaled_test=scaler.fit_transform(test)
n_input = 12
n_feature = 1
train_generator = TimeseriesGenerator(scaled_train,scaled_train,length=n_input, batch_size=1)
model = Sequential()
model.add(LSTM(128,activation = 'relu', input_shape= (n_input, n_feature), return_sequences=True))
model.add(LSTM(128, activation='relu', return_sequences=True))
model.add(LSTM(128, activation='relu', return_sequences=False))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse')
history =model.fit_generator(train_generator,epochs=50, batch_size=1,verbose=1)
# Get a SageMaker-compatible role used by this Notebook Instance.
role = get_execution_role()
with open(output_model_dir + '/history.json', 'w') as f:
json.dump(history.history, f)
#Save the Scaler
dump(scaler, output_model_dir + '/scaler.model', protocol=2)
#Save the trained model and weights
model_json = model.to_json()
with open(output_model_dir + "/model.json", "w") as json_file:
json_file.write(model_json)
model.save_weights(output_model_dir + "/model.h5")
here it showing some error:
train_instance_type = "ml.m4.xlarg"
tf_estimator = TensorFlow(entry_point='time_series.py', role=get_execution_role(),
train_instance_count=1, train_instance_type=train_instance_type,
framework_version='1.12', py_version='py3', script_mode=True,
output_path = 's3://' + s3Bucket, base_job_name = "sales-forecasting-lstm",
hyperparameters={'batch_size': 2,
'epochs': 50})
tf_estimator.fit(uploaded_data_path)
Here I got the error. what this error , I didn't understand this error.
UnexpectedStatusException: Error for Training job sales-forecasting-lstm-2020-04-13-10-17-34-919: Failed. Reason: AlgorithmError: ExecuteUserScriptError:
Command "/usr/bin/python time_series.py --batch_size 2 --epochs 50 --model_dir s3://sagemaker12/sales-forecasting-lstm-2020-04-13-10-17-34-919/model"
Hi, I am newbie to AWS sagemaker, I am trying to deploy the time series custom lstm model in sagemaker , please help me out and how to perpare the script mode , python script for deployment.
this my script file timer_series.py code.
import sagemaker
import boto3
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator
from sklearn.metrics import mean_squared_error
if __name__ =='__main__':
parser.add_argument('--epochs', type=int, default=50)
parser.add_argument('--batch_size', type=int, default=72)
parser.add_argument('--n_train_hours', type=int, default=24*365*2)
parser.add_argument('--n_validation_hours', type=int, default=24*365*4)
# input data and model directories
parser.add_argument('--model_dir', type=str)
args, _ = parser.parse_known_args()
train_dataset_dir = os.environ.get('SM_INPUT_DIR') + '/data/training/'
output_model_dir = os.environ.get('SM_MODEL_DIR')
output_object_dir = os.environ.get('SM_OUTPUT_DATA_DIR')
epochs = args.epochs
batch_size = args.batch_size
input_data = {args.input_data}
dataset = read_csv( input_data + 'dataset.csv', header=0, index_col='Date')
dataset.sort_index(inplace=True)
train = dataset.iloc[:109]
test= dataset.iloc[109:]
scaler = MinMaxScaler()
scaled_train = scaler.fit_transform(train)
scaled_test=scaler.fit_transform(test)
n_input = 12
n_feature = 1
train_generator = TimeseriesGenerator(scaled_train,scaled_train,length=n_input, batch_size=1)
model = Sequential()
model.add(LSTM(128,activation = 'relu', input_shape= (n_input, n_feature), return_sequences=True))
model.add(LSTM(128, activation='relu', return_sequences=True))
model.add(LSTM(128, activation='relu', return_sequences=False))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse')
history =model.fit_generator(train_generator,epochs=50, batch_size=1,verbose=1)
# Get a SageMaker-compatible role used by this Notebook Instance.
role = get_execution_role()
with open(output_model_dir + '/history.json', 'w') as f:
json.dump(history.history, f)
#Save the Scaler
dump(scaler, output_model_dir + '/scaler.model', protocol=2)
#Save the trained model and weights
model_json = model.to_json()
with open(output_model_dir + "/model.json", "w") as json_file:
json_file.write(model_json)
model.save_weights(output_model_dir + "/model.h5")
here it showing some error:
train_instance_type = "ml.m4.xlarg"
tf_estimator = TensorFlow(entry_point='time_series.py', role=get_execution_role(),
train_instance_count=1, train_instance_type=train_instance_type,
framework_version='1.12', py_version='py3', script_mode=True,
output_path = 's3://' + s3Bucket, base_job_name = "sales-forecasting-lstm",
hyperparameters={'batch_size': 2,
'epochs': 50})
tf_estimator.fit(uploaded_data_path)
Here I got the error. what this error , I didn't understand this error.
UnexpectedStatusException: Error for Training job sales-forecasting-lstm-2020-04-13-10-17-34-919: Failed. Reason: AlgorithmError: ExecuteUserScriptError:
Command "/usr/bin/python time_series.py --batch_size 2 --epochs 50 --model_dir s3://sagemaker12/sales-forecasting-lstm-2020-04-13-10-17-34-919/model"
I would recommend you change timer_series.py cahnnels to your s3 paths, you do not have to add the plus sign on your train_dataset_dir and add sagemaker specific arguments as:
parser.add_argument('--output-data-dir', type=str, default='s3://bucket_name/folder_name/output')
Line above for directions on where your output data should be stored. However that is something you have to specify when calling the batch transform function. Otherwise would store it in default bucket.
Second in order to debug , and being able to help you, you would have to take a look at CloudWatch for that specific training job to better understand what is failing on your script: time_series.py. I suppose is a problem with the specifications and reading of your training data.
Related
Getting error: ClientError: An error occurred (ValidationException) when calling the CreateHyperParameterTuningJob operation: The objective metric for the hyperparameter tuning job, [mse], isn’t valid for the [720646828776.dkr.ecr.ap-south-1.amazonaws.com/sagemaker-xgboost:0.90-2-cpu-py3] algorithm. Choose a valid objective metric.
import datetime
import time
import tarfile
import boto3
import pandas as pd
import numpy as np
from sagemaker import get_execution_role
import sagemaker
from sklearn.model_selection import train_test_split
from sklearn.datasets import fetch_california_housing
from sagemaker.tuner import (
IntegerParameter,
CategoricalParameter,
ContinuousParameter,
HyperparameterTuner,
)
s3 = boto3.client("s3")
sm_boto3 = boto3.client("sagemaker")
sagemaker_session = sagemaker.Session()
region = sess.boto_session.region_name
role = get_execution_role()
#Set the required configurations
model_name = "abc_model"
env = "dev"
#S3 Bucket
bucket = "abcpoc"
print("Using bucket " + bucket)
from sagemaker.debugger import Rule, rule_configs
from sagemaker.session import TrainingInput
s3_input_train = TrainingInput(
s3_data=f"s3://{default_bucket}/train/",content_type="csv")
s3_input_validation = TrainingInput(
s3_data=f"s3://{default_bucket}/validation/",content_type="csv")
prefix = 'output'
container=sagemaker.image_uris.retrieve("xgboost", region, "1.2-1")
print(container)
xgb = sagemaker.estimator.Estimator(
image_uri=container,
role=role,
base_job_name="xgboost-random-search",
instance_count=1,
instance_type="ml.m4.xlarge",
output_path="s3://{}/{}/output".format(bucket, prefix),
sagemaker_session= sagemaker.Session(),
rules=[Rule.sagemaker(rule_configs.create_xgboost_report())]
)
xgb.set_hyperparameters(
max_depth = 5,
eta = 0.2,
gamma = 4,
min_child_weight = 6,
subsample = 0.7,
objective = "reg:squarederror",
num_round = 1000
)
hyperparameter_ranges = {
"eta": ContinuousParameter(0, 1),
"min_child_weight": ContinuousParameter(1, 10),
"alpha": ContinuousParameter(0, 2),
"max_depth": IntegerParameter(1, 10),
}
objective_metric_name = "mse"
metric_definitions = [{"Name": "mse", "Regex": "mse: ([0-9\\.]+)"}]
tuner = HyperparameterTuner(estimator,
objective_metric_name,
hyperparameter_ranges,
metric_definitions=None,
strategy='Bayesian',
objective_type='Maximize',
max_jobs=1,
max_parallel_jobs=1,
tags=None,
base_tuning_job_name=None)
#Tune
tuner.fit({
"train":s3_input_train,
"validation":s3_input_validation
},include_cls_metadata=False)
#Explore the best model generated
tuning_job_result = boto3.client("sagemaker").describe_hyper_parameter_tuning_job(
HyperParameterTuningJobName=tuner.latest_tuning_job.job_name
)
job_count = tuning_job_result["TrainingJobStatusCounters"]["Completed"]
print("%d training jobs have completed" %job_count)
#10 training jobs have completed
#Get the best training job
from pprint import pprint
if tuning_job_result.get("BestTrainingJob",None):
print("Best Model found so far:")
pprint(tuning_job_result["BestTrainingJob"])
else:
print("No training jobs have reported results yet.")
For built-in algorithms, you simply need to use a metric that's already set as an objective metric for tuning, instead of defining a metric like here -
objective_metric_name = "mse"
metric_definitions = [{"Name": "mse", "Regex": "mse: ([0-9\\.]+)"}]
Here's a list of metrics supported by the XGBoost algorithm. You can choose one of these, for example, validation:mse and specify them as your objective metric.
I am trying to use my own ML models for creating trainings job in aws Sagemaker. When I start training process everything goes well but at the end it says that "AttributeError: Can't pickle local object 'train..create_model'". I am new into this job. I did the same things for mlp, knn, cart, and svr but never encountered with that issue. I know that lstm uses too much different things to create model but I can not figure out how to solve that issue.
Here is my train.py file where I get the error:
from __future__ import print_function
import json
import os
import pickle
import sys
import traceback
import pandas as pd
import numpy as np
from pandas import DataFrame
from pandas import concat
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
# These are the paths to where SageMaker mounts interesting things in your container.
prefix = "/opt/ml/"
input_path = prefix + "input/data"
output_path = os.path.join(prefix, "output")
model_path = os.path.join(prefix, "model")
# This algorithm has a single channel of input data called 'training'. Since we run in
# File mode, the input files are copied to the directory specified here.
channel_name = "training"
training_path = os.path.join(input_path, channel_name)
# The function to execute training.
def train():
print("Starting the training")
print(training_path)
try:
# Take the set of files and read them all into a single pandas dataframe
input_files = [ os.path.join(training_path, file) for file in os.listdir(training_path) ]
if len(input_files) == 0:
raise ValueError(('There are no files in {}.\n' +
'This usually indicates that the channel ({}) was incorrectly specified,\n' +
'the data specification in S3 was incorrectly specified or the role specified\n' +
'does not have permission to access the data.').format(training_path, channel_name))
raw_data = [ pd.read_csv(file, header=0, index_col=0) for file in input_files ]
data = pd.concat(raw_data)
print(data)
# convert series to supervised learning
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
# put it all together
agg = concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg
values = data.values
# ensure all data is float
values = values.astype('float32')
# normalize features
scaler = MinMaxScaler()
scaled = scaler.fit_transform(values)
# specify the number of lag time steps
n_timesteps = 3
n_features = 4
# frame as supervised learning
reframed = series_to_supervised(scaled, n_timesteps, 1)
print(reframed.shape)
# drop columns we don't want to predict
reframed.drop(reframed.columns[[4,9,14,15,16,17,18]], axis=1, inplace=True)
print(reframed.head())
# split into train and test sets
values = reframed.values
n_train_size = 403
train = values[:n_train_size, :]
test = values[n_train_size:, :]
# split into input and outputs
n_obs = n_timesteps * n_features
train_X, train_y = train[:, :n_obs], train[:, -1]
test_X, test_y = test[:, :n_obs], test[:, -1]
print(train_X.shape, len(train_X), train_y.shape)
# reshape input to be 3D [samples, timesteps, features]
train_X = train_X.reshape((train_X.shape[0], n_timesteps, n_features))
test_X = test_X.reshape((test_X.shape[0], n_timesteps, n_features))
print(train_X.shape, train_y.shape, test_X.shape, test_y.shape)
# Function to create model
def create_model():
# create model
model = Sequential()
model.add(LSTM(50, input_shape=(train_X.shape[1], train_X.shape[2])))
model.add(Dense(1))
# Compile model
# optimizer = SGD(lr=learn_rate, momentum=momentum)
model.compile(loss='mae',optimizer='adam')
return model
from scikeras.wrappers import KerasRegressor
# create model
model = KerasRegressor(model=create_model, verbose=0)
from sklearn.model_selection import GridSearchCV
# define the grid search parameters
batch_size = [2,4,8,16,32]
epochs = [10, 50, 100]
#learn_rate = [0.001, 0.01, 0.1, 0.2, 0.3]
#momentum = [0.0, 0.2, 0.4, 0.6, 0.8, 0.9]
param_grid = dict(batch_size=batch_size, epochs=epochs)
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1, cv=3)
grid_result = grid.fit(train_X, train_y)
# summarize results
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_['mean_test_score']
stds = grid_result.cv_results_['std_test_score']
params = grid_result.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
print("%f (%f) with: %r" % (mean, stdev, param))
# save the model
with open(os.path.join(model_path, "snop-lstm.pkl"), "wb") as out:
pickle.dump(grid, out)
print("Training complete.")
except Exception as e:
# Write out an error file. This will be returned as the failureReason in the
# DescribeTrainingJob result.
trc = traceback.format_exc()
with open(os.path.join(output_path, "failure"), "w") as s:
s.write("Exception during training: " + str(e) + "\n" + trc)
# Printing this causes the exception to be in the training job logs, as well.
print("Exception during training: " + str(e) + "\n" + trc, file=sys.stderr)
# A non-zero exit code causes the training job to be marked as Failed.
sys.exit(255)
if __name__ == "__main__":
train()
# A zero exit code causes the job to be marked a Succeeded.
sys.exit(0)
And this is the log:
2022-02-25T10:28:16.751+03:00
Exception during training: Can't pickle local object 'train.<locals>.create_model'
Exception during training: Can't pickle local object 'train.<locals>.create_model'
2022-02-25T10:28:16.751+03:00
Traceback (most recent call last):
File "/opt/program/train", line 154, in train
pickle.dump(grid, out)
Traceback (most recent call last): File "/opt/program/train", line 154, in train pickle.dump(grid, out)
2022-02-25T10:28:16.751+03:00
AttributeError: Can't pickle local object 'train.<locals>.create_model'
AttributeError: Can't pickle local object 'train.<locals>.create_model'
It seems that you are trying to pickle an object of class GridSearchCV instead of the model itself:
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1, cv=3)
...
...
pickle.dump(grid, out)
I think what you want instead is to retrieve the best model (via best_model_, see here: https://github.com/scikit-learn/scikit-learn/blob/37ac6788c/sklearn/model_selection/_search.py#L1247) and then pickle that model
I have an inference code in TensorRT(with python). I want to run this code in ROS but I get the below error when trying to allocate buffer:
LogicError: explicit_context_dependent failed: invalid device context - no currently active context?
The code works well out of the ROS package. A ROS node publishes an image and the given code get the image to do inference. The inference code is shown below:
#!/usr/bin/env python
# Revision $Id$
import rospy
from std_msgs.msg import String
from cv_bridge import CvBridge
import cv2
import os
import numpy as np
import argparse
import torch
from torch.autograd import Variable
from torchvision import transforms
import torch.nn.functional as F
import torch._utils
from PIL import Image
from sensor_msgs.msg import Image as ImageMsg
import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit
import random
import sys
import common
import shutil
from itertools import chain
TRT_LOGGER = trt.Logger()
# cuda.init()
class ModelData(object):
def __init__(self):
self.MODEL_PATH = "./MobileNet_v2_Final.onnx" ## converted model from pytorch to onnx
self.batch_size = 1
self.num_classes = 3
self.engine = build_int8_engine(self.MODEL_PATH, self.batch_size)
self.context = self.engine.create_execution_context()
### ROS PART
self.bridge_ROS = CvBridge()
self.loop_rate = rospy.Rate(1)
self.pub = rospy.Publisher('Image_Label', String, queue_size=1)
print('INIT Successfully')
def callback(self, msg):
rospy.loginfo('Image received...')
cv_image = self.bridge_ROS.imgmsg_to_cv2(msg, desired_encoding="passthrough")
inputs, outputs, bindings, stream = common.allocate_buffers(context.engine)
[output] = common.do_inference(context, bindings=bindings, inputs=inputs, outputs=outputs, stream=stream, batch_size=effective_batch_size)
def listener(self):
rospy.Subscriber("chatter", ImageMsg, self.callback)
while not rospy.is_shutdown():
rospy.loginfo('Getting image...')
self.loop_rate.sleep()
def build_int8_engine(model_file, batch_size=32):
with trt.Builder(TRT_LOGGER) as builder, builder.create_network() as network, trt.OnnxParser(network, TRT_LOGGER) as parser:
builder.max_batch_size = batch_size
builder.max_workspace_size = common.GiB(1)
with open(model_file, 'rb') as model:
parser.parse(model.read(),)
return builder.build_cuda_engine(network)
if __name__ == '__main__':
rospy.init_node("listener", anonymous=True)
infer = ModelData()
infer.listener()
The error comes from the below class in stream = cuda.Stream():
#!/usr/bin/env python
# Revision $Id$
from itertools import chain
import argparse
import os
import pycuda.driver as cuda
import pycuda.autoinit
import numpy as np
import tensorrt as trt
# Simple helper data class that's a little nicer to use than a 2-tuple.
class HostDeviceMem(object):
def __init__(self, host_mem, device_mem):
self.host = host_mem
self.device = device_mem
def __str__(self):
return "Host:\n" + str(self.host) + "\nDevice:\n" + str(self.device)
def __repr__(self):
return self.__str__()
# Allocates all buffers required for an engine, i.e. host/device inputs/outputs.
def allocate_buffers(engine):
inputs = []
outputs = []
bindings = []
stream = cuda.Stream()
for binding in engine:
size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size
dtype = trt.nptype(engine.get_binding_dtype(binding))
# Allocate host and device buffers
host_mem = cuda.pagelocked_empty(size, dtype)
device_mem = cuda.mem_alloc(host_mem.nbytes)
# Append the device buffer to device bindings.
bindings.append(int(device_mem))
# Append to the appropriate list.
if engine.binding_is_input(binding):
inputs.append(HostDeviceMem(host_mem, device_mem))
else:
outputs.append(HostDeviceMem(host_mem, device_mem))
ctx.pop()
del ctx
return inputs, outputs, bindings, stream
# This function is generalized for multiple inputs/outputs.
# inputs and outputs are expected to be lists of HostDeviceMem objects.
def do_inference(context, bindings, inputs, outputs, batch_size=1):
# Transfer input data to the GPU.
[cuda.memcpy_htod_async(inp.device, inp.host, stream) for inp in inputs]
# [cuda.memcpy_htod(inp.device, inp.host) for inp in inputs]
# Run inference.
context.execute_async(batch_size=batch_size, bindings=bindings, stream_handle=stream.handle)
# context.execute(batch_size=batch_size, bindings=bindings)
# Transfer predictions back from the GPU.
[cuda.memcpy_dtoh_async(out.host, out.device, stream) for out in outputs]
# [cuda.memcpy_dtoh(out.host, out.device) for out in outputs]
# Synchronize the stream
stream.synchronize()
# Return only the host outputs.
return [out.host for out in outputs]
More info:
TensorRT: 6.1.5
Python: 2.7
rosversion: 1.14.3
rosdistro: melodic
You need to explicitly create Cuda Device and load Cuda Context in the worker thread i.e. your callback function, instead of using import pycuda.autoinit in the main thread, as follows
import pycuda.driver as cuda
import threading
def callback():
cuda.init()
device = cuda.Device(0) # enter your Gpu id here
ctx = device.make_context()
allocate_buffers() # load Cuda buffers or any other Cuda or TenosrRT operations
ctx.pop() # very important
if __name__ == "__main__":
worker_thread = threading.Thread(target=callback())
worker_thread.start()
worker_thread.join()
Note: do not forget to remove import pycuda.autoinit in both modules
This is also discussed in a question here
please init cuda.
As answers above.
import pycuda.driver as cuda in main.py or befor import cuda-XXX-process
I attempting to deploy the universal-sentence-encoder model to a aws Sagemaker endpoint and am getting the error raise ValueError('no SavedModel bundles found!')
I have shown my code below, I have a feeling that one of my paths is incorrect
import tensorflow as tf
import tensorflow_hub as hub
import numpy as np
from sagemaker import get_execution_role
from sagemaker.tensorflow.serving import Model
def tfhub_to_savedmodel(model_name,uri):
tfhub_uri = uri
model_path = 'encoder_model/' + model_name
with tf.Session(graph=tf.Graph()) as sess:
module = hub.Module(tfhub_uri)
input_params = module.get_input_info_dict()
dtype = input_params['text'].dtype
shape = input_params['text'].get_shape()
# define the model inputs
inputs = {'text': tf.placeholder(dtype, shape, 'text')}
# define the model outputs
# we want the class ids and probabilities for the top 3 classes
logits = module(inputs['text'])
outputs = {
'vector': logits,
}
# export the model
sess.run([tf.global_variables_initializer(), tf.tables_initializer()])
tf.saved_model.simple_save(
sess,
model_path,
inputs=inputs,
outputs=outputs)
return model_path
sagemaker_role = get_execution_role()
!tar -C "$PWD" -czf encoder.tar.gz encoder_model/
model_data = Session().upload_data(path='encoder.tar.gz',key_prefix='model')
env = {'SAGEMAKER_TFS_DEFAULT_MODEL_NAME': 'universal-sentence-encoder-large'}
model = Model(model_data=model_data, role=sagemaker_role, framework_version=1.12, env=env)
predictor = model.deploy(initial_instance_count=1, instance_type='ml.t2.medium')
I suppose you started from this example? https://github.com/awslabs/amazon-sagemaker-examples/tree/master/sagemaker-python-sdk/tensorflow_serving_container
It looks like you're not saving the TF Serving bundle properly: the model version number is missing, because of this line:
model_path = 'encoder_model/' + model_name
Replacing it with this should fix your problem:
model_path = '{}/{}/00000001'.format('encoder_model/', model_name)
Your model artefact should look like this (I used the model in the notebook above):
mobilenet/
mobilenet/mobilenet_v2_140_224/
mobilenet/mobilenet_v2_140_224/00000001/
mobilenet/mobilenet_v2_140_224/00000001/saved_model.pb
mobilenet/mobilenet_v2_140_224/00000001/variables/
mobilenet/mobilenet_v2_140_224/00000001/variables/variables.data-00000-of-00001
mobilenet/mobilenet_v2_140_224/00000001/variables/variables.index
Then, upload to S3 and deploy.
i have restored my trained tensorflow model and then i want detect input image results.
here restore code:
import tensorflow as tf
import cv2
sess=tf.Session()
image_size=128
saver = tf.train.import_meta_graph('my_test_model-1000.meta')
saver.restore(sess,tf.train.latest_checkpoint('./'))
sess.run(tf.global_variables_initializer())
then how can i predict 128*128*3 rgb input image with this model?
import tensorflow as tf
import cv2
def model(inputs):
# You define your model over here
...
...
return logtis
image = cv2.imread("image_path.jpg")
ip_tensor = tf.placeholder(tf.float32, (None, 128, 128, 3))
logits = model(ip_tensor)
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('./')) # restore your model
feed={ip_tensor: inputs} # prepare your feed to the saved model
predictions = sess.run(tf.argmax(logits, 1), feed_dict=feed) # make prediction