I have trained a PyTorch model and am now trying to load it in C++. I followed the steps on the PyTorch website for this, but I was unable to get it working. Can anyone tell me what I should do? This is the neural network I am using:
class MLP(nn.Module):
    def __init__(self, layers):
        super(MLP, self).__init__()
        # activation function
        self.activation = nn.Tanh()
        # loss function
        self.loss_function = nn.MSELoss(reduction='mean')
        # initialise the network as a list of layers using nn.ModuleList
        self.linears = nn.ModuleList([nn.Linear(layers[i], layers[i+1]) for i in range(len(layers)-1)])
        # Xavier normal initialization
        for i in range(len(layers)-1):
            nn.init.xavier_normal_(self.linears[i].weight.data, gain=1.0)
            # set biases to zero
            nn.init.zeros_(self.linears[i].bias.data)

    def forward(self, x):
        # l_b and u_b are the lower/upper input bounds, expected as globals
        x = (x - l_b) / (u_b - l_b)
        x = x.float()
        # apply the hidden layers with the activation; the original looped
        # over the global `layers`, which breaks once the model leaves this
        # script, so iterate over self.linears instead
        for i in range(len(self.linears) - 1):
            z = self.linears[i](x)
            x = self.activation(z)
        x = self.linears[-1](x)
        return x

    def loss_bc_init(self, x, y):
        loss_u = self.loss_function(self.forward(x), y)
        return loss_u
Please help.
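The usual route for running a PyTorch model from C++ is to convert it to TorchScript in Python first, then load the serialized file with LibTorch. Below is a minimal sketch of the export step, assuming the MLP above and example layer sizes of my own choosing; note that the forward pass must not rely on Python globals such as l_b, u_b, or layers, or scripting will fail:

import torch

# hypothetical configuration; substitute your own layer sizes and bounds
layers = [2, 20, 20, 1]
l_b, u_b = 0.0, 1.0
model = MLP(layers)
model.eval()

# Option 1: tracing records the ops executed for one example input
example = torch.rand(1, layers[0])
traced = torch.jit.trace(model, example)
traced.save("mlp_traced.pt")

# Option 2: scripting compiles forward() itself; this requires the
# bounds to live on the module (e.g. registered as buffers) rather
# than as free globals
# scripted = torch.jit.script(model)
# scripted.save("mlp_scripted.pt")

On the C++ side, the saved file is then loaded with torch::jit::load("mlp_traced.pt") after linking against LibTorch, as the official "Loading a TorchScript Model in C++" tutorial describes. A checkpoint written by a plain torch.save of the model object cannot be read from C++ directly, which is a common reason the website steps appear not to work.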
I am a beginner with NLP Transformers.
I am facing this issue while deploying a model using the Django framework. Locally the model works fine, but not when deployed.
Here I am importing a BERT model which was trained and saved using PyTorch. I follow the same procedure to load the model, but before loading I define the architecture that was defined during training.
After deploying the model, I get this error:
AttributeError: Can't get attribute 'BERT_Arch' on <module '__main__' from '/home/ubuntu/kc_env/bin/gunicorn'>
I tried a couple of things, like defining the BERT architecture before loading the model:
#### utils.py
from django.apps import AppConfig
# import torch
import torch.nn as nn

class BERT_Arch(nn.Module):
    def __init__(self):
        super(BERT_Arch, self).__init__()
        # NOTE: self.bert (the pretrained BERT backbone used in forward())
        # is never assigned in this snippet as posted; in the training code
        # it was presumably passed in and stored here
        # dropout layer
        self.dropout = nn.Dropout(0.2)
        # relu activation function
        self.relu = nn.ReLU()
        # dense layers
        self.fc1 = nn.Linear(768, 512)
        self.fc2 = nn.Linear(512, 256)
        self.fc3 = nn.Linear(256, 3)
        # softmax activation function
        self.softmax = nn.LogSoftmax(dim=1)

    # define the forward pass
    def forward(self, sent_id, mask):
        # hidden state of the [CLS] token
        cls_hs = self.bert(sent_id, attention_mask=mask)[0][:, 0]
        x = self.fc1(cls_hs)
        x = self.relu(x)
        x = self.dropout(x)
        x = self.fc2(x)
        x = self.relu(x)
        x = self.dropout(x)
        # output layer
        x = self.fc3(x)
        # apply softmax activation
        x = self.softmax(x)
        return x
### main.py
import torch
from .utils import BERT_Arch

model = BERT_Arch()

def func():
    # note: as written this assigns to a local variable; add `global model`
    # if the module-level `model` above is meant to be replaced
    model = torch.load('Path to load model.pt')
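The error comes from how pickle works: torch.load on a fully pickled model restores the class by the module path recorded at save time. If the training script saved the model from the top-level script, that path is __main__, and inside a gunicorn worker there is no BERT_Arch on __main__. The usual fix is to save only the weights and rebuild the model from the importable utils module. A sketch, assuming you can re-save once from an environment where torch.load still succeeds (weights.pt is a placeholder name):

# once, wherever torch.load currently works (e.g. locally):
import torch
model = torch.load('Path to load model.pt')
torch.save(model.state_dict(), 'weights.pt')

# in the Django app, where BERT_Arch is importable:
import torch
from .utils import BERT_Arch

model = BERT_Arch()
model.load_state_dict(torch.load('weights.pt', map_location='cpu'))
model.eval()

Alternatively, making the class resolvable at unpickling time also unblocks torch.load (e.g. import __main__; __main__.BERT_Arch = BERT_Arch before loading), but the state_dict route is what the PyTorch serialization docs recommend.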
I've launched a training run on the CelebA dataset for binary classification with PyTorch, in SageMaker Studio.
I've made sure everything (the model and the tensors) is sent to cuda().
My image dataset is in S3, and I'm accessing it with this code:
from PIL import Image
import s3fs
fs = s3fs.S3FileSystem()
# example
f = fs.open(f's3://aoha-bucket/img_celeba/dataset/000001.jpg')
And here is my PyTorch Dataset class, which uses s3fs to load the data served to the DataLoader:
import os
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset

class myDataset(Dataset):
    def __init__(self, csv_file, root_dir, target, length, adv=None, transform=None):
        self.annotations = pd.read_csv(csv_file).iloc[:length, :]
        self.root_dir = root_dir
        self.transform = transform
        self.target = target
        self.length = length
        self.adv = adv

    def __len__(self):
        return len(self.annotations)

    def __getitem__(self, index):
        # opens the image directly from S3 on every access
        img_path = fs.open(os.path.join(self.root_dir, self.annotations.loc[index, 'image_id']))
        image = Image.open(img_path)
        image = np.array(image)
        if self.transform:
            image = self.transform(image=image)["image"]
        image = np.transpose(image, (2, 0, 1)).astype(np.float32)
        image = torch.Tensor(image)
        y_label = torch.tensor(int(self.annotations.loc[index, str(self.target)]))
        if self.adv is None:
            return image, y_label
        if self.adv:
            z_label = torch.tensor(int(self.annotations.loc[index, 'origin']))
            return image, y_label, z_label
When I run this check, I get True:
next(model.parameters()).is_cuda
My issue is that training is far too slow, even slower than on my local CPU (which is not that powerful). For example, it reports that one epoch needs 1h45min, which is way too much.
I'm using a GPU-optimized PyTorch instance in Studio.
Have you ever launched a GPU training in SageMaker using PyTorch?
Could you please help?
Thank you very much,
Habib
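A likely culprit here is the input pipeline rather than the GPU: every __getitem__ opens one object on S3 over the network, so the GPU spends most of the epoch waiting for images. Two things worth checking, sketched below on the assumption that your DataLoader currently runs with the default single worker: parallelize the S3 reads with worker processes, or stage the dataset onto the instance's local disk before training so reads are local.

from torch.utils.data import DataLoader

# several workers fetch and decode images in parallel while the GPU
# trains on the previous batch; pin_memory speeds the host-to-GPU copy.
# Dataset arguments are placeholders for your own.
loader = DataLoader(
    myDataset(csv_file='list_attr_celeba.csv',
              root_dir='aoha-bucket/img_celeba/dataset',
              target='Male', length=10000),
    batch_size=64,
    shuffle=True,
    num_workers=8,      # tune to the instance's vCPU count
    pin_memory=True,
)

One caveat: a module-level s3fs handle may not survive the worker fork cleanly, so it can be safer to create the filesystem lazily inside __getitem__ (or in a worker_init_fn).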
I am using a DQN for resource allocation, where the agent should assign arriving requests to the best virtual machine.
I am modifying a CartPole code as follows:
import random
import gym
import numpy as np
from collections import deque
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
import os

class DQNAgent:
    def __init__(self, state_size, action_size):
        self.state_size = state_size
        self.action_size = action_size
        self.memory = deque(maxlen=2000)
        self.gamma = 0.95            # discount factor
        self.epsilon = 1.0           # exploration rate
        self.epsilon_decay = 0.995
        self.epsilon_min = 0.01
        self.learning_rate = 0.001
        self.model = self._build_model()

    def _build_model(self):
        model = Sequential()
        model.add(Dense(24, input_dim=self.state_size, activation='relu'))
        model.add(Dense(24, activation='relu'))
        model.add(Dense(self.action_size, activation='linear'))
        # note: newer Keras versions use learning_rate= instead of lr=
        model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))
        return model

    def remember(self, state, action, reward, next_state, done):
        self.memory.append((state, action, reward, next_state, done))

    def act(self, state):
        if np.random.rand() <= self.epsilon:
            return random.randrange(self.action_size)
        act_values = self.model.predict(state)
        return np.argmax(act_values[0])

    def replay(self, batch_size):
        minibatch = random.sample(self.memory, batch_size)
        for state, action, reward, next_state, done in minibatch:
            target = reward
            if not done:
                target = (reward + self.gamma * np.amax(self.model.predict(next_state)[0]))
            target_f = self.model.predict(state)
            target_f[0][action] = target
            self.model.fit(state, target_f, epochs=1, verbose=0)
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay

    def load(self, name):
        self.model.load_weights(name)

    def save(self, name):
        self.model.save_weights(name)
The CartPole states, which are the inputs of the Q-network, are given by the environment:
0: Cart Position
1: Cart Velocity, from -Inf to Inf
2: Pole Angle, from about -41.8° to about 41.8°
3: Pole Velocity At Tip
My question is: in my code, what are the inputs of the Q-network?
The agent should take the best possible action based on the size of the arriving request, but this size is not given by an environment. Should I feed the Q-network this input value, the size?
The input of the Deep Q-Network is fed from the replay memory, in the following part of the code:
def remember(self, state, action, reward, next_state, done):
    self.memory.append((state, action, reward, next_state, done))
As shown in the original DeepMind paper, the dynamic of this system is that you interact with the environment, store each transition in the replay memory, and then sample from it for the training step. The lines above are where these experiences get stored.
Basically, the network takes states as input and outputs Q-values. In your code there is no interaction with an environment, which is where the transitions (experiences) that feed the replay memory come from. So if you cannot extract information from your environment to represent as states, you have nothing to train the network on.
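To make that concrete for your resource-allocation problem: yes, the size of the arriving request can be part of the state, typically together with the current condition of each VM, so the network can condition its choice on both. Below is a minimal sketch of such a state vector and one interaction step feeding the replay memory; the specific features (request size plus per-VM load) and the normalization constants are my assumptions, not something your code prescribes.

import numpy as np

NUM_VMS = 4                # hypothetical number of virtual machines
MAX_REQUEST_SIZE = 100.0   # assumed normalization constants
MAX_VM_CAPACITY = 1000.0

def make_state(request_size, vm_loads):
    # state = normalized request size plus normalized load of each VM,
    # shaped (1, state_size) as model.predict expects
    features = [request_size / MAX_REQUEST_SIZE] + \
               [load / MAX_VM_CAPACITY for load in vm_loads]
    return np.array(features).reshape(1, -1)

agent = DQNAgent(state_size=1 + NUM_VMS, action_size=NUM_VMS)
state = make_state(42.0, [300.0, 550.0, 120.0, 900.0])
action = agent.act(state)              # index of the VM to assign to
# ...apply the assignment in your simulator, observe the reward and
# the next arriving request, then store the transition...
next_state = make_state(17.0, [342.0, 550.0, 120.0, 900.0])
agent.remember(state, action, reward=1.0, next_state=next_state, done=False)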
After having correctly deployed our model, I need to invoke it via a Lambda function. The script features two cleaning functions: the first one (cleaning()) gives us 5 variables, the cleaned dataset plus 4 others (scaler, monthdummies, compadummies, parceldummies) that we need to use in the second cleaning function (cleaning_test()).
The reason behind this is that in the use case I'll have only one instance at a time to perform predictions on, not an entire dataset. This means I can't pass that single row to the first cleaning() function, since some of its commands won't work: I can't fit a scaler or create dummy variables from one row. So the aim is to reuse the scaler and the dummies produced inside cleaning(), since they come from the whole dataset I used to train the model.
Hence, in the input_fn() function, the input needs to be cleaned using cleaning_test(), which requires the scaler and the three lists of dummies from cleaning().
When I train the model, the cleaning() function works fine, but after deployment, invoking the endpoint raises an error that the variable "scaler" is not defined.
Below is script.py. Note that the test part is commented out since I've already tested it; now I'm training on the whole dataset and want to predict completely new instances.
import argparse
import json
import os

import joblib
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

def cleaning(data):
    # ...some cleaning on the data stored in S3...
    return cleaned_data, scaler, monthdummies, compadummies, parceldummies

def cleaning_test(data, scaler, monthdummies, compadummies, parceldummies):
    # ...cleaning on data without labels...
    return cleaned_data

def model_fn(model_dir):
    clf = joblib.load(os.path.join(model_dir, "model.joblib"))
    return clf

def input_fn(request_body, request_content_type):
    if request_content_type == "application/json":
        data = json.loads(request_body)
        df = pd.DataFrame(data, index=[0])
        # fails at inference time: scaler and the dummy lists only
        # existed in the training process's memory
        input_data = cleaning_test(df, scaler, monthdummies, compadummies, parceldummies)
    else:
        raise ValueError("Unsupported content type: " + str(request_content_type))
    return input_data

def predict_fn(input_data, model):
    return model.predict_proba(input_data)

if __name__ == '__main__':
    print('extracting arguments')
    parser = argparse.ArgumentParser()

    # hyperparameters sent by the client are passed as command-line arguments to the script
    parser.add_argument('--n_estimators', type=int, default=10)
    parser.add_argument('--min-samples-leaf', type=int, default=3)

    # data, model, and output directories
    parser.add_argument('--model-dir', type=str, default=os.environ.get('SM_MODEL_DIR'))
    parser.add_argument('--train', type=str, default=os.environ.get('SM_CHANNEL_TRAIN'))
    #parser.add_argument('--test', type=str, default=os.environ.get('SM_CHANNEL_TEST'))
    parser.add_argument('--train-file', type=str, default='fp_train.csv')
    #parser.add_argument('--test-file', type=str, default='fp_test.csv')

    args, _ = parser.parse_known_args()

    print('reading data')
    train_df = pd.read_csv(os.path.join(args.train, args.train_file))
    #test_df = pd.read_csv(os.path.join(args.test, args.test_file))

    print('cleaning')
    train_df, scaler, monthdummies, compadummies, parceldummies = cleaning(train_df)
    #test_df, scaler1, monthdummies1, compadummies1, parceldummies1 = cleaning(test_df)

    print('splitting')
    y = train_df.loc[:, "event"]
    X = train_df.loc[:, train_df.columns != 'event']
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)

    """print('building training and testing datasets')
    X_train = train_df.loc[:, train_df.columns != 'event']
    X_test = test_df.loc[:, test_df.columns != 'event']
    y_train = train_df.loc[:, "event"]
    y_test = test_df.loc[:, "event"]"""

    print(X_train.columns)
    print(X_test.columns)

    # train
    print('training model')
    model = RandomForestClassifier(
        n_estimators=args.n_estimators,
        min_samples_leaf=args.min_samples_leaf,
        n_jobs=-1)
    model.fit(X_train, y_train)

    # validate
    print('validating model')
    proba = model.predict_proba(X_test)

    # persist model
    path = os.path.join(args.model_dir, "model.joblib")
    joblib.dump(model, path)
    print('model persisted at ' + path)
I run it through:
sklearn_estimator = SKLearn(
    entry_point='script.py',
    role=get_execution_role(),
    train_instance_count=1,
    train_instance_type='ml.c5.xlarge',
    framework_version='0.20.0',
    base_job_name='rf-scikit',
    hyperparameters={'n_estimators': 15})

sklearn_estimator.fit({'train': trainpath})
sklearn_estimator.latest_training_job.wait(logs='None')

artifact = sm_boto3.describe_training_job(
    TrainingJobName=sklearn_estimator.latest_training_job.name)['ModelArtifacts']['S3ModelArtifacts']

predictor = sklearn_estimator.deploy(
    instance_type='ml.c5.large',
    initial_instance_count=1)
The question is: how can I "store" the variables returned by cleaning() during the training process, in order to use them in input_fn() and make cleaning_test() work fine?
Thanks!
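One way to do this is to persist the preprocessing artifacts next to the model: everything written into args.model_dir during training ends up in the model.tar.gz that SageMaker unpacks on the endpoint, so model_fn can load it back. Since input_fn does not receive what model_fn returns, the cleaning step moves into predict_fn, where the loaded artifacts are available. A sketch under those assumptions (the artifacts.joblib name and the dict bundling are my own choices):

# at the end of the training block in script.py:
joblib.dump(
    {"scaler": scaler,
     "monthdummies": monthdummies,
     "compadummies": compadummies,
     "parceldummies": parceldummies},
    os.path.join(args.model_dir, "artifacts.joblib"))

# at serving time, load them alongside the model:
def model_fn(model_dir):
    clf = joblib.load(os.path.join(model_dir, "model.joblib"))
    artifacts = joblib.load(os.path.join(model_dir, "artifacts.joblib"))
    return {"model": clf, "artifacts": artifacts}

def input_fn(request_body, request_content_type):
    if request_content_type != "application/json":
        raise ValueError("Unsupported content type")
    # pass the raw frame through; cleaning happens in predict_fn
    return pd.DataFrame(json.loads(request_body), index=[0])

def predict_fn(input_data, model_and_artifacts):
    a = model_and_artifacts["artifacts"]
    cleaned = cleaning_test(input_data, a["scaler"], a["monthdummies"],
                            a["compadummies"], a["parceldummies"])
    return model_and_artifacts["model"].predict_proba(cleaned)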
I am trying to infer the generator of a continuous-time Markov process observed at discrete intervals. If the generator of the Markov process is $T$, then the stochastic matrix for the discrete time intervals is given by $P = \exp(T \Delta t)$. To implement this using PyMC3, I wrote the custom distribution class:
import theano.tensor as tt
import theano.tensor.slinalg as ts
import pymc3
from pymc3.distributions import Discrete
from pymc3.distributions.dist_math import bound

class ContinuousMarkovChain(Discrete):
    def __init__(self, t10=None, t01=None, dt=None, *args, **kwargs):
        super(ContinuousMarkovChain, self).__init__(*args, **kwargs)
        # constraint: both transition rates must be positive
        self.gt0 = (t01 > 0) & (t10 > 0)
        # 2x2 generator matrix and its matrix exponential
        T = tt.stacklists([[-t01, t01], [t10, -t10]])
        self.p = ts.expm(T * dt)

    def logp(self, x):
        # sum of log transition probabilities along the observed chain
        return bound(tt.log(self.p[x[:-1], x[1:]]).sum(), self.gt0)
I can use find_MAP and the Slice sampler with this class, but it fails with NUTS. The error message is:
AttributeError: 'ExpmGrad' object has no attribute 'grad'
I thought that NUTS only needed information about the gradient, so why is it trying to take the Hessian of expm?
I think PyMC3 needs the Hessian in parameter space to set the step size and direction for the parameters when using the NUTS algorithm. Maybe you can define the grad of ExpmGrad yourself.
A related discussion is here: https://github.com/pymc-devs/pymc3/issues/1226
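If defining the higher-order gradient for ExpmGrad is not appealing, a workaround for this specific model is to avoid expm entirely: for the two-state generator $T$ above, $\exp(T \Delta t)$ has a closed form in elementary functions of $s = t_{01} + t_{10}$, built from ordinary differentiable Theano ops, so NUTS can compute all the derivatives it needs. A sketch of that substitution (the algebra is the standard two-state CTMC solution):

import theano.tensor as tt

def expm_2state(t01, t10, dt):
    # closed-form exp(T*dt) for T = [[-t01, t01], [t10, -t10]]:
    # with s = t01 + t10 and e = exp(-s*dt),
    # P = (1/s) * [[t10 + t01*e, t01 - t01*e],
    #              [t10 - t10*e, t01 + t10*e]]
    s = t01 + t10
    e = tt.exp(-s * dt)
    return tt.stacklists([[t10 + t01 * e, t01 - t01 * e],
                          [t10 - t10 * e, t01 + t10 * e]]) / s

# inside ContinuousMarkovChain.__init__, replace
#     self.p = ts.expm(T * dt)
# with
#     self.p = expm_2state(t01, t10, dt)

You can sanity-check the formula at $\Delta t = 0$ (it gives the identity), and its time derivative at zero recovers $T$, as the Kolmogorov forward equation requires.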