I am trying to implement the architecture below, and I am not sure I am applying gradient tape properly.
In the above architecture you can see that outputs are taken from multiple layers (the blue boxes). Each blue box is termed a loss branch in the paper, and it contains two losses, namely cross-entropy and L2 loss. I wrote the architecture in TensorFlow 2 and am using gradient tape for custom training. The one thing I am not sure about is how I should update the losses using gradient tape.
I have two queries:
How am I supposed to use gradient tape for multiple losses in this scenario? I am interested in seeing code!
For instance, consider the 3rd blue box (3rd loss branch) in the above image, where we take inputs from the conv 13 layer and get two outputs, one for classification and the other for regression.
So after computing the losses, how am I supposed to update the weights: should I update all the layers above (conv 1 through conv 13), or only the layers that produced conv 13 (conv 11, 12, and 13)?
I am also attaching a link to a question I posted yesterday with more detail.
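To make the branch structure concrete, here is a minimal tf.keras sketch of what I mean (placeholder layer sizes, not the paper's actual architecture), with two loss branches, each producing a classification head and a regression head:

import tensorflow as tf
from tensorflow.keras import layers

inputs = tf.keras.Input(shape=(None, None, 3))
x = layers.Conv2D(32, 3, padding='same', activation='relu')(inputs)  # stands in for conv 1..10
b1_cls = layers.Conv2D(2, 1, name='branch1_cls')(x)  # classification head of branch 1
b1_reg = layers.Conv2D(4, 1, name='branch1_reg')(x)  # regression head of branch 1
x = layers.Conv2D(64, 3, padding='same', activation='relu')(x)       # stands in for conv 11..13
b2_cls = layers.Conv2D(2, 1, name='branch2_cls')(x)
b2_reg = layers.Conv2D(4, 1, name='branch2_reg')(x)
net = tf.keras.Model(inputs, [b1_cls, b1_reg, b2_cls, b2_reg])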
Below is the snippet I have tried for gradient descent. Please correct me if I am wrong.
images = batch.data[0]
images = (images - 127.5) / 127.5
targets = batch.label

with tensorflow.GradientTape() as tape:
    outputs = self.net(images)
    loss = self.loss_criterion(outputs, targets)
    self.scheduler(i, self.optimizer)

grads = tape.gradient(loss, self.net.trainable_variables)
self.optimizer.apply_gradients(zip(grads, self.net.trainable_variables))
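My current understanding of the multi-loss part is that summing the branch losses into one scalar inside the tape and differentiating once is enough, because each variable then receives the sum of the gradients from every branch that depends on it (so a branch taken from conv 13 contributes gradients to conv 1 through conv 13 and its own heads, and nothing to the layers above it). A minimal sketch of that pattern, with net, loss_criterion and optimizer as placeholders; please correct me if this is wrong:

with tensorflow.GradientTape() as tape:
    outputs = net(images, training=True)               # list of per-branch outputs
    branch_losses = loss_criterion(outputs, targets)   # list of per-branch scalar losses
    total_loss = tensorflow.add_n(branch_losses)       # one scalar for a single backward pass

# each variable receives the summed gradient from all branches that use it
grads = tape.gradient(total_loss, net.trainable_variables)
optimizer.apply_gradients(zip(grads, net.trainable_variables))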
Below is the code for the custom loss function that is used as loss_criterion above.
def loss_criterion(self, outputs, targets):
    losses = []
    for i in range(self.num_output_scales):
        pred_score = outputs[i * 2]
        pred_bbox = outputs[i * 2 + 1]
        gt_mask = targets[i * 2]
        gt_label = targets[i * 2 + 1]

        pred_score_softmax = tensorflow.nn.softmax(pred_score, axis=1)
        loss_mask = tensorflow.ones(pred_score_softmax.shape, tensorflow.float32)

        if self.hnm_ratio > 0:
            pos_flag = (gt_label[:, 0, :, :] > 0.5)
            pos_num = tensorflow.math.reduce_sum(tensorflow.cast(pos_flag, dtype=tensorflow.float32))

            if pos_num > 0:
                # hard negative mining: keep the hnm_ratio * pos_num lowest-scoring negatives
                neg_flag = (gt_label[:, 1, :, :] > 0.5)
                neg_num = tensorflow.math.reduce_sum(tensorflow.cast(neg_flag, dtype=tensorflow.float32))
                neg_num_selected = min(int(self.hnm_ratio * pos_num), int(neg_num))
                neg_prob = tensorflow.where(neg_flag, pred_score_softmax[:, 1, :, :],
                                            tensorflow.zeros_like(pred_score_softmax[:, 1, :, :]))
                neg_prob_sort = tensorflow.sort(tensorflow.reshape(neg_prob, shape=(1, -1)), direction='ASCENDING')
                prob_threshold = neg_prob_sort[0][int(neg_num_selected)]
                neg_grad_flag = (neg_prob <= prob_threshold)
                loss_mask = tensorflow.concat([tensorflow.expand_dims(pos_flag, axis=1),
                                               tensorflow.expand_dims(neg_grad_flag, axis=1)], axis=1)
            else:
                # no positives in this batch: keep a fixed fraction of the negatives
                neg_choice_ratio = 0.1
                neg_num_selected = int(tensorflow.cast(tensorflow.size(pred_score_softmax[:, 1, :, :]), dtype=tensorflow.float32) * neg_choice_ratio)
                neg_prob = pred_score_softmax[:, 1, :, :]
                neg_prob_sort = tensorflow.sort(tensorflow.reshape(neg_prob, shape=(1, -1)), direction='ASCENDING')
                prob_threshold = neg_prob_sort[0][int(neg_num_selected)]
                neg_grad_flag = (neg_prob <= prob_threshold)
                loss_mask = tensorflow.concat([tensorflow.expand_dims(pos_flag, axis=1),
                                               tensorflow.expand_dims(neg_grad_flag, axis=1)], axis=1)

        pred_score_softmax_masked = tensorflow.where(loss_mask, pred_score_softmax,
                                                     tensorflow.zeros_like(pred_score_softmax, dtype=tensorflow.float32))
        pred_score_log = tensorflow.math.log(pred_score_softmax_masked)
        score_cross_entropy = - tensorflow.where(loss_mask, gt_label[:, :2, :, :],
                                                 tensorflow.zeros_like(gt_label[:, :2, :, :], dtype=tensorflow.float32)) * pred_score_log
        loss_score = (tensorflow.math.reduce_sum(score_cross_entropy) /
                      tensorflow.cast(tensorflow.size(score_cross_entropy), tensorflow.float32))

        mask_bbox = gt_mask[:, 2:6, :, :]
        predict_bbox = pred_bbox * mask_bbox
        label_bbox = gt_label[:, 2:6, :, :] * mask_bbox
        # l2 loss of boxes
        # loss_bbox = tensorflow.math.reduce_sum(tensorflow.nn.l2_loss((label_bbox - predict_bbox)) ** 2) / 2
        # `mse` is a mean-squared-error callable defined elsewhere in the class
        loss_bbox = mse(label_bbox, predict_bbox) / tensorflow.math.reduce_sum(mask_bbox)

        # adding only the losses relevant to this branch and sending them for backprop
        losses.append(loss_score + loss_bbox)
        # losses.append(loss_bbox)

        # Approach 1: adding all losses and sending the sum to backprop
        # loss_cls += loss_score
        # loss_reg += loss_bbox
        # loss_branch.append(loss_score)
        # loss_branch.append(loss_bbox)
        # loss = loss_cls + loss_reg
    return losses
I am not getting any error, but my losses aren't minimizing. Here is the log of my training.
Could someone please help me fix this?
I am training my model on the MNIST dataset, using Google Colab for the GPU.
The device is set to cuda, but I am still getting an error. I have tried other solutions and they do not work either, even though the code runs fine on my local PC. Does Colab require anything different?
I have previously trained on AWS and there was no issue with the code.
epochs = 22
steps = 0
print_every_step = 5
total_train_loss, total_test_loss = [], []

for e in range(epochs):
    train_loss = 0
    test_loss = 0
    accuracy = 0

    for images, labels in train_loader:
        # clear the gradients of all optimized variables
        optimizer.zero_grad()
        steps += 1
        images, labels = images.to(device), labels.to(device)
        # forward pass: compute predicted outputs by passing inputs to the model
        log_ps = model(images)
        loss = criterion(log_ps, labels)
        loss.backward()
        optimizer.step()
        train_loss += loss.item()

        if steps % print_every_step == 0:
            model.eval()
            with torch.no_grad():
                for images, labels in test_loader:
                    images, labels = images.to(device), labels.to(device)
                    log_ps = model(images)
                    loss = criterion(log_ps, labels)
                    test_loss += loss.item()

                    # calculate accuracy
                    ps = torch.exp(log_ps)
                    top_p, top_class = ps.topk(1, dim=1)
                    equals = top_class == labels.view(*top_class.shape)
                    accuracy += torch.mean(equals.type(torch.FloatTensor)).item()

            print(f"Epoch {e+1}/{epochs}.. "
                  f"Train loss: {train_loss/print_every_step:.3f}.. "
                  f"Test loss: {test_loss/len(test_loader):.3f}.. "
                  f"Test accuracy: {accuracy/len(test_loader):.3f}")
            model.train()

    total_train_loss.append(train_loss/print_every_step)
    total_test_loss.append(test_loss/len(test_loader))
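For completeness, my device setup looks roughly like this (a minimal sketch; model is the network defined elsewhere):

import torch

# use the Colab GPU when one is attached, otherwise fall back to the CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)  # should print "cuda" on a GPU runtime

model = model.to(device)  # the model must live on the same device as the input tensors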
I am training a neural network that classifies traffic signs, but it takes too long to train: 30+ minutes for just one epoch. I have set the batch size to 64 and the learning rate to 0.002, the input is 20x20 pixels with 3 channels, and the model summary shows that it is training 173,931 parameters. Is that too many, or reasonable?
Here is the network architecture
import torch.nn as nn
import torch.nn.functional as F
from torchsummary import summary

class Network(nn.Module):
    def __init__(self):
        super(Network, self).__init__()
        # convolutional layers
        self.conv1 = nn.Conv2d(3, 16, 3, padding=1)
        self.conv2 = nn.Conv2d(16, 32, 3, padding=1)
        # max pooling layer
        self.pool = nn.MaxPool2d(2, 2)
        # linear fully connected layers
        self.fc1 = nn.Linear(32*5*5, 200)
        self.fc2 = nn.Linear(200, 43)
        # dropout
        self.dropout = nn.Dropout(p=0.25)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 32*5*5)
        x = self.dropout(x)
        x = F.relu(self.fc1(x))
        x = self.dropout(x)
        x = self.fc2(x)
        return x
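As a sanity check, the 173,931 figure can be verified by hand; a quick sketch (model here is just a fresh instance for the check):

model = Network()
# conv1: 3*16*3*3 + 16  =     448
# conv2: 16*32*3*3 + 32 =   4,640
# fc1:   800*200 + 200  = 160,200
# fc2:   200*43 + 43    =   8,643
# total                 = 173,931
print(sum(p.numel() for p in model.parameters() if p.requires_grad))  # 173931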
Here is the optimizer instance
import torch.optim as optim

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.002)  # renamed so it doesn't shadow the optim module
Here is the training code
import numpy as np

epochs = 20
valid_loss_min = np.Inf

print("Training the network")
for epoch in range(1, epochs+1):
    train_loss = 0
    valid_loss = 0

    model.train()
    for data, target in train_data:
        if gpu_available:
            data, target = data.cuda(), target.cuda()
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        train_loss += loss.item()*data.size(0)

    #########################
    ###### Validate #########
    model.eval()
    with torch.no_grad():  # no gradients needed for validation
        for data, target in valid_data:
            if gpu_available:
                data, target = data.cuda(), target.cuda()
            output = model(data)
            loss = criterion(output, target)
            valid_loss += loss.item()*data.size(0)

    train_loss = train_loss/len(train_data.dataset)
    valid_loss = valid_loss/len(valid_data.dataset)

    print("Epoch {}.....Train Loss = {:.6f}....Valid Loss = {:.6f}".format(epoch, train_loss, valid_loss))
    if valid_loss < valid_loss_min:
        torch.save(model.state_dict(), 'model_traffic.pt')
        print("Valid Loss min {:.6f} >>> {:.6f}".format(valid_loss_min, valid_loss))
        valid_loss_min = valid_loss
I am using a GPU through Google Colab.
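In case it is relevant, I understand the usual first suspect for slow epochs on Colab is the input pipeline rather than a model of this size; a sketch of DataLoader settings that keep the GPU fed (train_dataset is a placeholder for however the dataset is actually built):

from torch.utils.data import DataLoader

# num_workers parallelizes decoding/augmentation on the CPU;
# pin_memory speeds up host-to-GPU copies
train_data = DataLoader(train_dataset, batch_size=64, shuffle=True,
                        num_workers=2, pin_memory=True)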
I am trying to use GPUs to accelerate the convolution and pooling operations in my neural network application (spiking networks). I wrote a small script to see how much speedup I can get by using TensorFlow. Surprisingly, SciPy/NumPy does better. In my application, all the inputs (images) are stored on disk, but for this example I created a randomly initialized image of size 27x27 and a weights kernel of size 5x5x30, and I made sure that I am not transferring anything from CPU to GPU. I also increased the input image size to 270x270 and the weights kernel to 7x7x30, and still I don't see any improvement. I made sure that all the TF methods are in fact being executed on my GPUs by setting
sess =tf.Session(config=tf.ConfigProto(log_device_placement=True))
I have access to 2 GPUs(Tesla K20m) on a cluster.
Here's my code:
import tensorflow as tf
import numpy as np
from scipy import signal
import time

sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))

image_size = 27
kernel_size = 5
nofMaps = 30

def convolution(Image, weights):
    in_channels = 1  # 1 because our image has 1 unit in the -z direction
    out_channels = weights.shape[-1]
    strides_1d = [1, 1, 1, 1]

    #in_2d = tf.constant(Image, dtype=tf.float32)
    in_2d = Image
    #filter_3d = tf.constant(weights, dtype=tf.float32)
    filter_3d = weights

    in_width = int(in_2d.shape[0])
    in_height = int(in_2d.shape[1])

    filter_width = int(filter_3d.shape[0])
    filter_height = int(filter_3d.shape[1])

    input_4d = tf.reshape(in_2d, [1, in_height, in_width, in_channels])
    kernel_4d = tf.reshape(filter_3d, [filter_height, filter_width, in_channels, out_channels])

    inter = tf.nn.conv2d(input_4d, kernel_4d, strides=strides_1d, padding='VALID')
    output_3d = tf.squeeze(inter)
    output_3d = sess.run(output_3d)
    return output_3d

def pooling(Image):
    in_channels = Image.shape[-1]
    Image_3d = tf.constant(Image, dtype=tf.float32)
    in_width = int(Image.shape[0])
    in_height = int(Image.shape[1])
    Image_4d = tf.reshape(Image_3d, [1, in_width, in_height, in_channels])
    pooled_pots4d = tf.layers.max_pooling2d(inputs=Image_4d, pool_size=[2, 2], strides=2)
    pooled_pots3d = tf.squeeze(pooled_pots4d)
    return sess.run(pooled_pots3d)

t1 = time.time()
#with tf.device('/device:GPU:1'):
Image = tf.random_uniform([image_size, image_size], name='Image')
weights = tf.random_uniform([kernel_size, kernel_size, nofMaps], name='Weights')
conv_result = convolution(Image, weights)
pool_result = pooling(conv_result)
print('Time taken:{}'.format(time.time()-t1))
#with tf.device('/device:CPU:0'):
print('Pool_result shape:{}'.format(pool_result.shape))
#print('first map of pool result:\n',pool_result[:,:,0])

def scipy_convolution(Image, weights):
    instant_conv1_pots = np.zeros((image_size-kernel_size+1, image_size-kernel_size+1, nofMaps))
    for i in range(weights.shape[-1]):
        instant_conv1_pots[:,:,i] = signal.correlate(Image, weights[:,:,i], mode='valid', method='fft')
    return instant_conv1_pots

def scipy_pooling(conv1_spikes):
    '''
    Reshape splitting each of the two axes into two each such that the
    latter of the split axes is of the same length as the block size.
    This would give us a 4D array. Then, perform maximum finding along those
    latter axes, which would be the second and fourth axes in that 4D array.
    https://stackoverflow.com/questions/41813722/numpy-array-reshaped-but-how-to-change-axis-for-pooling
    '''
    if conv1_spikes.shape[0] % 2 != 0:  # if the array has odd size, omit the last row and column
        conv1_spikes = conv1_spikes[0:-1, 0:-1, :]
    m, n = conv1_spikes[:,:,0].shape
    o = conv1_spikes.shape[-1]
    pool1_spikes = np.zeros((m//2, n//2, o))
    for i in range(o):
        pool1_spikes[:,:,i] = conv1_spikes[:,:,i].reshape(m//2, 2, n//2, 2).max(axis=(1,3))
    return pool1_spikes

t1 = time.time()
Image = np.random.rand(image_size, image_size)
weights = np.random.rand(kernel_size, kernel_size, nofMaps)
conv_result = scipy_convolution(Image, weights)
pool_result = scipy_pooling(conv_result)
print('Time taken:{}'.format(time.time()-t1))
print('Pool_result shape:{}'.format(pool_result.shape))
#print('first map of pool result:\n',pool_result[:,:,0])
Results are as follows (the first pair of lines is TensorFlow, the second pair is SciPy):
Time taken:0.746644973755
Pool_result shape:(11, 11, 30)
Time taken:0.0127348899841
Pool_result shape:(11, 11, 30)
Following a commenter's suggestions, I set image_size=270 and enclosed both the convolution and pooling calls in a for loop. Now TF performs better than SciPy. Note that I am using tf.nn.conv2d and NOT tf.layers.conv2d. I also set the parameter use_cudnn_on_gpu=True in tf.nn.conv2d, but that neither hurt nor helped.
Here's the code:
import tensorflow as tf
import numpy as np
from scipy import signal
import time

sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))

image_size = 270
kernel_size = 5
nofMaps = 30

def convolution(Image, weights):
    in_channels = 1  # 1 because our image has 1 unit in the -z direction
    out_channels = weights.shape[-1]
    strides_1d = [1, 1, 1, 1]

    #in_2d = tf.constant(Image, dtype=tf.float32)
    in_2d = Image
    #filter_3d = tf.constant(weights, dtype=tf.float32)
    filter_3d = weights

    in_width = int(in_2d.shape[0])
    in_height = int(in_2d.shape[1])

    filter_width = int(filter_3d.shape[0])
    filter_height = int(filter_3d.shape[1])

    input_4d = tf.reshape(in_2d, [1, in_height, in_width, in_channels])
    kernel_4d = tf.reshape(filter_3d, [filter_height, filter_width, in_channels, out_channels])

    inter = tf.nn.conv2d(input_4d, kernel_4d, strides=strides_1d, padding='VALID', use_cudnn_on_gpu=True)
    output_3d = tf.squeeze(inter)
    #t1 = time.time()
    output_3d = sess.run(output_3d)
    #print('TF Time for Conv:{}'.format(time.time()-t1))
    return output_3d

def pooling(Image):
    in_channels = Image.shape[-1]
    Image_3d = tf.constant(Image, dtype=tf.float32)
    in_width = int(Image.shape[0])
    in_height = int(Image.shape[1])
    Image_4d = tf.reshape(Image_3d, [1, in_width, in_height, in_channels])
    pooled_pots4d = tf.layers.max_pooling2d(inputs=Image_4d, pool_size=[2, 2], strides=2)
    pooled_pots3d = tf.squeeze(pooled_pots4d)
    #t1 = time.time()
    pool_res = sess.run(pooled_pots3d)
    #print('TF Time for Pool:{}'.format(time.time()-t1))
    return pool_res

#with tf.device('/device:GPU:1'):
Image = tf.random_uniform([image_size, image_size], name='Image')
weights = tf.random_uniform([kernel_size, kernel_size, nofMaps], name='Weights')
#init = tf.global_variables_initializer
#sess.run(init)

t1 = time.time()
for i in range(150):
    #t1 = time.time()
    conv_result = convolution(Image, weights)
    pool_result = pooling(conv_result)
    #print('TF Time taken:{}'.format(time.time()-t1))
print('TF Time taken:{}'.format(time.time()-t1))
#with tf.device('/device:CPU:0'):
print('TF Pool_result shape:{}'.format(pool_result.shape))
#print('first map of pool result:\n',pool_result[:,:,0])

def scipy_convolution(Image, weights):
    instant_conv1_pots = np.zeros((image_size-kernel_size+1, image_size-kernel_size+1, nofMaps))
    for i in range(weights.shape[-1]):
        instant_conv1_pots[:,:,i] = signal.correlate(Image, weights[:,:,i], mode='valid', method='fft')
    return instant_conv1_pots

def scipy_pooling(conv1_spikes):
    '''
    Reshape splitting each of the two axes into two each such that the
    latter of the split axes is of the same length as the block size.
    This would give us a 4D array. Then, perform maximum finding along those
    latter axes, which would be the second and fourth axes in that 4D array.
    https://stackoverflow.com/questions/41813722/numpy-array-reshaped-but-how-to-change-axis-for-pooling
    '''
    if conv1_spikes.shape[0] % 2 != 0:  # if the array has odd size, omit the last row and column
        conv1_spikes = conv1_spikes[0:-1, 0:-1, :]
    m, n = conv1_spikes[:,:,0].shape
    o = conv1_spikes.shape[-1]
    pool1_spikes = np.zeros((m//2, n//2, o))
    for i in range(o):
        pool1_spikes[:,:,i] = conv1_spikes[:,:,i].reshape(m//2, 2, n//2, 2).max(axis=(1,3))
    return pool1_spikes

Image = np.random.rand(image_size, image_size)
weights = np.random.rand(kernel_size, kernel_size, nofMaps)

t1 = time.time()
for i in range(150):
    conv_result = scipy_convolution(Image, weights)
    pool_result = scipy_pooling(conv_result)
print('Scipy Time taken:{}'.format(time.time()-t1))
print('Scipy Pool_result shape:{}'.format(pool_result.shape))
#print('first map of pool result:\n',pool_result[:,:,0])
Here are the results:
image_size = 27x27
kernel_size = 5x5x30
iterations = 150
TF Time taken:11.0800771713
TF Pool_result shape:(11, 11, 30)
Scipy Time taken:1.4141368866
Scipy Pool_result shape:(11, 11, 30)
image_size = 270x270
kernel_size = 5x5x30
iterations = 150
TF Time taken:26.2359631062
TF Pool_result shape:(133, 133, 30)
Scipy Time taken:31.6651778221
Scipy Pool_result shape:(11, 11, 30)
image_size = 500x500
kernel_size = 5x5x30
iterations = 150
TF Time taken:89.7967050076
TF Pool_result shape:(248, 248, 30)
Scipy Time taken:143.391746044
Scipy Pool_result shape:(248, 248, 30)
In the 2nd case you can see that I got about an 18% reduction in time.
In the 3rd case you can see that I got about a 38% reduction in time.
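One caveat about my own benchmark: convolution() and pooling() each add new ops to the graph and call sess.run on every iteration, so graph construction is being timed along with execution. A sketch of the same measurement with the graph built once outside the loop (TF 1.x API, sizes as placeholders):

import tensorflow as tf
import time

image_size, kernel_size, nofMaps = 270, 5, 30
image = tf.random_uniform([1, image_size, image_size, 1])
kernel = tf.random_uniform([kernel_size, kernel_size, 1, nofMaps])
conv = tf.nn.conv2d(image, kernel, strides=[1, 1, 1, 1], padding='VALID')
pool = tf.nn.max_pool(conv, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
out = tf.squeeze(pool)

with tf.Session() as sess:
    sess.run(out)  # warm-up run, excluded from the timing
    t1 = time.time()
    for _ in range(150):
        result = sess.run(out)
    print('Time taken:{}'.format(time.time() - t1))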
I'm trying to build a speech recognition system with TensorFlow.
The input data is a numpy array of size 50000 x 1.
The output (mapping) data is a numpy array of size 400 x 1.
Input and mapping data are passed in batches of 2 in a list.
I've used this tutorial to design the neural network. Following is the code snippet:
For RNN:
input_data = tf.placeholder(tf.float32, [batch_size, sound_constants.MAX_ROW_SIZE_IN_DATA, sound_constants.MAX_COLUMN_SIZE_IN_DATA], name="train_input")
target = tf.placeholder(tf.float32, [batch_size, sound_constants.MAX_ROW_SIZE_IN_TXT, sound_constants.MAX_COLUMN_SIZE_IN_TXT], name="train_output")
fwd_cell = tf.nn.rnn_cell.BasicLSTMCell(num_hidden, state_is_tuple=True, forget_bias=1.0)
# creating one backward cell
bkwd_cell = tf.nn.rnn_cell.BasicLSTMCell(num_hidden, state_is_tuple=True, forget_bias=1.0)
# creating bidirectional RNN
val, _, _ = tf.nn.static_bidirectional_rnn(fwd_cell, bkwd_cell, tf.unstack(input_data), dtype=tf.float32)
For feeding data:
feed = {g['input_data'] : trb[0], g['target'] : trb[1], g['dropout'] : 0.6}
accuracy_, _ = sess.run([g['accuracy'], g['ts']], feed_dict=feed)
accuracy += accuracy_
When I ran the code, I got this error:
Traceback (most recent call last):
File "/home/wolborg/PycharmProjects/speech-to-text-rnn/src/rnn_train_1.py", line 205, in <module>
tr_losses, te_losses = train_network(g)
File "/home/wolborg/PycharmProjects/speech-to-text-rnn/src/rnn_train_1.py", line 177, in train_network
accuracy_, _ = sess.run([g['accuracy'], g['ts']], feed_dict=feed)
File "/home/wolborg/anaconda2/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 895, in run
run_metadata_ptr)
File "/home/wolborg/anaconda2/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 1102, in _run
raise ValueError('Tensor %s may not be fed.' % subfeed_t)
ValueError: Tensor Tensor("Const:0", shape=(), dtype=float32) may not be fed.
Process finished with exit code 1
Earlier, I was facing this issue with tf.sparse_placeholder; after some browsing, I changed the input type to tf.placeholder and made the related changes. Now I'm clueless about where I'm making the error.
Please suggest how I should feed the data.
Entire code:
import tensorflow as tf

# for taking MFCC and label input
import numpy as np
import rnn_input_data_1
import sound_constants

# input constants

# Training Parameters
num_input = 10  # mfcc data input
training_data_size = 8  # determines number of files in training and testing module
testing_data_size = num_input - training_data_size

# Network Parameters
learning_rate = 0.0001  # for a large training set, it can be set to 0.001
num_hidden = 200  # number of hidden units
num_classes = 28  # total alphabet classes (a-z) + extra symbols (', ' ')
epoch = 1  # number of iterations
batch_size = 2  # number of batches

mfcc_coeffs, text_data = rnn_input_data_1.mfcc_and_text_encoding()

class DataGenerator:
    def __init__(self, data_size):
        self.ptr = 0
        self.epochs = 0
        self.data_size = data_size

    def next_batch(self):
        self.ptr += batch_size
        if self.ptr > self.data_size:
            self.epochs += 1
            self.ptr = 0
        return mfcc_coeffs[self.ptr-batch_size : self.ptr], text_data[self.ptr-batch_size : self.ptr]

def reset_graph():
    if 'sess' in globals() and sess:
        sess.close()
    tf.reset_default_graph()

def struct_network():
    print ('Inside struct network !!')
    reset_graph()

    input_data = tf.placeholder(tf.float32, [batch_size, sound_constants.MAX_ROW_SIZE_IN_DATA, sound_constants.MAX_COLUMN_SIZE_IN_DATA], name="train_input")
    target = tf.placeholder(tf.float32, [batch_size, sound_constants.MAX_ROW_SIZE_IN_TXT, sound_constants.MAX_COLUMN_SIZE_IN_TXT], name="train_output")
    keep_prob = tf.constant(1.0)

    fwd_cell = tf.nn.rnn_cell.BasicLSTMCell(num_hidden, state_is_tuple=True, forget_bias=1.0)
    # creating one backward cell
    bkwd_cell = tf.nn.rnn_cell.BasicLSTMCell(num_hidden, state_is_tuple=True, forget_bias=1.0)
    # creating bidirectional RNN
    val, _, _ = tf.nn.static_bidirectional_rnn(fwd_cell, bkwd_cell, tf.unstack(input_data), dtype=tf.float32)

    # adding dropouts
    val = tf.nn.dropout(val, keep_prob)
    val = tf.transpose(val, [1, 0, 2])
    last = tf.gather(val, int(val.get_shape()[0]) - 1)

    print ('BiRNN created !!')
    print ('Last Size: ', last.get_shape())

    weight = tf.Variable(tf.truncated_normal([num_hidden * 2, sound_constants.MAX_ROW_SIZE_IN_TXT]))
    bias = tf.Variable(tf.constant(0.1, shape=[sound_constants.MAX_ROW_SIZE_IN_TXT]))

    # mapping to 28 output classes
    logits = tf.matmul(last, weight) + bias
    prediction = tf.nn.softmax(logits)
    prediction = tf.reshape(prediction, shape=[batch_size, sound_constants.MAX_ROW_SIZE_IN_TXT, sound_constants.MAX_COLUMN_SIZE_IN_TXT])

    # getting probability distribution
    mat1 = tf.cast(tf.argmax(prediction, 1), tf.float32)
    correct = tf.equal(prediction, target)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

    logits = tf.reshape(logits, shape=[batch_size, sound_constants.MAX_ROW_SIZE_IN_TXT, sound_constants.MAX_COLUMN_SIZE_IN_TXT])
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=target))
    train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)

    # returning components as dictionary elements
    return {'input_data': input_data,
            'target': target,
            'dropout': keep_prob,
            'loss': loss,
            'ts': train_step,
            'preds': prediction,
            'accuracy': accuracy
            }

def train_network(graph):
    # initialize tensorflow session and all variables
    # tf_gpu_config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)
    # tf_gpu_config.gpu_options.allow_growth = True
    # with tf.Session(config=tf_gpu_config) as sess:
    with tf.Session() as sess:
        train_instance = DataGenerator(training_data_size)
        test_instance = DataGenerator(testing_data_size)

        print ('Training data size: ', train_instance.data_size)
        print ('Testing data size: ', test_instance.data_size)

        sess.run(tf.global_variables_initializer())
        print ('Starting session...')

        step, accuracy = 0, 0
        tr_losses, te_losses = [], []
        current_epoch = 0

        while current_epoch < epoch:
            step += 1
            trb = train_instance.next_batch()
            feed = {g['input_data']: trb[0], g['target']: trb[1], g['dropout']: 0.6}
            accuracy_, _ = sess.run([g['accuracy'], g['ts']], feed_dict=feed)
            accuracy += accuracy_

            if train_instance.epochs > current_epoch:
                current_epoch += 1
                tr_losses.append(accuracy / step)
                step, accuracy = 0, 0

                # eval test set
                te_epoch = test_instance.epochs
                while test_instance.epochs == te_epoch:
                    step += 1
                    print ('Testing round ', step)
                    trc = test_instance.next_batch()
                    feed = {g['input_data']: trc[0], g['target']: trc[1]}
                    accuracy_ = sess.run([g['accuracy']], feed_dict=feed)[0]
                    accuracy += accuracy_

                te_losses.append(accuracy / step)
                step, accuracy = 0, 0
                print("Accuracy after epoch", current_epoch, " - tr:", tr_losses[-1], "- te:", te_losses[-1])

        return tr_losses, te_losses

g = struct_network()
tr_losses, te_losses = train_network(g)
You defined keep_prob as a tf.constant, but you are then trying to feed a value into it. Replace keep_prob = tf.constant(1.0) with keep_prob = tf.placeholder(tf.float32, []) or keep_prob = tf.placeholder_with_default(1.0, []).
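A minimal sketch of the difference: a placeholder_with_default runs with its default value when nothing is fed and accepts a fed value when one is provided:

import tensorflow as tf

keep_prob = tf.placeholder_with_default(1.0, shape=[])
x = tf.nn.dropout(tf.ones([4]), keep_prob)

with tf.Session() as sess:
    print(sess.run(x))                               # uses the default keep_prob = 1.0
    print(sess.run(x, feed_dict={keep_prob: 0.5}))   # feeding now works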