I'm new to deep learning. As a first step I created and trained a model in Python with Keras and froze it with this code:
def export_model(MODEL_NAME, input_node_name, output_node_name):
    tf.train.write_graph(K.get_session().graph_def, 'out', \
        MODEL_NAME + '_graph.pbtxt')
    tf.train.Saver().save(K.get_session(), 'out/' + MODEL_NAME + '.chkp')
    freeze_graph.freeze_graph('out/' + MODEL_NAME + '_graph.pbtxt', None, \
        False, 'out/' + MODEL_NAME + '.chkp', output_node_name, \
        "save/restore_all", "save/Const:0", \
        'out/frozen_' + MODEL_NAME + '.pb', True, "")
    input_graph_def = tf.GraphDef()
    with tf.gfile.Open('out/frozen_' + MODEL_NAME + '.pb', "rb") as f:
        input_graph_def.ParseFromString(f.read())
    output_graph_def = optimize_for_inference_lib.optimize_for_inference(
        input_graph_def, [input_node_name], [output_node_name],
        tf.float32.as_datatype_enum)
    with tf.gfile.FastGFile('out/opt_' + MODEL_NAME + '.pb', "wb") as f:
        f.write(output_graph_def.SerializeToString())
Its output:
checkpoint
Model.chkp.data-00000-of-00001
Model.chkp.index
Model.chkp.meta
Model_graph.pbtxt
frozen_Model.pb
opt_Model.pb
When I read the net in OpenCV C++ with readNetFromTensorflow:
String weights = "frozen_Model.pb";
String pbtxt = "Model_graph.pbtxt";
dnn::Net cvNet = cv::dnn::readNetFromTensorflow(weights, pbtxt);
it raises these errors:
OpenCV(4.0.0-pre) Error: Unspecified error (FAILED: ReadProtoFromBinaryFile(param_file, param). Failed to parse GraphDef file: frozen_Model.pb) in cv::dnn::ReadTFNetParamsFromBinaryFileOrDie, file D:\LIBS\OpenCV-4.00\modules\dnn\src\tensorflow\tf_io.cpp, line 44
and
OpenCV(4.0.0-pre) Error: Assertion failed (const_layers.insert(std::make_pair(name, li)).second) in cv::dnn::experimental_dnn_v4::`anonymous-namespace'::addConstNodes, file D:\LIBS\OpenCV-4.00\modules\dnn\src\tensorflow\tf_importer.cpp, line 555
How can I fix these errors?
Amin, may I ask you to try saving the graph in testing mode:
K.backend.set_learning_phase(0) # <--- This setting makes all the following layers work in test mode
model = Sequential(name = MODEL_NAME)
model.add(Conv2D(filters = 128, kernel_size = (5, 5), activation = 'relu',name = 'FirstLayerConv2D_No1',input_shape = (Width, Height, image_channel)))
...
model.add(Dropout(0.25))
model.add(Dense(100, activation = 'softmax', name = 'endNode'))
# Create a graph definition (with no weights)
sess = K.backend.get_session()
sess.as_default()
tf.train.write_graph(sess.graph.as_graph_def(), "", 'graph_def.pb', as_text=False)
Then freeze your checkpoint files together with the newly created graph_def.pb using the freeze_graph.py script (do not forget to pass the --input_binary flag).
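If you prefer to stay in Python, the same call used in export_model above can be pointed at the binary graph with input_binary=True; a minimal sketch based on that call:

freeze_graph.freeze_graph('graph_def.pb', None, \
    True,  # input_binary=True because graph_def.pb was written with as_text=False
    'out/' + MODEL_NAME + '.chkp', output_node_name, \
    "save/restore_all", "save/Const:0", \
    'out/frozen_' + MODEL_NAME + '.pb', True, "")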
Part of the code:
Create model, train and export_model
train_batch = gen.flow_from_directory(path + 'Train', target_size = (Width, Height), shuffle = False, color_mode = color_mode,
                                      batch_size = batch_size_train, class_mode = 'categorical')
...
X_train, Y_train = next(train_batch)
...
X_train = X_train.reshape(X_train.shape).astype('float32')
...
model = Sequential(name = MODEL_NAME)
model.add(Conv2D(filters = 128, kernel_size = (5, 5), activation = 'relu', name = 'FirstLayerConv2D_No1', input_shape = (Width, Height, image_channel)))
model.add(Conv2D(filters = 128, kernel_size = (3, 3), activation = 'relu'))
model.add(MaxPool2D(pool_size = (2, 2)))
model.add(BatchNormalization())
...
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(200, activation = 'tanh'))
model.add(BatchNormalization())
model.add(Dropout(0.25))
model.add(Dense(100, activation = 'softmax', name = 'endNode'))
model.compile(loss = 'categorical_crossentropy',
              optimizer = SGD(lr = 0.01, momentum = 0.9), metrics = ['accuracy'])
history = model.fit(X_train, Y_train, batch_size = batch_size_fit, epochs = epoch, shuffle = True,
                    verbose = 1, validation_split = .1, validation_data = (X_test, Y_test))
export_model(MODEL_NAME, "FirstLayerConv2D_No1/Relu", "endNode/Softmax")
When writing the graph in Python you need to do the following steps:
with tf.Session(graph=tf.Graph()) as sess:
    # 1. Load saved model
    saved_model = tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], SAVED_MODEL_PATH)

    # 2. Convert variables to constants
    inference_graph_def = tf.graph_util.convert_variables_to_constants(sess, saved_model.graph_def, OUTPUT_NODE_NAMES)

    # 3. Optimize for inference
    optimized_graph_def = optimize_for_inference_lib.optimize_for_inference(inference_graph_def,
                                                                            INPUT_NODE_NAMES,
                                                                            OUTPUT_NODE_NAMES,
                                                                            tf.float32.as_datatype_enum)

    # 4. Save .pb file
    tf.train.write_graph(optimized_graph_def, MODEL_DIR, 'model_name.pb', as_text=False)

    # 5. Transform graph
    transforms = [
        'strip_unused_nodes(type=float, shape=\"1,128,128,3\")',
        'remove_nodes(op=PlaceholderWithDefault)',
        'remove_device',
        'sort_by_execution_order'
    ]
    transformed_graph_def = TransformGraph(optimized_graph_def,
                                           INPUT_NODE_NAMES,
                                           OUTPUT_NODE_NAMES,
                                           transforms)

    # 6. Remove constant nodes and attributes
    for i in reversed(range(len(transformed_graph_def.node))):
        if transformed_graph_def.node[i].op == "Const":
            del transformed_graph_def.node[i]
        for attr in ['T', 'data_format', 'Tshape', 'N', 'Tidx', 'Tdim',
                     'use_cudnn_on_gpu', 'Index', 'Tperm', 'is_training', 'Tpaddings']:
            if attr in transformed_graph_def.node[i].attr:
                del transformed_graph_def.node[i].attr[attr]

    # 7. Save .pbtxt file
    tf.train.write_graph(transformed_graph_def, MODEL_DIR, 'model_name.pbtxt', as_text=True)
Plus, if you have special nodes such as Flatten, you need to remove and rename some nodes manually.
More info here.
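For reference, loading the resulting pair through OpenCV's Python bindings would then look like this (a sketch; the file names are the ones written by the script above):

import cv2

# the text graph from step 7 must describe the same graph as the binary file from step 4
cvNet = cv2.dnn.readNetFromTensorflow('model_name.pb', 'model_name.pbtxt')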
I am trying to construct a batch of (wav_file, label) pairs.
The wav file paths and labels are listed in dev.csv.
The code below is not working:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import tensorflow as tf

FLAGS = tf.app.flags.FLAGS
threads = 1
batch_size = 5
global record_defaults
record_defaults = [['/Users/phoenix/workspace/dataset/data_thchs30/dev/A11_101.wav'], ['8.26'], ['七十 年代 末 我 外出 求学 母亲 叮咛 我 吃饭 要 细嚼慢咽 学习 要 深 钻 细 研']]

def read_record(filename_queue, num_records):
    reader = tf.TextLineReader()
    key, value = reader.read_up_to(filename_queue, num_records)
    wav_filename, duration, transcript = tf.decode_csv(value, record_defaults, field_delim=",")
    wav_reader = tf.WholeFileReader()
    wav_key, wav_value = wav_reader.read_up_to(tf.train.string_input_producer(wav_filename, shuffle=False, capacity=num_records), num_records)
    return [wav_key, transcript]  # throws errors
    # return [wav_key, wav_value]  # works
    # return [wav_filename, duration, transcript]  # works

data_queue = tf.train.string_input_producer(tf.train.match_filenames_once('dev.csv'), shuffle=False)
batch_data = [read_record(data_queue, batch_size) for _ in range(threads)]
capacity = threads * batch_size
batch_values = tf.train.batch_join(batch_data, batch_size=batch_size, capacity=capacity, enqueue_many=True)

init = tf.initialize_all_variables()
with tf.Session() as sess:
    sess.run(init)
    sess.run(tf.initialize_local_variables())
    coord = tf.train.Coordinator()
    print(coord)
    threads = tf.train.start_queue_runners(coord=coord)
    print("threads num: " + str(threads))
    try:
        step = 0
        while not coord.should_stop():
            step += 1
            feat = sess.run([batch_values])
            print("line:", step, feat)
    except tf.errors.OutOfRangeError:
        print(' training for 1 epochs, %d steps', step)
    finally:
        coord.request_stop()
        coord.join(threads)
It throws the errors below; how can I fix it?
dev.csv content as below:
/Users/phoenix/workspace/dataset/data_thchs30/dev/A11_101.wav,8.26,qi shi nian dai mo wo wai chu qiu xue
/Users/phoenix/workspace/dataset/data_thchs30/dev/A11_119.wav,6.9,chen yun tong shi yao qiu gan bu men ren zhen xue xi
I tried to rewrite your code like this.
These are my observations.
The error is no longer thrown, and the values are returned.
An obvious discrepancy is that the batch size for the transcript is double the size specified, so it is 4 instead of 2. It doubles for some reason; there is no such problem for the audio binary.
shapes=[tf.TensorShape(()), tf.TensorShape(batch_size,)] is based on an error I saw which mentioned that I have to specify this using TensorShape. I didn't find the documentation much help, but it is mentioned there:
shapes: (Optional.) A list of fully-defined TensorShape objects with the same length as dtypes, or None.
import tensorflow as tf

tf.logging.set_verbosity(tf.logging.DEBUG)

FLAGS = tf.app.flags.FLAGS
threads = 1
batch_size = 2
record_defaults = [['D:/male.wav'], ['8.26'], ['七十 年代 末 我 外出 求学 母亲 叮咛 我 吃饭 要 细嚼慢咽 学习 要 深 钻 细 研']]

def readbatch(data_queue):
    reader = tf.TextLineReader()
    _, rows = reader.read_up_to(data_queue, batch_size)
    wav_filename, duration, transcript = tf.decode_csv(rows, record_defaults, field_delim=",")
    audioreader = tf.WholeFileReader()
    _, audio = audioreader.read(tf.train.string_input_producer(wav_filename))
    return [audio, transcript]

data_queue = tf.train.string_input_producer(
    tf.train.match_filenames_once('D:/Book1.csv'), shuffle=False)
batch_data = [readbatch(data_queue) for _ in range(threads)]
capacity = threads * batch_size
batch_values = tf.train.batch_join(batch_data, shapes=[tf.TensorShape(()), tf.TensorShape(batch_size,)], capacity=capacity, batch_size=batch_size, enqueue_many=False)

init = tf.initialize_all_variables()
with tf.Session() as sess:
    sess.run(init)
    sess.run(tf.initialize_local_variables())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    try:
        step = 0
        while not coord.should_stop():
            step += 1
            feat = sess.run([batch_values])
            audio = feat[0][0]
            print('Size of audio is ' + str(audio.size))
            script = feat[0][1]
            print('Size of script is ' + str(script.size))
    except tf.errors.OutOfRangeError:
        print(' training for 1 epochs, %d steps', step)
    finally:
        coord.request_stop()
        coord.join(threads)
A sample dataset shows that there is an extra pair:
[[array([b'Text2', b'Text1'], dtype=object), array([[b'Translation-1', b'Translation-2'],
[b'Translation-1', b'Translation-2']], dtype=object)]]
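One guess about the doubling: the nested tf.train.string_input_producer inside readbatch adds a second queue runner on top of the outer one, so the filenames pass through two queues. A possible way to sidestep the nested queue is to read each file's bytes directly with tf.read_file; this is an untested sketch (the shapes/enqueue_many arguments to batch_join would need adjusting to match):

def readbatch(data_queue):
    reader = tf.TextLineReader()
    _, rows = reader.read_up_to(data_queue, batch_size)
    wav_filename, duration, transcript = tf.decode_csv(rows, record_defaults, field_delim=",")
    # read each file's contents directly instead of going through a second queue
    audio = tf.map_fn(tf.read_file, wav_filename, dtype=tf.string)
    return [audio, transcript]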
I'm new to TF and ML.
Details about the data: the features (x) are a (70 x 70 x 70) tensor for each sample, and y is a float for each sample.
The TFRecords were created with the following code:
def convert_to_tf_records():
    def _bytes_feature(value):
        return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

    def _float64_feature(value):
        return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))

    tfrecords_filename = 'A-100-h2-h2o.tfrecords'
    writer = tf.python_io.TFRecordWriter(tfrecords_filename)

    # Get data from db for now.
    db = connect('results-60-70.db')
    data = db.select(selection='Ti')
    i = 0
    for row in data:
        desc = np.array(json.loads(row.descriptor), dtype=np.float32)
        print(desc.shape)
        be = float(row.binding_energy) * 23  # Convert to Kcal/mol ?
        desc = desc.flatten()
        desc = desc.tostring()
        example = tf.train.Example(features=tf.train.Features(feature={'voxel_grid': _bytes_feature(desc), 'binding_energy': _float64_feature(be)}))
        writer.write(example.SerializeToString())
        i += 1
        if i >= 10:
            break
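Not part of the question, but as a sanity check you can read one record back and confirm the voxel grid round-trips to the expected size; a small sketch assuming the file above was written:

import numpy as np
import tensorflow as tf

for serialized in tf.python_io.tf_record_iterator('A-100-h2-h2o.tfrecords'):
    example = tf.train.Example()
    example.ParseFromString(serialized)
    raw = example.features.feature['voxel_grid'].bytes_list.value[0]
    grid = np.frombuffer(raw, dtype=np.float32)
    print(grid.shape)  # expect (343000,), i.e. 70 * 70 * 70
    break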
Input function:
def my_input_function(fname, perform_shuffle=False, repeat_count=None):
    def _parse_elements(example):
        features = tf.parse_single_example(example, features={'voxel_grid': tf.FixedLenFeature([], tf.string), 'binding_energy': tf.FixedLenFeature([], tf.float32)})
        vg = tf.decode_raw(features['voxel_grid'], tf.float32)
        vg = tf.reshape(vg, [70, 70, 70])
        vg = tf.convert_to_tensor(vg, dtype=tf.float32)
        vg = {'voxel_grid': vg}
        e = tf.cast(features['binding_energy'], tf.float32)
        return vg, e

    def input_function():
        dataset = tf.data.TFRecordDataset(fname).map(_parse_elements)
        dataset = dataset.repeat(repeat_count)
        dataset = dataset.batch(5)
        dataset = dataset.prefetch(1)
        if perform_shuffle:
            # note: shuffle() returns a new dataset, so the result must be assigned back
            dataset = dataset.shuffle(20)
        iterator = dataset.make_one_shot_iterator()
        batch_features, batch_labels = iterator.get_next()
        return batch_features, batch_labels

    return input_function
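A quick way to smoke-test this input function outside the Estimator (a hypothetical snippet, assuming the TFRecord file from above exists):

input_fn = my_input_function('A-100-h2-h2o.tfrecords', repeat_count=1)
batch_features, batch_labels = input_fn()
with tf.Session() as sess:
    f, l = sess.run([batch_features, batch_labels])
    print(f['voxel_grid'].shape, l.shape)  # expect (5, 70, 70, 70) and (5,)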
Model function:
def my_model_function(features, labels, mode):
    if mode == tf.estimator.ModeKeys.PREDICT:
        tf.logging.info("my_model_fn: PREDICT, {}".format(mode))
    elif mode == tf.estimator.ModeKeys.EVAL:
        tf.logging.info("my_model_fn: EVAL, {}".format(mode))
    elif mode == tf.estimator.ModeKeys.TRAIN:
        tf.logging.info("my_model_fn: TRAIN, {}".format(mode))

    feature_columns = [tf.feature_column.numeric_column('voxel_grid', shape=(70, 70, 70), dtype=tf.float32)]

    # Create the layer of input
    input_layer = tf.feature_column.input_layer(features, feature_columns)
    input_layer = tf.reshape(input_layer, [-1, 70, 70, 70, 1])

    # Convolution layers
    conv1 = tf.layers.conv3d(inputs=input_layer, strides=(2, 2, 2), filters=32, kernel_size=(7, 7, 7))
    conv2 = tf.layers.conv3d(inputs=conv1, strides=(2, 2, 2), filters=32, kernel_size=(7, 7, 7))
    pool3 = tf.layers.max_pooling3d(inputs=conv2, pool_size=[2, 2, 2], strides=2)
    flat = tf.layers.flatten(pool3)
    dense1 = tf.layers.dense(inputs=flat, units=10, activation=tf.nn.relu)
    dense2 = tf.layers.dense(inputs=dense1, units=10, activation=tf.nn.relu)
    output = tf.layers.dense(inputs=dense2, units=1)

    predictions = {'binding_energy': output}
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)

    # Calculate loss
    loss = tf.losses.mean_squared_error(labels=labels, predictions=predictions)

    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
        train_op = optimizer.minimize(loss=loss, global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)

    # Add evaluation metrics
    eval_metric_ops = {"mse": tf.metrics.mean_squared_error(labels=labels, predictions=predictions['binding_energy'])}
    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
When calling model.train using
model = tf.estimator.Estimator(model_fn=my_model_function, model_dir='./model_dir')
model.train(input_fn=my_input_function('A-100-h2-h2o.tfrecords'), steps=100)
I get the following error.
TypeError: Failed to convert object of type <class 'dict'> to Tensor.
Found it! Changing
# Calculate loss
loss = tf.losses.mean_squared_error(labels=labels, predictions=predictions)
to
# Calculate loss
loss = tf.losses.mean_squared_error(labels=labels, predictions=predictions['binding_energy'])
solves the issue: tf.losses.mean_squared_error expects a tensor, but predictions here is a dict, so the tensor it holds has to be passed instead.
For this code:
# Initialize generator - feed noise and profile images
noise = random_normal(shape = (-1, 8, 8, z_dim), mean = 0.0, stddev = 1.0, dtype = None, seed = None)
noise = Input(tensor = noise)
input_data = Input(shape = (128, 128, 3))
generated_img = generator_network(input_data, noise)
# Initialize discriminator - feed frontal faces as ground truth and the generated images as fake
generated_img = Input(tensor = generated_img)
true_score = discriminator_network(input_data)
fake_score = discriminator_network(generated_img)
# Optimizer
Adam_optimizer = Adam(lr = learning_rate, beta_1 = 0.9, beta_2 = 0.999, epsilon = 1e-08, decay = decay_rate)
# Losses
discrim_loss = discriminator_loss(true_score, fake_score)
var_loss = variation_loss(input_data, generated_img)
pix_loss = pixel_loss(input_data, generated_img)
cross_loss = cross_entropy_loss(true_score, fake_score)
gen_loss = generator_loss(discrim_loss, var_loss, pix_loss, cross_loss)
# Models
discriminator = Model(inputs = generated_img, outputs = fake_score)
generator = Model(inputs = [input_data, noise], outputs = generated_img)
# Compilers
discriminator.compile(optimizer = Adam_optimizer, loss = discriminator_loss)
generator.compile( optimizer = Adam_optimizer, loss = generator_loss)
I get this error:
Traceback (most recent call last):
File "main.py", line 74, in
generator = Model(inputs = [input_data, noise], outputs = generated_img)
File "/home/diana/Documents/VirtualNN/local/lib/python2.7/site-packages/keras/legacy/interfaces.py", line 87, in wrapper
return func(*args, **kwargs)
File "/home/diana/Documents/VirtualNN/local/lib/python2.7/site-packages/keras/engine/topology.py", line 1793, in init
str(layers_with_complete_input))
RuntimeError: Graph disconnected: cannot obtain value for tensor Tensor("conv2d_35/Relu:0", shape=(?, ?, ?, 3), dtype=float32) at layer "input_3". The following previous layers were accessed without issue: []
Does anyone know why it says that my generator model is not a connected graph? From my understanding, it is connected.
But maybe there is something else I don't see.
The graph is reported as disconnected because generated_img is re-wrapped in a new Input layer (the "input_3" in the error), which severs its link back to input_data and noise. You should wrap the generator network and the discriminator network as two sequential layers within another network if your purpose is to build a GAN model. For example:
from keras.models import Sequential
# generator_network() and discriminator_network() should each have an Input layer
# that defines their input shapes respectively
g_network = generator_network()
d_network = discriminator_network()
gan_network = Sequential()
gan_network.add(g_network)
d_network.trainable = False
gan_network.add(d_network)
# Compilers
d_network.compile(optimizer = Adam_optimizer, loss = discriminator_loss)
gan_network.compile(optimizer = Adam_optimizer, loss = generator_loss)
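With that structure, training typically alternates between the two compiled models. A rough sketch of one step follows (x_batch, z_batch, and the label arrays are assumptions, not from the original code); note that Keras captures the trainable flag at compile time, so d_network should be compiled before it is frozen inside gan_network:

import numpy as np

# 1. train the discriminator on real and generated batches
fake_imgs = g_network.predict(z_batch)
d_loss_real = d_network.train_on_batch(x_batch, np.ones((len(x_batch), 1)))
d_loss_fake = d_network.train_on_batch(fake_imgs, np.zeros((len(fake_imgs), 1)))

# 2. train the generator through the frozen discriminator
g_loss = gan_network.train_on_batch(z_batch, np.ones((len(z_batch), 1)))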
I'm trying to make a speech recognition system with TensorFlow.
The input data is a NumPy array of size 50000 x 1.
The output (mapping) data is a NumPy array of size 400 x 1.
Input and mapping data are passed in batches of 2 in a list.
I've used this tutorial to design the neural network. The following is the code snippet:
For RNN:
input_data = tf.placeholder(tf.float32, [batch_size, sound_constants.MAX_ROW_SIZE_IN_DATA, sound_constants.MAX_COLUMN_SIZE_IN_DATA], name="train_input")
target = tf.placeholder(tf.float32, [batch_size, sound_constants.MAX_ROW_SIZE_IN_TXT, sound_constants.MAX_COLUMN_SIZE_IN_TXT], name="train_output")
fwd_cell = tf.nn.rnn_cell.BasicLSTMCell(num_hidden, state_is_tuple=True, forget_bias=1.0)
# creating one backward cell
bkwd_cell = tf.nn.rnn_cell.BasicLSTMCell(num_hidden, state_is_tuple=True, forget_bias=1.0)
# creating bidirectional RNN
val, _, _ = tf.nn.static_bidirectional_rnn(fwd_cell, bkwd_cell, tf.unstack(input_data), dtype=tf.float32)
For feeding data:
feed = {g['input_data'] : trb[0], g['target'] : trb[1], g['dropout'] : 0.6}
accuracy_, _ = sess.run([g['accuracy'], g['ts']], feed_dict=feed)
accuracy += accuracy_
When I ran the code, I got this error:
Traceback (most recent call last):
File "/home/wolborg/PycharmProjects/speech-to-text-rnn/src/rnn_train_1.py", line 205, in <module>
tr_losses, te_losses = train_network(g)
File "/home/wolborg/PycharmProjects/speech-to-text-rnn/src/rnn_train_1.py", line 177, in train_network
accuracy_, _ = sess.run([g['accuracy'], g['ts']], feed_dict=feed)
File "/home/wolborg/anaconda2/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 895, in run
run_metadata_ptr)
File "/home/wolborg/anaconda2/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 1102, in _run
raise ValueError('Tensor %s may not be fed.' % subfeed_t)
ValueError: Tensor Tensor("Const:0", shape=(), dtype=float32) may not be fed.
Process finished with exit code 1
Earlier I was facing this issue with tf.sparse_placeholder; after some browsing I changed the input type to tf.placeholder and made the related changes. Now I'm clueless about where I'm making the error.
Please suggest how I should feed the data.
Entire code:
import tensorflow as tf

# for taking MFCC and label input
import numpy as np
import rnn_input_data_1
import sound_constants

# input constants

# Training Parameters
num_input = 10  # mfcc data input
training_data_size = 8  # determines number of files in training and testing module
testing_data_size = num_input - training_data_size

# Network Parameters
learning_rate = 0.0001  # for large training set, it can be set 0.001
num_hidden = 200  # number of hidden layers
num_classes = 28  # total alphabet classes (a-z) + extra symbols (', ' ')
epoch = 1  # number of iterations
batch_size = 2  # number of batches

mfcc_coeffs, text_data = rnn_input_data_1.mfcc_and_text_encoding()

class DataGenerator:
    def __init__(self, data_size):
        self.ptr = 0
        self.epochs = 0
        self.data_size = data_size

    def next_batch(self):
        self.ptr += batch_size
        if self.ptr > self.data_size:
            self.epochs += 1
            self.ptr = 0
        return mfcc_coeffs[self.ptr - batch_size : self.ptr], text_data[self.ptr - batch_size : self.ptr]

def reset_graph():
    if 'sess' in globals() and sess:
        sess.close()
    tf.reset_default_graph()

def struct_network():
    print ('Inside struct network !!')
    reset_graph()
    input_data = tf.placeholder(tf.float32, [batch_size, sound_constants.MAX_ROW_SIZE_IN_DATA, sound_constants.MAX_COLUMN_SIZE_IN_DATA], name="train_input")
    target = tf.placeholder(tf.float32, [batch_size, sound_constants.MAX_ROW_SIZE_IN_TXT, sound_constants.MAX_COLUMN_SIZE_IN_TXT], name="train_output")
    keep_prob = tf.constant(1.0)

    fwd_cell = tf.nn.rnn_cell.BasicLSTMCell(num_hidden, state_is_tuple=True, forget_bias=1.0)
    # creating one backward cell
    bkwd_cell = tf.nn.rnn_cell.BasicLSTMCell(num_hidden, state_is_tuple=True, forget_bias=1.0)
    # creating bidirectional RNN
    val, _, _ = tf.nn.static_bidirectional_rnn(fwd_cell, bkwd_cell, tf.unstack(input_data), dtype=tf.float32)

    # adding dropouts
    val = tf.nn.dropout(val, keep_prob)
    val = tf.transpose(val, [1, 0, 2])
    last = tf.gather(val, int(val.get_shape()[0]) - 1)
    # creating bidirectional RNN
    print ('BiRNN created !!')
    print ('Last Size: ', last.get_shape())

    weight = tf.Variable(tf.truncated_normal([num_hidden * 2, sound_constants.MAX_ROW_SIZE_IN_TXT]))
    bias = tf.Variable(tf.constant(0.1, shape=[sound_constants.MAX_ROW_SIZE_IN_TXT]))

    # mapping to 28 output classes
    logits = tf.matmul(last, weight) + bias
    prediction = tf.nn.softmax(logits)
    prediction = tf.reshape(prediction, shape = [batch_size, sound_constants.MAX_ROW_SIZE_IN_TXT, sound_constants.MAX_COLUMN_SIZE_IN_TXT])

    # getting probability distribution
    mat1 = tf.cast(tf.argmax(prediction, 1), tf.float32)
    correct = tf.equal(prediction, target)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
    logits = tf.reshape(logits, shape=[batch_size, sound_constants.MAX_ROW_SIZE_IN_TXT, sound_constants.MAX_COLUMN_SIZE_IN_TXT])
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=target))
    train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)

    # returning components as dictionary elements
    return {'input_data': input_data,
            'target': target,
            'dropout': keep_prob,
            'loss': loss,
            'ts': train_step,
            'preds': prediction,
            'accuracy': accuracy
            }

def train_network(graph):
    # initialize tensorflow session and all variables
    # tf_gpu_config = tf.ConfigProto(allow_soft_placement = True, log_device_placement = True)
    # tf_gpu_config.gpu_options.allow_growth = True
    # with tf.Session(config = tf_gpu_config) as sess:
    with tf.Session() as sess:
        train_instance = DataGenerator(training_data_size)
        test_instance = DataGenerator(testing_data_size)
        print ('Training data size: ', train_instance.data_size)
        print ('Testing data size: ', test_instance.data_size)
        sess.run(tf.global_variables_initializer())
        print ('Starting session...')
        step, accuracy = 0, 0
        tr_losses, te_losses = [], []
        current_epoch = 0
        while current_epoch < epoch:
            step += 1
            trb = train_instance.next_batch()
            feed = {g['input_data']: trb[0], g['target']: trb[1], g['dropout']: 0.6}
            accuracy_, _ = sess.run([g['accuracy'], g['ts']], feed_dict=feed)
            accuracy += accuracy_
            if train_instance.epochs > current_epoch:
                current_epoch += 1
                tr_losses.append(accuracy / step)
                step, accuracy = 0, 0

                # eval test set
                te_epoch = test_instance.epochs
                while test_instance.epochs == te_epoch:
                    step += 1
                    print ('Testing round ', step)
                    trc = test_instance.next_batch()
                    feed = {g['input_data']: trc[0], g['target']: trc[1]}
                    accuracy_ = sess.run([g['accuracy']], feed_dict=feed)[0]
                    accuracy += accuracy_
                te_losses.append(accuracy / step)
                step, accuracy = 0, 0
                print("Accuracy after epoch", current_epoch, " - tr:", tr_losses[-1], "- te:", te_losses[-1])
    return tr_losses, te_losses

g = struct_network()
tr_losses, te_losses = train_network(g)
You defined keep_prob as a tf.constant, but then try to feed a value into it. Replace keep_prob = tf.constant(1.0) with keep_prob = tf.placeholder(tf.float32, []) or keep_prob = tf.placeholder_with_default(1.0, []).
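Applied to struct_network above, the placeholder_with_default variant is convenient because the test-time feed (which omits g['dropout']) keeps working unchanged; a minimal sketch:

keep_prob = tf.placeholder_with_default(1.0, shape=[], name='keep_prob')
...
val = tf.nn.dropout(val, keep_prob)
...
# training step: feed = {g['input_data']: trb[0], g['target']: trb[1], g['dropout']: 0.6}
# test step: feed = {g['input_data']: trc[0], g['target']: trc[1]}  # keep_prob falls back to 1.0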
I have copied a text file to an Excel sheet, separating the cells with the ; delimiter.
I need to plot a chart from the same file, which I achieved. But since all the copied values are of type str, my chart gives me wrong points.
Please suggest how to overcome this. The plot should be made from int values.
from datetime import date
from openpyxl import Workbook,load_workbook
from openpyxl.chart import (
LineChart,
Reference,
Series,
)
from openpyxl.chart.axis import DateAxis
excelfile = "C:\Users\lenovo\Desktop\how\openpychart.xlsx"
wb = Workbook()
ws = wb.active
f = open("C:\Users\lenovo\Desktop\sample.txt")
data = []
num = f.readlines()
for line in num:
line = line.split(";")
ws.append(line)
f.close()
wb.save(excelfile)
wb.close()
wb = load_workbook(excelfile, data_only=True)
ws = wb.active
c1 = LineChart()
c1.title = "Line Chart"
##c1.style = 13
c1.y_axis.title = 'Size'
c1.x_axis.title = 'Test Number'
data = Reference(ws, min_col=6, min_row=2, max_col=6, max_row=31)
series = Series(data, title='4th average')
c1.append(series)
data = Reference(ws, min_col=7, min_row=2, max_col=7, max_row=31)
series = Series(data, title='Defined Capacity')
c1.append(series)
##c1.add_data(data, titles_from_data=True)
# Style the lines
s1 = c1.series[0]
s1.marker.symbol = "triangle"
s1.marker.graphicalProperties.solidFill = "FF0000" # Marker filling
s1.marker.graphicalProperties.line.solidFill = "FF0000" # Marker outline
s1.graphicalProperties.line.noFill = True
s2 = c1.series[1]
s2.graphicalProperties.line.solidFill = "00AAAA"
s2.graphicalProperties.line.dashStyle = "sysDot"
s2.graphicalProperties.line.width = 100050 # width in EMUs
##s2 = c1.series[2]
##s2.smooth = True # Make the line smooth
ws.add_chart(c1, "A10")
##
##from copy import deepcopy
##stacked = deepcopy(c1)
##stacked.grouping = "stacked"
##stacked.title = "Stacked Line Chart"
##ws.add_chart(stacked, "A27")
##
##percent_stacked = deepcopy(c1)
##percent_stacked.grouping = "percentStacked"
##percent_stacked.title = "Percent Stacked Line Chart"
##ws.add_chart(percent_stacked, "A44")
##
### Chart with date axis
##c2 = LineChart()
##c2.title = "Date Axis"
##c2.style = 12
##c2.y_axis.title = "Size"
##c2.y_axis.crossAx = 500
##c2.x_axis = DateAxis(crossAx=100)
##c2.x_axis.number_format = 'd-mmm'
##c2.x_axis.majorTimeUnit = "days"
##c2.x_axis.title = "Date"
##
##c2.add_data(data, titles_from_data=True)
##dates = Reference(ws, min_col=1, min_row=2, max_row=7)
##c2.set_categories(dates)
##
##ws.add_chart(c2, "A61")
### setup and append the first series
##values = Reference(ws, (1, 1), (9, 1))
##series = Series(values, title="First series of values")
##chart.append(series)
##
### setup and append the second series
##values = Reference(ws, (1, 2), (9, 2))
##series = Series(values, title="Second series of values")
##chart.append(series)
##
##ws.add_chart(chart)
wb.save(excelfile)
wb.close()
I modified the code below inside the for loop and it worked.
f = open("C:\Users\lenovo\Desktop\sample.txt")
data = []
num = f.readlines()
for line in num:
line = line.split(";")
new_line=[]
for x in line:
if x.isdigit():
x=int(x)
new_line.append(x)
else:
new_line.append(x)
ws.append(new_line)
f.close()
wb.save(excelfile)
wb.close()
For each line, check whether each value is a digit; if it is, convert it to an integer and store it in another list.
Using x = map(int, x) didn't work since I have character values too.
I felt the above is much easier than using x = map(int, x) with try and except.
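For comparison, a per-value try/except version would look like this (a sketch; whether it is clearer is a matter of taste):

def to_number(x):
    try:
        return int(x)
    except ValueError:
        return x

for line in num:
    ws.append([to_number(x) for x in line.split(";")])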
Thanks
Basha