I am having the hardest time figuring out why I am getting this error. I have searched a lot but have been unable to find any solution.
import numpy as np
import warnings
from collections import Counter
import pandas as pd

def k_nearest_neighbors(data, predict, k=3):
    if len(data) >= k:
        warnings.warn('K is set to a value less than total voting groups!')
    distances = []
    for group in data:
        for features in data[group]:
            euclidean_distance = np.linalg.norm(np.array(features) - np.array(predict))
            distances.append([euclidean_distance, group])
    votes = [i[1] for i in sorted(distances)[:k]]
    vote_result = Counter(votes).most_common(1)[0][0]
    return vote_result

df = pd.read_csv("data.txt")
df.replace('?', -99999, inplace=True)
df.drop(['id'], axis=1, inplace=True)
full_data = df.astype(float).values.tolist()
print(full_data)
After running, it gives this error:
Traceback (most recent call last):
  File "E:\Jazab\Machine Learning\Lec18(Testing K Neatest Nerighbors Classifier)\Lec18(Testing K Neatest Nerighbors Classifier)\Lec18_Testing_K_Neatest_Nerighbors_Classifier_.py", line 25, in <module>
    full_data = df.astype(float).values.tolist()
  File "C:\Python27\lib\site-packages\pandas\util\_decorators.py", line 91, in wrapper
    return func(*args, **kwargs)
  File "C:\Python27\lib\site-packages\pandas\core\generic.py", line 3299, in astype
    **kwargs)
  File "C:\Python27\lib\site-packages\pandas\core\internals.py", line 3224, in astype
    return self.apply('astype', dtype=dtype, **kwargs)
  File "C:\Python27\lib\site-packages\pandas\core\internals.py", line 3091, in apply
    applied = getattr(b, f)(**kwargs)
  File "C:\Python27\lib\site-packages\pandas\core\internals.py", line 471, in astype
    **kwargs)
  File "C:\Python27\lib\site-packages\pandas\core\internals.py", line 521, in _astype
    values = astype_nansafe(values.ravel(), dtype, copy=True)
  File "C:\Python27\lib\site-packages\pandas\core\dtypes\cast.py", line 636, in astype_nansafe
    return arr.astype(dtype)
ValueError: invalid literal for float(): 3) <-----Reappears in Group 8 as:
If I remove astype(float), the program runs fine.
What should I do?
There is bad data (3)), so you need to_numeric, applied with apply because all columns need to be processed.
Non-numeric values are converted to NaNs, which fillna then replaces with some scalar, e.g. 0:
full_data = df.apply(pd.to_numeric, errors='coerce').fillna(0).values.tolist()
Sample:
df = pd.DataFrame({'A':[1,2,7], 'B':['3)',4,5]})
print (df)
A B
0 1 3)
1 2 4
2 7 5
full_data = df.apply(pd.to_numeric, errors='coerce').fillna(0).values.tolist()
print (full_data)
[[1.0, 0.0], [2.0, 4.0], [7.0, 5.0]]
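If you would rather locate the offending cells than zero-fill them, here is a minimal sketch under the same sample df (the column name 'B' is just illustrative; apply the same idea to each column of your real data):

import pandas as pd

df = pd.DataFrame({'A': [1, 2, 7], 'B': ['3)', 4, 5]})

# Rows where column 'B' cannot be parsed as a number (illustrative column name)
bad_rows = df[pd.to_numeric(df['B'], errors='coerce').isnull()]
print(bad_rows)
#    A   B
# 0  1  3)

Once you see which rows are malformed, you can decide whether to clean them in the CSV itself or drop them before calling astype(float).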
It looks like you have 3) as an entry in your CSV file, and pandas complains because the trailing ) prevents it from being cast to a float.
I am currently trying to implement an RNN for regression.
I need to create a neural network capable of converting audio samples into vectors of MFCC features. I already know what the feature for each audio sample is, so the task itself is to create a neural network that is capable of converting a list of audio samples into the desired MFCC features.
The second problem I am facing is that since the audio files I am sampling have different lengths, the lists of audio samples will also have different lengths, which causes problems with the number of inputs I need to feed into the neural network. I found this post on how to handle variable sequence lengths and tried to incorporate it into my implementation of an RNN, but I keep getting a lot of errors for reasons I can't explain.
Could anyone see what is going wrong with my implementation?
Here is the code:
def length(sequence):  ## Zero padding to fit the max length... Question whether that is a good idea.
    used = tf.sign(tf.reduce_max(tf.abs(sequence), reduction_indices=2))
    length = tf.reduce_sum(used, reduction_indices=1)
    length = tf.cast(length, tf.int32)
    return length

def cost(output, target):
    # Compute cross entropy for each frame.
    cross_entropy = target * tf.log(output)
    cross_entropy = -tf.reduce_sum(cross_entropy, reduction_indices=2)
    mask = tf.sign(tf.reduce_max(tf.abs(target), reduction_indices=2))
    cross_entropy *= mask
    # Average over actual sequence lengths.
    cross_entropy = tf.reduce_sum(cross_entropy, reduction_indices=1)
    cross_entropy /= tf.reduce_sum(mask, reduction_indices=1)
    return tf.reduce_mean(cross_entropy)

def last_relevant(output):
    max_length = int(output.get_shape()[1])
    relevant = tf.reduce_sum(tf.mul(output, tf.expand_dims(tf.one_hot(length, max_length), -1)), 1)
    return relevant
files_train_path = [dnn_train+f for f in listdir(dnn_train) if isfile(join(dnn_train, f))]
files_test_path = [dnn_test+f for f in listdir(dnn_test) if isfile(join(dnn_test, f))]
files_train_name = [f for f in listdir(dnn_train) if isfile(join(dnn_train, f))]
files_test_name = [f for f in listdir(dnn_test) if isfile(join(dnn_test, f))]
os.chdir(dnn_train)
train_name,train_data = generate_list_of_names_data(files_train_path)
train_data, train_names, train_output_data, train_class_output = load_sound_files(files_train_path,train_name,train_data)
max_length = 0 ## Used for variable sequence input
for element in train_data:
    if element.size > max_length:
        max_length = element.size
NUM_EXAMPLES = len(train_data)/2
test_data = train_data[NUM_EXAMPLES:]
test_output = train_output_data[NUM_EXAMPLES:]
train_data = train_data[:NUM_EXAMPLES]
train_output = train_output_data[:NUM_EXAMPLES]
print("--- %s seconds ---" % (time.time() - start_time))
#----------------------------------------------------------------------#
#----------------------------Main--------------------------------------#
### Tensorflow neural network setup
batch_size = None
sequence_length_max = max_length
input_dimension=1
data = tf.placeholder(tf.float32,[batch_size,sequence_length_max,input_dimension])
target = tf.placeholder(tf.float32,[None,14])
num_hidden = 24 ## Hidden layer
cell = tf.nn.rnn_cell.LSTMCell(num_hidden,state_is_tuple=True) ## Long short term memory
output, state = tf.nn.dynamic_rnn(cell, data, dtype=tf.float32,sequence_length = length(data)) ## Creates the Rnn skeleton
last = last_relevant(output)#tf.gather(val, int(val.get_shape()[0]) - 1) ## Appedning as last
weight = tf.Variable(tf.truncated_normal([num_hidden, int(target.get_shape()[1])]))
bias = tf.Variable(tf.constant(0.1, shape=[target.get_shape()[1]]))
prediction = tf.nn.softmax(tf.matmul(last, weight) + bias)
cross_entropy = cost(output,target)# How far am I from correct value?
optimizer = tf.train.AdamOptimizer() ## TensorflowOptimizer
minimize = optimizer.minimize(cross_entropy)
mistakes = tf.not_equal(tf.argmax(target, 1), tf.argmax(prediction, 1))
error = tf.reduce_mean(tf.cast(mistakes, tf.float32))
## Training ##
init_op = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init_op)
batch_size = 1000
no_of_batches = int(len(train_data)/batch_size)
epoch = 5000
for i in range(epoch):
    ptr = 0
    for j in range(no_of_batches):
        inp, out = train_data[ptr:ptr+batch_size], train_output[ptr:ptr+batch_size]
        ptr += batch_size
        sess.run(minimize, {data: inp, target: out})
    print "Epoch - ", str(i)
incorrect = sess.run(error, {data: test_data, target: test_output})
print('Epoch {:2d} error {:3.1f}%'.format(i + 1, 100 * incorrect))
sess.close()
Error message:
Traceback (most recent call last):
  File "tensorflow_test.py", line 177, in <module>
    last = last_relevant(output)#tf.gather(val, int(val.get_shape()[0]) - 1) ## Appedning as last
  File "tensorflow_test.py", line 132, in last_relevant
    relevant = tf.reduce_sum(tf.mul(output, tf.expand_dims(tf.one_hot(length, max_length), -1)), 1)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/array_ops.py", line 2778, in one_hot
    name)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/gen_array_ops.py", line 1413, in _one_hot
    axis=axis, name=name)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/op_def_library.py", line 454, in apply_op
    as_ref=input_arg.is_ref)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 621, in convert_to_tensor
    ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/constant_op.py", line 180, in _constant_tensor_conversion_function
    return constant(v, dtype=dtype, name=name)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/constant_op.py", line 163, in constant
    tensor_util.make_tensor_proto(value, dtype=dtype, shape=shape))
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/tensor_util.py", line 421, in make_tensor_proto
    tensor_proto.string_val.extend([compat.as_bytes(x) for x in proto_values])
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/util/compat.py", line 45, in as_bytes
    (bytes_or_text,))
TypeError: Expected binary or unicode string, got <function length at 0x7f51a7a3ede8>
Edit:
Changing it to tf.one_hot(length(output), max_length) gives me this error message:
Traceback (most recent call last):
  File "tensorflow_test.py", line 184, in <module>
    cross_entropy = cost(output,target)# How far am I from correct value?
  File "tensorflow_test.py", line 121, in cost
    cross_entropy = target * tf.log(output)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/math_ops.py", line 754, in binary_op_wrapper
    return func(x, y, name=name)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/math_ops.py", line 903, in _mul_dispatch
    return gen_math_ops.mul(x, y, name=name)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/gen_math_ops.py", line 1427, in mul
    result = _op_def_lib.apply_op("Mul", x=x, y=y, name=name)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/op_def_library.py", line 703, in apply_op
    op_def=op_def)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 2312, in create_op
    set_shapes_for_outputs(ret)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 1704, in set_shapes_for_outputs
    shapes = shape_func(op)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/math_ops.py", line 1801, in _BroadcastShape
    % (shape_x, shape_y))
ValueError: Incompatible shapes for broadcasting: (?, 14) and (?, 138915, 24)
In tf.one_hot(length, ...), length is a function, not a tensor. Try length(something), e.g. length(data), instead.
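A minimal sketch of how the helper could look with the length tensor passed in explicitly, assuming the TF 0.x API used in the question; the - 1 offset is an assumption on my part so the one-hot selects the index of the last valid frame rather than one past it:

def last_relevant(output, length):
    # output: [batch_size, max_length, num_hidden]; length: [batch_size] int32 tensor
    max_length = int(output.get_shape()[1])
    # One-hot over the time axis picks out each sequence's last valid frame.
    relevant = tf.reduce_sum(
        tf.mul(output, tf.expand_dims(tf.one_hot(length - 1, max_length), -1)), 1)
    return relevant

seq_length = length(data)   # compute the length tensor once and reuse it
output, state = tf.nn.dynamic_rnn(cell, data, dtype=tf.float32, sequence_length=seq_length)
last = last_relevant(output, seq_length)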
I'm a little new to scikit-learn and ML. I'm trying to train an AdaBoost classifier for one-vs-rest classification. I'm using the following code:
# To read the training data set
test = pd.read_csv("train.csv", header=0, delimiter=",", \
                   quoting=1, error_bad_lines=False)
num_reviews = len(test["text"])
clean_train_reviews = []
catlist = []
for i in xrange(0, num_reviews):
    data = processText(test["text"][i])
    data1 = test["category"][i]
    clean_train_reviews.append(data)
    catlist.append(data1.split('.'))

# To read the test data set
test = pd.read_csv("test.csv", header=0, delimiter=",", \
                   quoting=1, error_bad_lines=False)
num_reviews = len(test["text"])
clean_test_reviews = []
for i in xrange(0, num_reviews):
    data = processText(test["text"][i])
    clean_test_reviews.append(data)
X_test = np.array(clean_test_reviews)

lb = preprocessing.MultiLabelBinarizer()
Y = lb.fit_transform(catlist)
classifier = Pipeline([
    ('vectorizer', CountVectorizer(ngram_range=(1, 2), max_features=1500, min_df=4)),
    ('tfidf', TfidfTransformer()),
    ('chi2', SelectKBest(chi2, k=200)),
    ('clf', OneVsRestClassifier(AdaBoostClassifier()))])
classifier.fit(clean_train_reviews, Y)
predicted = classifier.predict(X_test)
I use a pipeline, where the text is inserted as clean_train_reviews and Y is the class indicator matrix (multi-label, N = 10). Textual features are extracted in the pipeline using CountVectorizer() and TfidfTransformer() and selected with the chi-squared feature selection method. The AdaBoost classifier gives: ValueError: bad input shape (1000, 10)
File "<ipython-input-10-9dbc8b18e6b8>", line 1, in <module>
runfile('C:/Users/Administrator/Desktop/nincymiss/adaboost.py', wdir='C:/Users/Administrator/Desktop/nincymiss')
File "C:\Python27\lib\site-packages\spyderlib\widgets\externalshell\sitecustomize.py", line 601, in runfile
execfile(filename, namespace)
File "C:\Python27\lib\site-packages\spyderlib\widgets\externalshell\sitecustomize.py", line 66, in execfile
exec(compile(scripttext, filename, 'exec'), glob, loc)
File "C:/Users/Administrator/Desktop/nincymiss/adaboost.py", line 179, in <module>
classifier.fit(clean_train_reviews, Y)
File "C:\Python27\lib\site-packages\sklearn\pipeline.py", line 164, in fit
Xt, fit_params = self._pre_transform(X, y, **fit_params)
File "C:\Python27\lib\site-packages\sklearn\pipeline.py", line 145, in _pre_transform
Xt = transform.fit_transform(Xt, y, **fit_params_steps[name])
File "C:\Python27\lib\site-packages\sklearn\base.py", line 458, in fit_transform
return self.fit(X, y, **fit_params).transform(X)
File "C:\Python27\lib\site-packages\sklearn\feature_selection\univariate_selection.py", line 322, in fit
X, y = check_X_y(X, y, ['csr', 'csc'])
File "C:\Python27\lib\site-packages\sklearn\utils\validation.py", line 515, in check_X_y
y = column_or_1d(y, warn=True)
File "C:\Python27\lib\site-packages\sklearn\utils\validation.py", line 551, in column_or_1d
raise ValueError("bad input shape {0}".format(shape))
ValueError: bad input shape (1000, 10)
This is because feature selection does not work as you'd expect for multilabel problems. You can try the following, which will select the 'best' features for each label separately:
classifier = Pipeline([
    ('vectorizer', CountVectorizer(ngram_range=(1, 2), max_features=1500, min_df=4)),
    ('tfidf', TfidfTransformer()),
    ('chi2', SelectKBest(chi2, k=200)),
    ('clf', AdaBoostClassifier())])
clf = OneVsRestClassifier(classifier)
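With OneVsRestClassifier wrapping the whole pipeline, each binary subproblem gets its own clone of the vectorizer and the chi-squared selector, so SelectKBest only ever sees a one-dimensional binary target. A short usage sketch under that assumption, reusing the variables from the question:

clf.fit(clean_train_reviews, Y)   # Y is the (n_samples, 10) indicator matrix from MultiLabelBinarizer
predicted = clf.predict(X_test)   # also an indicator matrix; lb.inverse_transform(predicted) recovers label sets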
Hi, I am trying to run a convolutional neural network adapted from the MNIST tutorial in TensorFlow.
I am having the following error, but I am not sure what is going on:
W tensorflow/core/framework/op_kernel.cc:909] Invalid argument: Shape mismatch in tuple component 0. Expected [784], got [6272]
W tensorflow/core/framework/op_kernel.cc:909] Invalid argument: Shape mismatch in tuple component 0. Expected [784], got [6272]
Traceback (most recent call last):
  File "4_Treino_Rede_Neural.py", line 161, in <module>
    train_accuracy = accuracy.eval(feed_dict={keep_prob: 1.0})
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 555, in eval
    return _eval_using_default_session(self, feed_dict, self.graph, session)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 3498, in _eval_using_default_session
    return session.run(tensors, feed_dict)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 372, in run
    run_metadata_ptr)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 636, in _run
    feed_dict_string, options, run_metadata)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 708, in _do_run
    target_list, options, run_metadata)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 728, in _do_call
    raise type(e)(node_def, op, message)
tensorflow.python.framework.errors.OutOfRangeError: RandomShuffleQueue '_0_input/shuffle_batch/random_shuffle_queue' is closed and has insufficient elements (requested 100, current size 0)
    [[Node: input/shuffle_batch = QueueDequeueMany[_class=["loc:@input/shuffle_batch/random_shuffle_queue"], component_types=[DT_FLOAT, DT_INT32], timeout_ms=-1, _device="/job:localhost/replica:0/task:0/cpu:0"](input/shuffle_batch/random_shuffle_queue, input/shuffle_batch/n)]]
Caused by op u'input/shuffle_batch', defined at:
  File "4_Treino_Rede_Neural.py", line 113, in <module>
    x, y_ = inputs(train=True, batch_size=FLAGS.batch_size, num_epochs=FLAGS.num_epochs)
  File "4_Treino_Rede_Neural.py", line 93, in inputs
    min_after_dequeue=1000)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/input.py", line 779, in shuffle_batch
    dequeued = queue.dequeue_many(batch_size, name=name)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/data_flow_ops.py", line 400, in dequeue_many
    self._queue_ref, n=n, component_types=self._dtypes, name=name)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/gen_data_flow_ops.py", line 465, in _queue_dequeue_many
    timeout_ms=timeout_ms, name=name)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/op_def_library.py", line 704, in apply_op
    op_def=op_def)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 2260, in create_op
    original_op=self._default_original_op, op_def=op_def)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 1230, in __init__
    self._traceback = _extract_stack()
My program is:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import time
import numpy as np
import tensorflow as tf
# Basic model parameters as external flags.
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_integer('num_epochs', 2, 'Number of epochs to run trainer.')
flags.DEFINE_integer('batch_size', 100, 'Batch size.')
flags.DEFINE_string('train_dir', '/root/data', 'Directory with the training data.')
#flags.DEFINE_string('train_dir', '/root/data2', 'Directory with the training data.')
# Constants used for dealing with the files, matches convert_to_records.
TRAIN_FILE = 'train.tfrecords'
VALIDATION_FILE = 'validation.tfrecords'
# Set-up dos pacotes
sess = tf.InteractiveSession()
def read_and_decode(filename_queue):
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        # Defaults are not specified since both keys are required.
        features={
            'image_raw': tf.FixedLenFeature([], tf.string),
            'label': tf.FixedLenFeature([], tf.int64),
        })
    # Convert from a scalar string tensor (whose single string has
    # length mnist.IMAGE_PIXELS) to a uint8 tensor with shape
    # [mnist.IMAGE_PIXELS].
    image = tf.decode_raw(features['image_raw'], tf.uint8)
    image.set_shape([784])
    # OPTIONAL: Could reshape into a 28x28 image and apply distortions
    # here. Since we are not applying any distortions in this
    # example, and the next step expects the image to be flattened
    # into a vector, we don't bother.
    # Convert from [0, 255] -> [-0.5, 0.5] floats.
    image = tf.cast(image, tf.float32) * (1. / 255) - 0.5
    # Convert label from a scalar uint8 tensor to an int32 scalar.
    label = tf.cast(features['label'], tf.int32)
    return image, label
def inputs(train, batch_size, num_epochs):
    """Reads input data num_epochs times.
    Args:
        train: Selects between the training (True) and validation (False) data.
        batch_size: Number of examples per returned batch.
        num_epochs: Number of times to read the input data, or 0/None to
            train forever.
    Returns:
        A tuple (images, labels), where:
        * images is a float tensor with shape [batch_size, 30,26,1]
          in the range [-0.5, 0.5].
        * labels is an int32 tensor with shape [batch_size] with the true label,
          a number in the range [0, char letras).
    Note that a tf.train.QueueRunner is added to the graph, which
    must be run using e.g. tf.train.start_queue_runners().
    """
    if not num_epochs: num_epochs = None
    filename = os.path.join(FLAGS.train_dir,
                            TRAIN_FILE if train else VALIDATION_FILE)
    with tf.name_scope('input'):
        filename_queue = tf.train.string_input_producer(
            [filename], num_epochs=num_epochs)
        # Even when reading in multiple threads, share the filename
        # queue.
        image, label = read_and_decode(filename_queue)
        # Shuffle the examples and collect them into batch_size batches.
        # (Internally uses a RandomShuffleQueue.)
        # We run this in two threads to avoid being a bottleneck.
        images, sparse_labels = tf.train.shuffle_batch(
            [image, label], batch_size=batch_size, num_threads=2,
            capacity=1000 + 3 * batch_size,
            # Ensures a minimum amount of shuffling of examples.
            min_after_dequeue=1000)
    return images, sparse_labels
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')
#Variaveis
x, y_ = inputs(train=True, batch_size=FLAGS.batch_size, num_epochs=FLAGS.num_epochs)
#onehot_y_ = tf.one_hot(y_, 36, dtype=tf.float32)
#y_ = tf.string_to_number(y_, out_type=tf.int32)
#Layer 1
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
x_image = tf.reshape(x, [-1,28,28,1])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
#Layer 2
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
#Densely Connected Layer
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
#Dropout - reduz overfitting
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
#Readout layer
W_fc2 = weight_variable([1024, 36])
b_fc2 = bias_variable([36])
#y_conv=tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
#Train and evaluate
#cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_conv), reduction_indices=[1]))
#cross_entropy = tf.reduce_mean(-tf.reduce_sum(onehot_y_ * tf.log(y_conv), reduction_indices=[1]))
cross_entropy = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(y_conv, y_))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
sess.run(tf.initialize_all_variables())
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
for i in range(20000):
    if i % 100 == 0:
        train_accuracy = accuracy.eval(feed_dict={keep_prob: 1.0})
        print("step %d, training accuracy %g" % (i, train_accuracy))
    train_step.run(feed_dict={keep_prob: 0.5})
x, y_ = inputs(train=True, batch_size=2000)
#y_ = tf.string_to_number(y_, out_type=tf.int32)
print("test accuracy %g"%accuracy.eval(feed_dict={keep_prob: 1.0}))
coord.join(threads)
sess.close()
Can anyone explain to me what's going on, and how to fix it?
Thanks!
Marcelo V
I had similar problems in the past, and it was because I was storing and reading the data with mismatched data types. For example, I had cast the data as float when converting the original PNG data to TFRecords. Then when I read the data back out of the TFRecords, I cast it again, assuming the data coming out was uint8. Hence I had a mismatch of 3136 (784*4) when 784 was expected. I'm guessing that may also be the case for you here.
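A minimal sketch of that fix inside read_and_decode, assuming the records were written as float32; adjust the dtype to whatever was actually used when your TFRecords were created:

# Hypothetical: match the dtype used when the TFRecords were written.
# Decoding float32 data as uint8 yields 4x as many elements (3136 instead of 784).
image = tf.decode_raw(features['image_raw'], tf.float32)
image.set_shape([784])
# The values are already floats; only rescale if they were stored in [0, 255].
image = image * (1. / 255) - 0.5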
In the line:
filename_queue = tf.train.string_input_producer([filename], num_epochs=num_epochs)
You specify the number of epochs the queue will run through the filenames. The documentation explains it well:
num_epochs: An integer (optional). If specified, string_input_producer produces each string from string_tensor num_epochs times before generating an OutOfRange error. If not specified, string_input_producer can cycle through the strings in string_tensor an unlimited number of times.
In flags.DEFINE_integer('num_epochs', 2, 'Number of epochs to run trainer.'), you set the default number of epochs to 2. You should either increase that, or remove the num_epochs argument from string_input_producer.
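Two hedged ways to apply this, given the inputs helper in your code (the 0 default works because of the existing if not num_epochs: num_epochs = None line):

# Option 1: raise (or zero out) the default epoch count.
flags.DEFINE_integer('num_epochs', 0, 'Number of epochs to run trainer (0 = cycle forever).')

# Option 2: drop the argument so the producer cycles indefinitely.
filename_queue = tf.train.string_input_producer([filename])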
I am facing a problem when I start my trainer and I can't figure out the cause.
My input data is of dimension 42 and my output should be one value out of 4.
This is the shape of my training and test set:
Training set: input = (1152, 42) target = (1152,)
Test set: input = (384, 42) target = (384,)
This is the construction of my network:
ls = MS.GradientDescent(lr=0.01)
cost = MC.CrossEntropy()
i = ML.Input(42, name='inp')
h = ML.Hidden(23, activation=MA.Sigmoid(), initializations=[MI.GlorotTanhInit()], name="hid")
o = ML.SoftmaxClassifier(4, learningScenario=ls, costObject=cost, name="out")
mlp = i > h > o
And this is the construction of the datasets, trainers and recorders:
trainData = MDM.RandomSeries(distances = train_set[0], next_state = train_set[1])
trainMaps = MDM.DatasetMapper()
trainMaps.mapInput(i, trainData.distances)
trainMaps.mapOutput(o, trainData.next_state)
testData = MDM.RandomSeries(distances = test_set[0], next_state = test_set[1])
testMaps = MDM.DatasetMapper()
testMaps.mapInput(i, testData.distances)
testMaps.mapOutput(o, testData.next_state)
earlyStop = MSTOP.GeometricEarlyStopping(testMaps, patience=100, patienceIncreaseFactor=1.1, significantImprovement=0.00001, outputFunction="score", outputLayer=o)
epochWall = MSTOP.EpochWall(1000)
trainer = MT.DefaultTrainer(
    trainMaps=trainMaps,
    testMaps=testMaps,
    validationMaps=None,
    stopCriteria=[earlyStop, epochWall],
    testFunctionName="testAndAccuracy",
    trainMiniBatchSize=MT.DefaultTrainer.ALL_SET,
    saveIfMurdered=False
)
recorder = MREC.GGPlot2("MLP", whenToSave = [MREC.SaveMin("test", o.name, "score")], printRate=1, writeRate=1)
trainer.start("MLP", mlp, recorder = recorder)
But the following error is being produced:
Traceback (most recent call last):
  File "nn-mariana.py", line 82, in <module>
    trainer.start("MLP", mlp, recorder = recorder)
  File "SUPRESSED/Mariana/Mariana/training/trainers.py", line 226, in start
    Trainer_ABC.start( self, runName, model, recorder, trainingOrder, moreHyperParameters )
  File "SUPRESSED/Mariana/Mariana/training/trainers.py", line 110, in start
    return self.run(runName, model, recorder, *args, **kwargs)
  File "SUPRESSED/Mariana/Mariana/training/trainers.py", line 410, in run
    outputLayers
  File "SUPRESSED/Mariana/Mariana/training/trainers.py", line 269, in _trainTest
    res = modelFct(output, **kwargs)
  File "SUPRESSED/Mariana/Mariana/network.py", line 47, in __call__
    return self.callTheanoFct(outputLayer, **kwargs)
  File "SUPRESSED/Mariana/Mariana/network.py", line 44, in callTheanoFct
    return self.outputFcts[ol](**kwargs)
  File "SUPRESSED/Mariana/Mariana/wrappers.py", line 110, in __call__
    return self.run(**kwargs)
  File "SUPRESSED/Mariana/Mariana/wrappers.py", line 102, in run
    fres = iter(self.theano_fct(*self.fctInputs.values()))
  File "SUPRESSED/Theano/theano/compile/function_module.py", line 871, in __call__
    storage_map=getattr(self.fn, 'storage_map', None))
  File "SUPRESSED/Theano/theano/gof/link.py", line 314, in raise_with_op
    reraise(exc_type, exc_value, exc_trace)
  File "SUPRESSED/Theano/theano/compile/function_module.py", line 859, in __call__
    outputs = self.fn()
ValueError: Input dimension mis-match. (input[0].shape[1] = 1152, input[1].shape[1] = 4)
Apply node that caused the error: Elemwise{Composite{((i0 * i1) + (i2 * log(i3)))}}[(0, 1)](InplaceDimShuffle{x,0}.0, LogSoftmax.0, Elemwise{sub,no_inplace}.0, Elemwise{sub,no_inplace}.0)
Toposort index: 18
Inputs types: [TensorType(int32, row), TensorType(float64, matrix), TensorType(int32, row), TensorType(float64, matrix)]
Inputs shapes: [(1, 1152), (1152, 4), (1, 1152), (1152, 4)]
Inputs strides: [(4608, 4), (32, 8), (4608, 4), (32, 8)]
Inputs values: ['not shown', 'not shown', 'not shown', 'not shown']
Outputs clients: [[Sum{axis=[1], acc_dtype=float64}(Elemwise{Composite{((i0 * i1) + (i2 * log(i3)))}}[(0, 1)].0)]]
Versions:
Mariana (1.0.1rc1, /media/guilhermevrs/Data/Documentos/Academico/TCC-code/Mariana)
Theano (0.8.0.dev0, SUPRESSED/Theano)
This code was produced using the tutorial code from the mnist example as a base.
Could you please help me figure out what's going on?
Thank you in advance.
I talked directly to the authors of Mariana, and the cause and solution are explained in this issue.