I have implemented a very simple computational graph, and I am able to visualize it correctly on TensorBoard. However, when I run the graph, I am not able to see the numerical values of the variables:
import tensorflow as tf
a = tf.constant(5, name = 'a')
b = tf.constant(5, name = 'b')
c = a + b
print(a)
print(b)
print(c)
sess = tf.Session()
print(sess.run(c))
with tf.Session() as sess:
    writer = tf.summary.FileWriter('c:/users/gpapari/documents/python', sess.graph)
    writer.close()
Maybe I am missing something?
First of all, you are creating two separate sessions.
Second, you need to add the values you want to track to the FileWriter.
To do that, you have to create summary scalars.
In the example below I merged all the scalars, so you won't have to add them one by one if you want to track more values.
import tensorflow as tf
tf.reset_default_graph()
a = tf.constant(5, name = 'a')
b = tf.constant(5, name = 'b')
c = a + b
tf.summary.scalar("c", c)
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter('log', tf.get_default_graph())
with tf.Session() as sess:
    merged_value, _ = sess.run([merged, c])
    writer.add_summary(merged_value, 1)
writer.close()
Also, you don't have to spell out the entire path for the FileWriter; a relative path works as well.
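To actually see the tracked value, point TensorBoard at the log directory and open the Scalars tab in your browser (a usage note, assuming the tensorboard command from your TensorFlow install is on your PATH):
tensorboard --logdir log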
I followed the official tutorial on the TensorFlow website: https://www.tensorflow.org/extend/adding_an_op
It also describes how to call the gradient of the tutorial's ZeroOut example, which I want to try in the short code snippet below.
I found the code here: https://github.com/MatteoRagni/tf.ZeroOut.gpu
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
zero_out_module = tf.load_op_library('./libzeroout.so')
@ops.RegisterGradient("ZeroOut")
def _zero_out_grad(op, grad):
    to_zero = op.inputs[0]
    shape = array_ops.shape(to_zero)
    index = array_ops.zeros_like(shape)
    first_grad = array_ops.reshape(grad, [-1])[0]
    to_zero_grad = sparse_ops.sparse_to_dense([index], shape, first_grad, 0)
    return [to_zero_grad]  # List of one Tensor, since we have one input
t_in = tf.placeholder(tf.int32, [None,None])
ret = zero_out_module.zero_out(t_in)
grad = tf.gradients(ys=tf.reduce_sum(ret), xs=t_in)
with tf.Session(''):
    feed_dict = {t_in: [[1, 2], [3, 4]]}
    print "ret val: ", ret.eval(feed_dict=feed_dict)
    print "grad: ", grad
    print "grad: ", grad.eval(feed_dict=feed_dict)
I got this error ...
AttributeError: 'list' object has no attribute 'eval'
... but I can do ret.eval().
Why can't I call grad.eval()? I want to see the values inside the grad tensor. How can I debug the gradient?
Answer to old question
The implementation
def _zero_out_grad(op, *grads):
    topdiff = grads[0]
    bottom = op.inputs[0]
    shape = array_ops.shape(bottom)
    index = array_ops.zeros_like(shape)
    first_grad = array_ops.reshape(topdiff, [-1])[0]
    to_zero_grad = sparse_ops.sparse_to_dense([index], shape, first_grad, 0)
    return to_zero_grad
works quite nicely here. Are you sure @ops.RegisterGradient("ZeroOut") is executed before the tf.Session()?
Usually the
zero_out_module = tf.load_op_library('./libzeroout.so')
@ops.RegisterGradient("ZeroOut")
def _zero_out_grad(op, grad):
    # ...
is placed in a different file and just imported. A full working example even with the recent TensorFlow version is here.
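As a minimal sketch of that layout (the module name zero_out_grad.py is hypothetical, and the gradient body is just the one from above):
# zero_out_grad.py -- hypothetical module; importing it registers the gradient
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops

zero_out_module = tf.load_op_library('./libzeroout.so')
zero_out = zero_out_module.zero_out

@ops.RegisterGradient("ZeroOut")
def _zero_out_grad(op, grad):
    # same gradient as above: all of the incoming gradient flows to index 0
    to_zero = op.inputs[0]
    shape = array_ops.shape(to_zero)
    index = array_ops.zeros_like(shape)
    first_grad = array_ops.reshape(grad, [-1])[0]
    return [sparse_ops.sparse_to_dense([index], shape, first_grad, 0)]
Importing it in the main script (from zero_out_grad import zero_out) then guarantees the registration happens before any graph or session is built.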
Answer to completely changed question
tf.gradients returns a list of tensors, and a Python list has no eval(). Try either:
grad = tf.gradients(ys=tf.reduce_sum(ret), xs=t_in)[0]
Or follow best practice and use
grad = tf.gradients(ys=tf.reduce_sum(ret), xs=t_in)
with tf.Session() as sess:
    sess.run(grad, feed_dict=feed_dict)
Please do not change your entire question.
I have created this code for my research, but I want to use it on plenty of data files without doing it manually, i.e. without retyping lines in my code for each desired file. How can I use the input command in Python (I work with Python 2.7 on Windows) to speed this up, so I can just type the name of the desired data file? My code so far:
import iodata as io
import matplotlib.pyplot as plt
import numpy as np
import time
from scipy.signal import welch
from scipy import signal
testInstance = io.InputConverter()
start = time.time()
conversionError = io.ConversionError()
#data = testInstance.convert(r"S:\Doktorat\Python\", 1", conversionError)
data = testInstance.convert(r"/Users/PycharmProjects/Hugo/20160401", "201604010000", conversionError)
end = time.time()
print("time elapsed " + str(end - start))
if(conversionError.conversionSucces):
    print("Conversion successful")
if(conversionError.conversionSucces == False):
    print("Conversion failed: " + conversionError.conversionErrorLog)
print "Done!"
# Create a new subplot for two canals, 1 & 3
a = np.amin(data.data)
Bx = data.data[0,]
By = data.data[1,]
dt = float(300)/266350
Fs = 1/dt
t = np.arange(0,300,dt*1e3)
N = len(Bx)
M = len(By)
time = np.linspace(0,300,N)
time2 = np.linspace(0,300,M)
filename = 'C:/Users/PycharmProjects/Hugo/20160401/201604010000.dat'
d = open(filename,'rb')
degree = u"\u00b0"
headersize = 64
header = d.read(headersize)
ax1 = plt.subplot(211)
ax1.set_title(header[:16] + ', ' + # station name
'Canals: '+header[32:33]+' and '+header[34:35]+ ', ' # canals
+'Temp'+header[38:43]+degree+'C' # temperature
+', '+'Time:'+header[26:32]+', '+'Date'+' '+header[16:26]) # date
plt.ylabel('Pico Tesla [pT]')
plt.xlabel('Time [ms]')
plt.grid()
plt.plot(time[51:-14], Bx[51:-14], label='Canal 1', color='r', linewidth=0.1, linestyle="-")
plt.plot(time2[1:-14], By[1:-14], label='Canal 3', color='b', linewidth=0.1, linestyle="-")
plt.legend(loc='upper right', frameon=False, )
# Create a new subplot for FFT
plt.subplot(212)
plt.title('Fast Fourier Transform')
plt.ylabel('Power [a.u.]')
plt.xlabel('Frequency Hz')
xaxis2 = np.arange(0,470,10)
plt.xticks(xaxis2)
fft1 = (Bx[51:-14])
fft2 = (By[1:-14])
plt.grid()
# Loop for FFT data
for dataset in [fft1]:
    dataset = np.asarray(dataset)
    freqs, psd = welch(dataset, fs=266336/300, window='hamming', nperseg=8192)
    plt.semilogy(freqs, psd/dataset.size**0, color='r')
for dataset2 in [fft2]:
    dataset2 = np.asarray(dataset2)
    freqs2, psd2 = welch(dataset2, fs=266336/300, window='hamming', nperseg=8192)
    plt.semilogy(freqs2, psd2/dataset2.size**0, color='b')
plt.show()
As you can see, there are several places where an input prompt would help: when I run the code I could type the file names into Python instead of creating a separate Python file with the specific info hard-coded.
By the way, I use PyCharm for my Python work.
If all you are trying to do is get rid of the hardcoded path name, you should be able to format your name string with input variables:
name = raw_input("Name: ")
measurement = raw_input("Measurement: ")
filename = "C:/Users/PycharmProjects/{0}/{1}".format(name, measurement)
See raw_input and string formatting.
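A sketch of how this could plug into the script above, assuming the folder/measurement split of the hard-coded convert() call (the prompt texts are made up):
# Python 2.7: raw_input returns the typed text as a string
folder = raw_input("Data folder: ")       # e.g. /Users/PycharmProjects/Hugo/20160401
measurement = raw_input("Measurement: ")  # e.g. 201604010000

data = testInstance.convert(folder, measurement, conversionError)
filename = "{0}/{1}.dat".format(folder, measurement)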
import tensorflow as tf
array = tf.Variable(tf.random_normal([10]))
i = tf.constant(0)
l = []
def cond(i, l):
    return i < 10

def body(i, l):
    temp = tf.gather(array, i)
    l.append(temp)
    return i+1, l
index,list_vals = tf.while_loop(cond, body, [i,l])
I want to process a tensor array in a similar way to the code above. In the body of the while loop I want to process the array element by element in order to apply some function. For demonstration, I have given a small code snippet. However, it gives the following error message:
ValueError: Number of inputs and outputs of body must match loop_vars: 1, 2
Any help in resolving this is appreciated.
Thanks
Citing the documentation:
loop_vars is a (possibly nested) tuple, namedtuple or list
of tensors that is passed to both cond and body
You cannot pass a regular Python list as a tensor. What you can do instead is:
i = tf.constant(0)
l = tf.Variable([])
def body(i, l):
    temp = tf.gather(array, i)
    l = tf.concat([l, [temp]], 0)
    return i+1, l
index, list_vals = tf.while_loop(cond, body, [i, l],
shape_invariants=[i.get_shape(),
tf.TensorShape([None])])
The shape invariants are there because tf.while_loop normally expects that the shapes of the tensors inside the loop won't change.
sess = tf.Session()
sess.run(tf.global_variables_initializer())
sess.run(list_vals)
Out: array([-0.38367489, -1.76104736, 0.26266089, -2.74720812, 1.48196387,
-0.23357525, -1.07429159, -1.79547787, -0.74316853, 0.15982138],
dtype=float32)
TF offers a TensorArray to deal with such cases. From the docs:
Class wrapping dynamic-sized, per-time-step, write-once Tensor arrays.
This class is meant to be used with dynamic iteration primitives such as while_loop and map_fn. It supports gradient back-propagation via special "flow" control flow dependencies.
Here is an example,
import tensorflow as tf
array = tf.Variable(tf.random_normal([10]))
step = tf.constant(0)
output = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True)
def cond(step, output):
    return step < 10

def body(step, output):
    output = output.write(step, tf.gather(array, step))
    return step + 1, output
_, final_output = tf.while_loop(cond, body, loop_vars=[step, output])
final_output = final_output.stack()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(final_output))
I have a fairly simple NN that has 1 hidden layer.
However, the weights don't seem to be updating. Or perhaps they are, but the variable values don't change?
Either way, my accuracy is 0.1 and it doesn't change no matter how I change the learning rate or the activation function. Not sure what is wrong. Any ideas?
I've posted the entire code, correctly formatted, so you can copy, paste, and run it directly on your local machine.
from tensorflow.examples.tutorials.mnist import input_data
import math
import numpy as np
import tensorflow as tf
# one hot option returns binarized labels.
mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)
# model parameters
x = tf.placeholder(tf.float32, [784, None],name='x')
# weights
W1 = tf.Variable(tf.truncated_normal([25, 784],stddev= 1.0/math.sqrt(784)),name='W')
W2 = tf.Variable(tf.truncated_normal([25, 25],stddev=1.0/math.sqrt(25)),name='W')
W3 = tf.Variable(tf.truncated_normal([10, 25],stddev=1.0/math.sqrt(25)),name='W')
# bias units
b1 = tf.Variable(tf.zeros([25,1]),name='b1')
b2 = tf.Variable(tf.zeros([25,1]),name='b2')
b3 = tf.Variable(tf.zeros([10,1]),name='b3')
# NN architecture
hidden1 = tf.nn.relu(tf.matmul(W1, x,name='hidden1')+b1, name='hidden1_out')
# hidden2 = tf.nn.sigmoid(tf.matmul(W2, hidden1, name='hidden2')+b2, name='hidden2_out')
y = tf.matmul(W3, hidden1,name='y') + b3
y_ = tf.placeholder(tf.float32, [10, None],name='y_')
# Create the model
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y, y_))
train_step = tf.train.GradientDescentOptimizer(2).minimize(cross_entropy)
sess = tf.Session()
summary_writer = tf.train.SummaryWriter('log_simple_graph', sess.graph)
init = tf.global_variables_initializer()
sess.run(init)
# Train
for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    summary = sess.run(train_step, feed_dict={x: np.transpose(batch_xs), y_: np.transpose(batch_ys)})
    if summary is not None:
        summary_writer.add_event(summary)
# Test trained model
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(sess.run(accuracy, feed_dict={x: np.transpose(mnist.test.images), y_: np.transpose(mnist.test.labels)}))
The reason you are getting 0.1 accuracy consistently is mainly the order of the dimensions of the input placeholder and of the weights that follow it. The learning rate is another factor: if it is very high, the gradient oscillates and never reaches a minimum.
TensorFlow takes the number of instances (the batch size) as the first index value of a placeholder. So the code which declares the input x
x = tf.placeholder(tf.float32, [784, None],name='x')
should be declared as
x = tf.placeholder(tf.float32, [None, 784],name='x')
Consequently, W1 should be declared as
W1 = tf.Variable(tf.truncated_normal([784, 25],stddev= 1.0/math.sqrt(784)),name='W')
and so on. Even the bias variables should be declared with their shapes transposed. (That's how TensorFlow takes them. :) )
For example:
b1 = tf.Variable(tf.zeros([25]),name='b1')
b2 = tf.Variable(tf.zeros([25]),name='b2')
b3 = tf.Variable(tf.zeros([10]),name='b3')
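A quick way to convince yourself of the layout (a standalone sketch, not part of the fix): with batch-major shapes, (batch, 784) times (784, 25) gives (batch, 25), and a bias of shape (25,) broadcasts across the batch dimension.
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 784])
W1 = tf.Variable(tf.truncated_normal([784, 25]))
b1 = tf.Variable(tf.zeros([25]))
h = tf.matmul(x, W1) + b1  # shape (?, 25); the (25,) bias broadcasts over the batch
print(h.get_shape())       # prints (?, 25)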
I'm putting the full corrected code below for your reference. I achieved an accuracy of 0.9262 with it. :D
from tensorflow.examples.tutorials.mnist import input_data
import math
import numpy as np
import tensorflow as tf
# one hot option returns binarized labels.
mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)
# model parameters
x = tf.placeholder(tf.float32, [None, 784],name='x')
# weights
W1 = tf.Variable(tf.truncated_normal([784, 25],stddev= 1.0/math.sqrt(784)),name='W')
W2 = tf.Variable(tf.truncated_normal([25, 25],stddev=1.0/math.sqrt(25)),name='W')
W3 = tf.Variable(tf.truncated_normal([25, 10],stddev=1.0/math.sqrt(25)),name='W')
# bias units
b1 = tf.Variable(tf.zeros([25]),name='b1')
b2 = tf.Variable(tf.zeros([25]),name='b2')
b3 = tf.Variable(tf.zeros([10]),name='b3')
# NN architecture
hidden1 = tf.nn.relu(tf.matmul(x, W1,name='hidden1')+b1, name='hidden1_out')
# hidden2 = tf.nn.sigmoid(tf.matmul(W2, hidden1, name='hidden2')+b2, name='hidden2_out')
y = tf.matmul(hidden1, W3,name='y') + b3
y_ = tf.placeholder(tf.float32, [None, 10],name='y_')
# Create the model
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y, y_))
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(cross_entropy)
sess = tf.Session()
summary_writer = tf.train.SummaryWriter('log_simple_graph', sess.graph)
init = tf.initialize_all_variables()
sess.run(init)
for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    summary = sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
    if summary is not None:
        summary_writer.add_event(summary)
# Test trained model
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
I have one model which looks like this:
class Measurement(models.Model):
    date = models.DateField('date')
    time = models.TimeField('time')
    Q = models.DecimalField(max_digits=10, decimal_places=6)
    P = models.DecimalField(max_digits=10, decimal_places=6)
    f = models.DecimalField(max_digits=10, decimal_places=6)
In my views, I would like to plot it, so I made this function:
def plotMeas(request):
    # Count the events
    c = Measurement.objects.all()
    c = c.count()
    # Variables
    i = 0
    a = [0]
    P = a*c
    Q = a*c
    t = a*c
    # Save dP_L1 & dQ_L1 in lists
    for i in range(c):
        meas = Measurement.objects.get(pk=i+1)
        P[i] = meas.P
        Q[i] = meas.Q
        t[c-1-i] = i*10
    if c > 100:
        P = P[-100:]
        Q = Q[-100:]
        t[i] = t[-100:]
    # Construct the graph
    fig = Figure()
    q = fig.add_subplot(211)
    q.set_xlabel("time (minutes ago)")
    q.set_ylabel("Q (VAR)")
    p = fig.add_subplot(212)
    p.set_xlabel("time (minutes ago)")
    p.set_ylabel("P (W)")
    p.plot(t, P, 'go-')
    q.plot(t, Q, 'o-')
    canvas = FigureCanvas(fig)
    response = HttpResponse(content_type='image/png')
    canvas.print_png(response)
    return response
However, I would like the horizontal axis to show the date and the time (saved in the model). Does anyone know how to do this?
Have a look at the documentation for plot_date. Conveniently, plot_date takes arguments similar to plot's. A call might look like:
p.plot_date(sequence_of_datetime_objects, y_axis_values, 'go-')
Using matplotlib.dates you can then customize the format of your x-axis labels.
A simple example:
The following will specify that the x-axis displays only every third month in the format Jan '09 (assuming English-speaking locale).
import matplotlib.dates as mdates

p.xaxis.set_major_locator(mdates.MonthLocator(interval=3))
p.xaxis.set_major_formatter(mdates.DateFormatter("%b '%y"))
Since you have dates and times stored separately you may either want to
change your model to use a DateTimeField, or
use Python to combine them.
For example:
import datetime as dt
t1 = dt.time(21, 0, 1, 2)  # 21:00:01.000002
d1 = dt.date.today()
dt1 = dt.datetime.combine(d1, t1)
# result: datetime.datetime(2011, 4, 15, 21, 0, 1, 2)
To iterate over two sequences and combine them you might use zip (code for illustrative purposes only, not necessarily optimized):
sequence_of_datetime_objects = []
for a_date, a_time in zip(sequence_of_date_objects, sequence_of_time_objects):
    sequence_of_datetime_objects.append(dt.datetime.combine(a_date, a_time))
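Putting the pieces together for the view above (a sketch assuming p is the subplot from plotMeas and the Measurement fields are as declared; the hour:minute tick format is just an example):
import datetime as dt
import matplotlib.dates as mdates

# hypothetical adaptation of plotMeas: real timestamps instead of "minutes ago"
measurements = Measurement.objects.all()
times = [dt.datetime.combine(m.date, m.time) for m in measurements]
P_vals = [float(m.P) for m in measurements]  # DecimalField values -> floats

p.plot_date(times, P_vals, 'go-')
p.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
p.set_xlabel("time")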
Feel free to open another question if you get stuck implementing the specifics.