My goal is to give numpy.ndarray a different representation, since I want to represent some arrays with units. Thus, I programmed a class that inherits its attributes/methods from numpy.ndarray. For the new representation I wanted to use the __repr__ magic method like:
class Quantitiy(np.ndarray):
    def __new__(cls, value, unit=None, dtype=None, copy=True, order=None, subok=False, ndmin=0):
        value = np.asarray(value)
        obj = np.array(value, dtype=dtype, copy=copy, order=order,
                       subok=True, ndmin=ndmin).view(cls)
        obj.__unit = util.def_unit(unit)
        obj.__value = value
        return obj

    def __repr__(self):
        prefix = '<{0} '.format(self.__class__.__name__)
        sep = ','
        arrstr = np.array2string(self.view(np.ndarray),
                                 separator=sep,
                                 prefix=prefix)
        return '{0}{1} {2}>'.format(prefix, arrstr, self.__unit)
So far this works fine. However, if I want to access the inherited methods from numpy.ndarray (e.g. min()), I get an AttributeError because __repr__ can't resolve self.__unit.
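For reference, here is a minimal sketch of the two effects involved (double-underscore name mangling, plus numpy building result instances without calling __new__); the class is stripped down purely for illustration:

import numpy as np

class Quantitiy(np.ndarray):
    pass

q = np.arange(4).view(Quantitiy)
# Inside the class body, self.__unit is stored under the mangled name _Quantitiy__unit.
q._Quantitiy__unit = "meter / second"

# A reduction such as min() builds a new object without going through __new__,
# so the mangled attribute is missing on the result.
print(hasattr(q.min(), '_Quantitiy__unit'))  # False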
I tried to solve this problem with a private method that defines self.__unit and called it within the __new__ method, but without success:
class Quantitiy(np.ndarray):
    def __new__(cls, value, unit=None, dtype=None, copy=True, order=None, subok=False, ndmin=0):
        value = np.asarray(value)
        obj = np.array(value, dtype=dtype, copy=copy, order=order, subok=True, ndmin=ndmin).view(cls)
        # Here I call the private method to initialize self.__unit.
        obj.__set_unit()
        obj.__value = value
        return obj

    def __repr__(self):
        prefix = '<{0} '.format(self.__class__.__name__)
        sep = ','
        arrstr = np.array2string(self.view(np.ndarray), separator=sep, prefix=prefix)
        return '{0}{1} {2}>'.format(prefix, arrstr, self.__unit)

    # Newly defined private method.
    def __set_unit(self, unit):
        self.__unit = util.def_unit(unit)
I cannot solve this with something like cls.__unit = util.def_unit(unit) in the __new__ method. I already tried to define an __init__ method after __new__. Moreover, I tried interchanging the private methods with public ones.
What I expect:
>>> array = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
>>> q = Quantitiy(array, unit="meter / second")
>>> q
<Quantitiy [[1,2,3,4],
            [5,6,7,8]] meter/second>
>>> q * q
<Quantitiy [[ 1, 4, 9,16],
            [25,36,49,64]] meter**2/second**2>
>>> q.min()
<Quantitiy 1 meter/second>
The actual result is:
>>> array = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
>>> q = Quantitiy(array, unit="meter / second")
>>> q
<Quantitiy [[1,2,3,4],
            [5,6,7,8]] meter/second>
>>> q * q
<Quantitiy [[ 1, 4, 9,16],
            [25,36,49,64]] meter**2/second**2>
# Up to here everything works fine.
>>> q.min()
AttributeError: 'Quantitiy' object has no attribute '_Quantitiy__unit'
Does anyone see the mistake and can help me?
Ok, the answer is - as usual - in the FineManual (and could be found by searching for "subclassing numpy ndarray" - which is how I found it, actually), and requires implementing __array_finalize__(self, obj):
import numpy as np

class Quantitiy(np.ndarray):
    def __new__(cls, value, unit=None, dtype=None, copy=True, order=None, subok=False, ndmin=0):
        value = np.asarray(value)
        x = np.array(value, dtype=dtype, copy=copy, order=order, subok=True, ndmin=ndmin)
        obj = x.view(type=cls)
        obj._unit = unit
        obj._value = value
        return obj

    def __repr__(self):
        print("repr %s" % type(self))
        prefix = '<{0} '.format(self.__class__.__name__)
        sep = ','
        arrstr = np.array2string(self.view(np.ndarray),
                                 separator=sep,
                                 prefix=prefix)
        return '{0}{1} {2}>'.format(prefix, arrstr, self._unit)

    def __array_finalize__(self, obj):
        # see InfoArray.__array_finalize__ for comments
        if obj is None:
            return
        self._unit = getattr(obj, '_unit', None)
        self._value = getattr(obj, '_value', None)
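For illustration, a quick check of the fixed class (a sketch that assumes the code above, where the unit is kept as the plain string passed in rather than going through util.def_unit):

q = Quantitiy([[1, 2, 3, 4], [5, 6, 7, 8]], unit="meter / second")

# min() creates a new Quantitiy; __array_finalize__ copies _unit onto it,
# so repr() no longer raises an AttributeError.
print(repr(q.min()))  # e.g. <Quantitiy 1 meter / second>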
I'm new to TF and ML.
Details about the data: the features (x) are a (70 x 70 x 70) tensor for each sample, and y is a float for each sample.
The TFRecords are created with the following code:
def convert_to_tf_records():
    def _bytes_feature(value):
        return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

    def _float64_feature(value):
        return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))

    tfrecords_filename = 'A-100-h2-h2o.tfrecords'
    writer = tf.python_io.TFRecordWriter(tfrecords_filename)

    # Get data from db for now.
    db = connect('results-60-70.db')
    data = db.select(selection='Ti')

    i = 0
    for row in data:
        desc = np.array(json.loads(row.descriptor), dtype=np.float32)
        print(desc.shape)
        be = float(row.binding_energy) * 23  # Convert to Kcal/mol ?
        desc = desc.flatten()
        desc = desc.tostring()
        example = tf.train.Example(features=tf.train.Features(
            feature={'voxel_grid': _bytes_feature(desc),
                     'binding_energy': _float64_feature(be)}))
        writer.write(example.SerializeToString())
        i += 1
        if i >= 10:
            break
Input function:
def my_input_function(fname, perform_shuffle=False, repeat_count=None):
    def _parse_elements(example):
        features = tf.parse_single_example(
            example,
            features={'voxel_grid': tf.FixedLenFeature([], tf.string),
                      'binding_energy': tf.FixedLenFeature([], tf.float32)})
        vg = tf.decode_raw(features['voxel_grid'], tf.float32)
        vg = tf.reshape(vg, [70, 70, 70])
        vg = tf.convert_to_tensor(vg, dtype=tf.float32)
        vg = {'voxel_grid': vg}
        e = tf.cast(features['binding_energy'], tf.float32)
        return vg, e

    def input_function():
        dataset = tf.data.TFRecordDataset(fname).map(_parse_elements)
        dataset = dataset.repeat(repeat_count)
        dataset = dataset.batch(5)
        dataset = dataset.prefetch(1)
        if perform_shuffle:
            dataset.shuffle(20)
        iterator = dataset.make_one_shot_iterator()
        batch_features, batch_labels = iterator.get_next()
        return batch_features, batch_labels

    return input_function
Model function:
def my_model_function(features, labels, mode):
    if mode == tf.estimator.ModeKeys.PREDICT:
        tf.logging.info("my_model_fn: PREDICT, {}".format(mode))
    elif mode == tf.estimator.ModeKeys.EVAL:
        tf.logging.info("my_model_fn: EVAL, {}".format(mode))
    elif mode == tf.estimator.ModeKeys.TRAIN:
        tf.logging.info("my_model_fn: TRAIN, {}".format(mode))

    feature_columns = [tf.feature_column.numeric_column('voxel_grid', shape=(70, 70, 70), dtype=tf.float32)]

    # Create the layer of input
    input_layer = tf.feature_column.input_layer(features, feature_columns)
    input_layer = tf.reshape(input_layer, [-1, 70, 70, 70, 1])

    # Convolution layers
    conv1 = tf.layers.conv3d(inputs=input_layer, strides=(2, 2, 2), filters=32, kernel_size=(7, 7, 7))
    conv2 = tf.layers.conv3d(inputs=conv1, strides=(2, 2, 2), filters=32, kernel_size=(7, 7, 7))
    pool3 = tf.layers.max_pooling3d(inputs=conv2, pool_size=[2, 2, 2], strides=2)
    flat = tf.layers.flatten(pool3)

    dense1 = tf.layers.dense(inputs=flat, units=10, activation=tf.nn.relu)
    dense2 = tf.layers.dense(inputs=dense1, units=10, activation=tf.nn.relu)
    output = tf.layers.dense(inputs=dense2, units=1)

    predictions = {'binding_energy': output}
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)

    # Calculate loss
    loss = tf.losses.mean_squared_error(labels=labels, predictions=predictions)

    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
        train_op = optimizer.minimize(loss=loss, global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)

    # Add evaluation metrics
    eval_metric_ops = {"mse": tf.metrics.mean_squared_error(labels=labels, predictions=predictions['binding_energy'])}
    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
When calling model.train using
model = tf.estimator.Estimator(model_fn=my_model_function, model_dir='./model_dir')
model.train(input_fn=my_input_function('A-100-h2-h2o.tfrecords'), steps=100)
I get the following error.
TypeError: Failed to convert object of type <class 'dict'> to Tensor.
Found it! Changing

# Calculate loss
loss = tf.losses.mean_squared_error(labels=labels, predictions=predictions)

to

# Calculate loss
loss = tf.losses.mean_squared_error(labels=labels, predictions=predictions['binding_energy'])

solves the issue: predictions is a dict here, and tf.losses.mean_squared_error expects a tensor, so passing the whole dict is what triggers the TypeError.
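As a side-by-side illustration of the difference, here is a minimal sketch with made-up tensors (not the question's actual model):

import tensorflow as tf

labels = tf.constant([[1.0], [2.0]])
output = tf.constant([[1.1], [1.9]])
predictions = {'binding_energy': output}

# Passing the tensor stored in the dict works:
loss = tf.losses.mean_squared_error(labels=labels, predictions=predictions['binding_energy'])

# Passing the dict itself is what raises
# TypeError: Failed to convert object of type <class 'dict'> to Tensor.
# loss = tf.losses.mean_squared_error(labels=labels, predictions=predictions)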
I'm doing text matching using TensorFlow. Before I call tf.nn.embedding_lookup(word_embedding_matrix, combine_result), I have to combine some words from the two sentences (get m words from sentence S1 and m words from sentence S2, then combine them together as "combine_result"). But when the code goes to tf.nn.embedding_lookup(word_embedding_matrix, combine_result) it gives me the error:
ValueError: Tensor("Reshape_7:0", shape=(1, 6), dtype=int32) must be
from the same graph as Tensor("word_embedding_matrix:0", shape=(26320,
50), dtype=float32_ref).
The code is as below:
import tensorflow as tf
import numpy as np
import os
import time
import datetime
import data_helpers

NUM_CLASS = 2
SEQUENCE_LENGTH = 47

# Placeholders for input, output and dropout
input_x = tf.placeholder(tf.int32, [None, 2, SEQUENCE_LENGTH], name="input_x")
input_y = tf.placeholder(tf.float32, [None, NUM_CLASS], name="input_y")
dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")


def n_grams(text, window_size):
    text_left_window = []
    # text_left_window = tf.convert_to_tensor(text_left_window, dtype=tf.int32)
    for z in range(SEQUENCE_LENGTH - 2):
        text_left = tf.slice(text, [z], [window_size])
        text_left_window = tf.concat(0, [text_left_window, text_left])
    text_left_window = tf.reshape(text_left_window, [-1, window_size])
    return text_left_window


def inference(vocab_size, embedding_size, batch_size, slide_window_size, conv_window_size):
    # # Embedding layer
    word_embedding_matrix = tf.Variable(tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0),
                                        name="word_embedding_matrix")
    # convo_unit = tf.Variable(tf.random_uniform([slide_window_size*2, ], -1.0, 1.0), name="convo_unit")
    text_comp_result = []
    for x in range(batch_size):
        # input_x_slice_reshape = [[1 1 1...]
        #                          [2 2 2...]]
        input_x_slice = tf.slice(input_x, [x, 0, 0], [1, 2, SEQUENCE_LENGTH])
        input_x_slice_reshape = tf.reshape(input_x_slice, [2, SEQUENCE_LENGTH])
        # text_left_flat: [294, 6, 2, 6, 2, 57, 2, 57, 147, 57, 147, 5, 147, 5, 2,...], length = SEQUENCE_LENGTH
        # text_right_flat: [17, 2, 2325, 2, 2325, 5366, 2325, 5366, 81, 5366, 81, 1238,...]
        text_left = tf.slice(input_x_slice_reshape, [0, 0], [1, SEQUENCE_LENGTH])
        text_left_flat = tf.reshape(text_left, [-1])
        text_right = tf.slice(input_x_slice_reshape, [1, 0], [1, SEQUENCE_LENGTH])
        text_right_flat = tf.reshape(text_right, [-1])
        # extract both text.
        # text_left_window: [[294, 6, 2], [6, 2, 57], [2, 57, 147], [57, 147, 5], [147, 5, 2],...]
        # text_right_window: [[17, 2, 2325], [2, 2325, 5366], [2325, 5366, 81], [5366, 81, 1238],...]
        text_left_window = n_grams(text_left_flat, slide_window_size)
        text_right_window = n_grams(text_right_flat, slide_window_size)
        text_left_window_sha = text_left_window.get_shape()
        print 'text_left_window_sha:', text_left_window_sha
        # composite the slice
        text_comp_list = []
        # text_comp_list = tf.convert_to_tensor(text_comp_list, dtype=tf.float32)
        for l in range(SEQUENCE_LENGTH - slide_window_size + 1):
            text_left_slice = tf.slice(text_left_window, [l, 0], [1, slide_window_size])
            text_left_slice_flat = tf.reshape(text_left_slice, [-1])
            for r in range(SEQUENCE_LENGTH - slide_window_size + 1):
                text_right_slice = tf.slice(text_right_window, [r, 0], [1, slide_window_size])
                text_right_slice_flat = tf.reshape(text_right_slice, [-1])
                # convo_unit = [294, 6, 2, 17, 2, 2325]
                convo_unit = tf.concat(0, [text_left_slice_flat, text_right_slice_flat])
                convo_unit_reshape = tf.reshape(convo_unit, [-1, slide_window_size * 2])
                # convo_unit_shape_val = convo_unit_reshape.get_shape()
                # print 'convo_unit_shape_val:', convo_unit_shape_val
                embedded_chars = tf.nn.embedding_lookup(word_embedding_matrix, convo_unit_reshape)
                embedded_chars_expanded = tf.expand_dims(embedded_chars, -1)
                ...
Could someone please help me? Thank you very much!
Yaroslav answered in a comment above - moving to an answer:
This error happens when you create a new default graph. Try to do tf.reset_default_graph() before the computation and don't create any more graphs (i.e., no extra calls to tf.Graph).
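A minimal sketch of both the failure mode and the suggested fix, using made-up shapes rather than the question's model:

import tensorflow as tf

# Failure mode: the variable lives in an explicitly created graph,
# while the lookup ids live in the default graph.
g1 = tf.Graph()
with g1.as_default():
    word_embedding_matrix = tf.Variable(tf.random_uniform([100, 8], -1.0, 1.0),
                                        name="word_embedding_matrix")

ids = tf.constant([[1, 2, 3]])
# tf.nn.embedding_lookup(word_embedding_matrix, ids)  # ValueError: ... must be from the same graph ...

# Fix: reset and build everything in one default graph.
tf.reset_default_graph()
word_embedding_matrix = tf.Variable(tf.random_uniform([100, 8], -1.0, 1.0),
                                    name="word_embedding_matrix")
ids = tf.constant([[1, 2, 3]])
embedded = tf.nn.embedding_lookup(word_embedding_matrix, ids)  # same graph, no error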
I want to remove 362968 from the list below:
list=[362976,362974,362971,362968,362969]
Code:
list.remove(362968)
I am getting the error: 'str' object has no attribute 'remove'
Actual code:

def matchmaker():
    exportersfree = exporters[:]
    engaged = {}
    exprefers2 = copy.deepcopy(exprefers)
    imprefers2 = copy.deepcopy(imprefers)
    while exportersfree:
        exporter = exportersfree.pop(0)
        exporterslist = exprefers2[exporter]
        importer = exporterslist.pop(0)
        match = engaged.get(importer)
        if not match:
            # impo's free
            engaged[importer] = exporter  # both parties are added to the engaged list
            importerslist = imprefers2[importer]
            for z in range(importerslist.index(exporter) - 1):
                importerslist.index(exporter)
                exprefers2[importerslist[z]].remove(importer)
            del importerslist[0:(importerslist.index(exporter) - 1)]
        else:
            engaged[importer] = exporter
            if exprefers2[match]:
                # Ex has more importers to try
                exportersfree.append(match)
    return engaged
Without additional code to really debug, exprefers2 is clearly a dict of strings; however, if you really want to delete the item, you can cast the string to a list, or eval the value to convert it into a list, then use list.remove:
import ast
list = [1, 2, 3, 4, 5, 6, 7]
list.remove(5)
print list
#[1, 2, 3, 4, 6, 7]
#Data Structure you most likely have
import_list = [1, 2]
exprefers2 = {1: "abc", 2: "xyz"}
print exprefers2[import_list[1]]
#xyz
#Or need to eval the string of a list
import_list = [1, 2]
exprefers2 = {1: u'[ "A","B","C" , " D"]', 2: u'[ "z","x","y" , " y"]'}
exprefers2[import_list[1]] = ast.literal_eval(exprefers2[import_list[1]])
exprefers2[import_list[1]].remove("y")
print exprefers2[import_list[1]]
#['z', 'x', ' y']
Try it this way then; name your list "a":
a = [362976,362974,362971,362968,362969]
a.remove(362968)
print a
I think you need to check whether x has actually been changed:
x = [1, 40, 33, 20]
x.remove(33)
print (x)
I have no idea what is wrong, but I keep getting this:
Exception in Tkinter callback
Traceback (most recent call last):
File "/Library/Frameworks/Python.framework/Versions/7.3/lib/python2.7/lib-tk/Tkinter.py", line 1410, in call
return self.func(*args)
File "/Users/Zane/Desktop/Factorial GUI.py", line 72, in reveal2
self.text2.insert(0.0, message)
File "/Library/Frameworks/Python.framework/Versions/7.3/lib/python2.7/lib-tk/Tkinter.py", line 2986, in insert
self.tk.call((self._w, 'insert', index, chars) + args)
TclError: wrong # args: should be ".22186144.22187184 insert index chars ?tagList chars tagList ...?"
Here is my code:
from Tkinter import *

class App(Frame):
    def fac(self, n):
        if n >= 0:
            if n == 1 or n == 0:
                return 1
            else:
                return n*self.fac(n-1)
        else:
            print('Error')

    def per(self, n, r):
        y = (self.fac(n)) / self.fac(n - r)
        print (y)

    def __init__(self, master):
        Frame.__init__(self, master)
        self.grid()
        self.create_widgets()

    def create_widgets(self):
        self.instruction1 = Label(self, text = "Factorial:")
        self.instruction1.grid(row = 0, column = 0, columnspan = 1, sticky = W)
        self.password1 = Entry(self)
        self.password1.grid(row = 0, column = 1, sticky = W)
        self.submit_button1 = Button(self, text ="Enter", command = self.reveal1)
        self.submit_button1.grid(row = 2, column = 0, sticky = W)
        self.text1 = Text(self, width = 30, height = 1, wrap = WORD)
        self.text1.grid(row = 3, column = 0, columnspan = 2, sticky = W)
        self.instruction2 = Label(self, text = "Permutation:")
        self.instruction2.grid(row = 4, column = 0, columnspan = 1, sticky = W)
        self.password2 = Entry(self)
        self.password2.grid(row = 4, column = 1, sticky = W)
        self.password3 = Entry(self)
        self.password3.grid(row = 6, column = 1, sticky = W)
        self.submit_button2 = Button(self, text ="Enter", command = self.reveal2)
        self.submit_button2.grid(row = 7, column = 0, sticky = W)
        self.text2 = Text(self, width = 30, height = 1, wrap = WORD)
        self.text2.grid(row = 8, column = 0, columnspan = 2, sticky = W)

    def reveal1(self):
        y = int(self.password1.get())
        message = self.fac(y)
        self.text1.delete(0.0, END)
        self.text1.insert(0.0, message)

    def reveal2(self):
        y = int(self.password2.get())
        z = int(self.password3.get())
        message = self.per(y, z)
        self.text2.delete(0.0, END)
        self.text2.insert(0.0, message)

root = Tk()
root.title('Factorial')
root.geometry("340x300")
app = App(root)
root.mainloop()
Almost the only way to get the error you say you get with the code you posted is if the insert method is called when the data to insert is None. message comes from the result of per, but per returns None because you don't explicitly return anything else.
One of the first things to try when debugging is to check that the data you're sending to the failing function is what you think it is. You can do this in a very low-tech way by simply printing out the values being passed to the insert method. This instantly told me that message was None. Once I learned that, it's pretty simple to answer the question "why was it None?".
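For example, a minimal change along those lines (a sketch of just the two methods involved, not the whole program):

def per(self, n, r):
    y = (self.fac(n)) / self.fac(n - r)
    return y  # return the result instead of only printing it

def reveal2(self):
    y = int(self.password2.get())
    z = int(self.password3.get())
    message = self.per(y, z)
    print(repr(message))  # low-tech check: confirm it is not None
    self.text2.delete(0.0, END)
    self.text2.insert(0.0, message)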