File descriptor out of range error while reading from Arduino - python-2.7

I am getting this error again and again while running my code.
Exception in thread Thread-213:
Traceback (most recent call last):
File "/usr/lib/python2.7/threading.py", line 810, in __bootstrap_inner
self.run()
File "/usr/local/lib/python2.7/dist-packages/pyfirmata/util.py", line 47, in run
self.board.iterate()
File "/usr/local/lib/python2.7/dist-packages/pyfirmata/pyfirmata.py", line 264, in iterate
byte = self.sp.read()
File "/usr/local/lib/python2.7/dist-packages/serial/serialposix.py", line 483, in read
ready, _, _ = select.select([self.fd, self.pipe_abort_read_r], [], [], timeout.time_left())
ValueError: filedescriptor out of range in select()
All I am trying to do is calculate power consumption in my project; I am sharing part of my code as well. My counter was still counting, but after getting this error it stopped.
Can I use the board without the iterator, or do I need to stop the iterator? This application should run continuously, 24x7, so there should be no chance of a code error. Please help me get past this, and correct me if I am wrong; I learned Python from the internet only.
After the error repeats for a while, the GUI update itself fails with:
Traceback (most recent call last):
File "/media/pi/abc/MYPythonGUI.py", line 127, in <lambda>
File "/media/pi/abc/MYPythonGUI.py", line 119, in updateLabel
IOError: [Errno 24] Too many open files: 'Memory'
Here's my code:
import sys
import logging
from PyQt4 import QtGui, QtCore
import datetime
import time
from pyfirmata import Arduino, util

logging.basicConfig(filename='test.ods', level=logging.INFO, format='%(asctime)s:%(message)s')

PowerAccumulated = [0, 0]
Mem = open('Memory', 'r')
PowerAccumulated[0] = float(Mem.read())
Mem.close()


class Window(QtGui.QMainWindow):

    def _Power_Calculations(Self):
        Date_labe2 = QtGui.QLabel((datetime.datetime.today()).strftime("%d/%m/%y %H:%M"), Self)
        newfont = QtGui.QFont("Times", 20, QtGui.QFont.Bold)
        Date_labe2.setFont(newfont)
        Date_labe2.setStyleSheet("color: blue")
        Date_labe2.resize(250, 120)
        Date_labe2.move(610, 400)

        power_label2 = QtGui.QLabel(str(PowerAccumulated[1]), Self)
        newfont = QtGui.QFont("Times", 20, QtGui.QFont.Bold)
        power_label2.setFont(newfont)
        power_label2.setStyleSheet("color: blue")
        power_label2.resize(270, 120)
        power_label2.move(440, 75)
        QtCore.QTimer.singleShot(500, lambda: Self.updateLabel(power_label2, Date_labe2))

        power_label3 = QtGui.QLabel("KW", Self)
        newfont = QtGui.QFont("Times", 20, QtGui.QFont.Bold)
        power_label3.setFont(newfont)
        power_label3.setStyleSheet("color: blue")
        power_label3.resize(270, 120)
        power_label3.move(600, 75)

        Self.show()

    def updateLabel(Self, power_label2, Date_labe2):
        try:
            # Opens a new serial connection and starts a new iterator
            # thread on every 500 ms update; neither is ever closed.
            board = Arduino('/dev/ttyACM0')
            iterator = util.Iterator(board)
            iterator.start()
            V1 = board.get_pin('a:0:i')
            time.sleep(0.1)
            Voltage = 100 * V1.read()
            I1 = board.get_pin('a:1:i')
            time.sleep(0.1)
            Current = 20 * float(I1.read())
            if Current < 0.0976:
                Current = 0.0976
        except:
            Voltage = 0.0
            Current = 0.0
        Power = round((Voltage * Current * 0.98 * 1.732) / 1000, 3)
        PowerAccumulated[1] = round(Power + PowerAccumulated[0], 3)
        Self.Voltage = Voltage
        Self.Current = Current
        Self.Power = Power
        Self.PowerAccumulated = PowerAccumulated[1]
        logging.info('Voltage:{} - Current:{} - Power:{} - Power Accumulation:{}'.format(
            Self.Voltage, Self.Current, Self.Power, Self.PowerAccumulated))
        Mem1 = open('Memory', 'w')
        Mem1.write(str(PowerAccumulated[1]))
        Mem1.close()
        PowerAccumulated[0] = PowerAccumulated[1]
        power_label2.setText(str(PowerAccumulated[1]))
        Date_labe2.setText((datetime.datetime.today()).strftime("%d/%m/%y %H:%M"))
        QtCore.QTimer.singleShot(500, lambda: Self.updateLabel(power_label2, Date_labe2))


def run():
    app = QtGui.QApplication(sys.argv)
    GUI = Window()
    sys.exit(app.exec_())

run()
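The Errno 24 traceback points at the likely cause: every updateLabel call opens a new serial connection and starts a new iterator thread, and neither is ever closed, so file descriptors accumulate until any open() or select() call fails (on Linux, select() only accepts descriptors below 1024). A minimal sketch of the usual fix, assuming the same pyfirmata API as above (names are illustrative and this is untested on hardware): open the board and register the pins once at startup, and only read values inside the timer callback.

from pyfirmata import Arduino, util
import time

# Open the serial connection and start the iterator once, for the whole
# lifetime of the process, instead of once per GUI update.
board = Arduino('/dev/ttyACM0')
iterator = util.Iterator(board)
iterator.start()
V1 = board.get_pin('a:0:i')   # register each pin once
I1 = board.get_pin('a:1:i')
time.sleep(0.1)               # let the first samples arrive

def read_power():
    # pin.read() returns None until a sample has arrived, hence "or 0.0"
    voltage = 100 * (V1.read() or 0.0)
    current = max(20 * (I1.read() or 0.0), 0.0976)
    return round(voltage * current * 0.98 * 1.732 / 1000, 3)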

Related

Django celery task unable to use cairo svg2pdf

I have a small task that reads an SVG file from a path and uses cairosvg's svg2pdf to convert it to PDF. If I run the function directly, without Celery's delay, it runs fine and converts the file. If I run it as a Celery task, I get these errors:
Traceback (most recent call last):
File "/..../tasks.py", line 24, in create_download_pdf
svg2pdf(bytestring=bytestring, write_to=completeName)
File "/.../lib/python3.10/site-packages/cairosvg/__init__.py", line 67, in svg2pdf
return surface.PDFSurface.convert(
File "/.../lib/python3.10/site-packages/cairosvg/surface.py", line 131, in convert
instance = cls(
File "/.../lib/python3.10/site-packages/cairosvg/surface.py", line 202, in __init__
self.cairo, self.width, self.height = self._create_surface(
File "/.../lib/python3.10/site-packages/cairosvg/surface.py", line 242, in _create_surface
cairo_surface = self.surface_class(self.output, width, height)
File "/.../lib/python3.10/site-packages/cairocffi/surfaces.py", line 876, in __init__
Surface.__init__(self, pointer, target_keep_alive=write_func)
File "/.../lib/python3.10/site-packages/cairocffi/surfaces.py", line 158, in __init__
self._check_status()
File "/../lib/python3.10/site-packages/cairocffi/surfaces.py", line 170, in _check_status
_check_status(cairo.cairo_surface_status(self._pointer))
File "/../lib/python3.10/site-packages/cairocffi/__init__.py", line 88, in _check_status
raise exception(message, status)
OSError: [Errno cairo returned CAIRO_STATUS_WRITE_ERROR: b'error while writing to output stream'] 11
Here is the function:
@app.task(name="create_download_pdf")
def create_download_pdf(folder_path: str, svg_data=None, filename=None, file_path=None) -> None:
    try:
        completeName = os.path.join(folder_path, f"{filename}.pdf")
        if svg_data:
            svg2pdf(bytestring=svg_data, write_to=completeName)
        elif file_path:
            with open(file_path, 'r') as f:
                bytestring = f.read()
            print(bytestring)
            svg2pdf(bytestring=bytestring, write_to=completeName)
    except (OSError, ValueError):
        logger.exception(
            f"PDF creation and download exception: Unable to download | create PDF "
            f"from svg data for {str(filename) or ''}.\n{traceback.format_exc()}"
        )
How can this be solved? I am not sending a file to the Celery task, so it's not that problem.
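One thing worth ruling out (an assumption on my part, not something stated in the post): CAIRO_STATUS_WRITE_ERROR means cairo failed to write to its output stream, which typically happens when the process cannot create completeName, for example when folder_path does not exist in the Celery worker's environment or is not writable by the worker's user. A quick check before calling svg2pdf:

import os

# Fail fast, with a clearer error than cairo's WRITE_ERROR, if the target
# directory is missing or unwritable in the worker process.
os.makedirs(folder_path, exist_ok=True)
if not os.access(folder_path, os.W_OK):
    raise PermissionError(f"Celery worker cannot write to {folder_path}")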

Seaborn KDE visualisation, value error on dataset

I am attempting to visualise a KDE plot in Seaborn, but am encountering an error when passing in the data.
The data is a set of scores ranging from 1 to 13, in the form of a NumPy array.
Below is the section of code I'm using.
import numpy as np
import pandas as pd
import seaborn as sns

query_CNM = 'SELECT SCORE from CNMATCH LIMIT 2000'
df = pd.read_sql(query_CNM, conn, index_col=None)   # conn: an existing DB connection
yy = np.array(df)
plot = sns.kdeplot(yy)
Below is the full error that I'm receiving.
Traceback (most recent call last):
File "/Applications/PyCharm CE.app/Contents/helpers/pydev/pydevd.py", line 1758, in <module>
main()
File "/Applications/PyCharm CE.app/Contents/helpers/pydev/pydevd.py", line 1752, in main
globals = debugger.run(setup['file'], None, None, is_module)
File "/Applications/PyCharm CE.app/Contents/helpers/pydev/pydevd.py", line 1147, in run
pydev_imports.execfile(file, globals, locals) # execute the script
File "/Users/uni/Desktop/Proof_Of_Concept/PYQTKDE.py", line 66, in <module>
plot = sns.kdeplot(yy)
File "/Users/uni/.conda/envs/fing.py/lib/python2.7/site-packages/seaborn/distributions.py", line 664, in kdeplot
x, y = data.T
ValueError: need more than 1 value to unpack
I can't seem to find exactly how the data needs to be formatted for seaborn to fit a KDE; any insights would be greatly appreciated.
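The failing line x, y = data.T suggests kdeplot received a two-dimensional array: np.array(df) on a single-column DataFrame has shape (n, 1), and this seaborn version then tries to unpack it as bivariate x, y data. A sketch of the likely fix (assuming SCORE is a numeric column):

import seaborn as sns

# Pass a 1-D array of shape (n,) instead of the (n, 1) array np.array(df) gives.
yy = df['SCORE'].values        # or: np.array(df).ravel()
plot = sns.kdeplot(yy)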

NoneType has no attribute 'select' KerasDML SystemML

I'm having an issue running the example Keras2DML code on this page. When I run it, I get this error:
Traceback (most recent call last):
File "/home/fregy/kerasplayground/sysml/examplenn.py", line 12, in <module>
sysml_model = Keras2DML(spark, keras_model,input_shape=(3,224,224))
File "/usr/local/lib/python2.7/dist-packages/systemml/mllearn/estimators.py", line 909, in __init__
convertKerasToCaffeNetwork(keras_model, self.name + ".proto")
File "/usr/local/lib/python2.7/dist-packages/systemml/mllearn/keras2caffe.py", line 201, in convertKerasToCaffeNetwork
jsonLayers = list(chain.from_iterable(imap(lambda layer: _parseKerasLayer(layer), kerasModel.layers)))
File "/usr/local/lib/python2.7/dist-packages/systemml/mllearn/keras2caffe.py", line 201, in <lambda>
jsonLayers = list(chain.from_iterable(imap(lambda layer: _parseKerasLayer(layer), kerasModel.layers)))
File "/usr/local/lib/python2.7/dist-packages/systemml/mllearn/keras2caffe.py", line 137, in _parseKerasLayer
ret = { 'layer': { 'name':layer.name, 'type':supportedLayers[layerType], 'bottom':_getBottomLayers(layer), 'top':layer.name, paramName:param[paramName] } }
File "/usr/local/lib/python2.7/dist-packages/systemml/mllearn/keras2caffe.py", line 112, in _getBottomLayers
return [ bottomLayer.name for bottomLayer in _getInboundLayers(layer) ]
File "/usr/local/lib/python2.7/dist-packages/systemml/mllearn/keras2caffe.py", line 70, in _getInboundLayers
for node in layer.inbound_nodes: # get inbound nodes to current layer
AttributeError: 'Conv2D' object has no attribute 'inbound_nodes'
Exception in thread Thread-1:
Traceback (most recent call last):
File "/usr/lib/python2.7/threading.py", line 801, in __bootstrap_inner
self.run()
File "/usr/lib/python2.7/threading.py", line 754, in run
self.__target(*self.__args, **self.__kwargs)
File "/usr/lib/python2.7/SocketServer.py", line 230, in serve_forever
r, w, e = _eintr_retry(select.select, [self], [], [],
AttributeError: 'NoneType' object has no attribute 'select'
I'm using TensorFlow-GPU 1.5 and Keras 2.1.3.
Thanks for trying out Keras2DML. The issue arises because newer Keras versions renamed the attribute inbound_nodes to _inbound_nodes. This was fixed in yesterday's commit: https://github.com/apache/systemml/commit/9c3057a34c84d5bf1c698ad0a5c3c34d90412dbb.
Since you are using TensorFlow-GPU, you may want to check with nvidia-smi whether TF grabs most of the GPU memory when the Keras model is compiled. If it does, here are two easy workarounds:
a. Hide GPUs from TF:
import os

os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = ''
import tensorflow as tf
b. Or minimize the overhead due to TensorFlow:
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session

tf_config = tf.ConfigProto()
tf_config.gpu_options.allow_growth = True
set_session(tf.Session(config=tf_config))

pysparkDistributedKmodes lib error

I'm trying to run the pyspark-distributed-kmodes example:
import numpy as np
data = np.random.choice(["a", "b", "c"], (50000, 10))
data2 = np.random.choice(["e", "f", "g"], (50000, 10))
data = list(data) + list(data2)
from random import shuffle
shuffle(data)
# Create a Spark RDD from our sample data and decrease partitions to max_partions
max_partitions = 32
rdd = sc.parallelize(data)
rdd = rdd.coalesce(max_partitions)
for x in rdd.take(10):
    print x
method = EnsembleKModes(n_clusters, max_iter)
model = method.fit(df.rdd)
print(model.clusters)
print(method.mean_cost)
predictions = method.predictions
datapoints = method.indexed_rdd
combined = datapoints.zip(predictions)
print(combined.take(10))
model.predict(rdd).take(5)
I'm using Python 2.7, Apache Zeppelin 0.7.1 and Apache Spark 2.1.0.
This is the output error:
('Iteration ', 0)
Traceback (most recent call last):
File "/tmp/zeppelin_pyspark-1298251609305129154.py", line 349, in <module>
raise Exception(traceback.format_exc())
Exception: Traceback (most recent call last):
File "/tmp/zeppelin_pyspark-1298251609305129154.py", line 337, in <module>
exec(code)
File "<stdin>", line 13, in <module>
File "/usr/local/lib/python2.7/dist-packages/pyspark_kmodes/pyspark_kmodes.py", line 430, in fit
self.n_clusters,self.max_dist_iter)
File "/usr/local/lib/python2.7/dist-packages/pyspark_kmodes/pyspark_kmodes.py", line 271, in k_modes_partitioned
clusters = check_for_empty_cluster(clusters, rdd)
File "/usr/local/lib/python2.7/dist-packages/pyspark_kmodes/pyspark_kmodes.py", line 317, in check_for_empty_cluster
random_element = random.choice(clusters[biggest_cluster].members)
File "/usr/lib/python2.7/random.py", line 275, in choice
return seq[int(self.random() * len(seq))] # raises IndexError if seq is empty
IndexError: list index out of range
The RDD used to fit the model is not empty; I've checked it. I think there is a version incompatibility between pyspark-distributed-kmodes and Spark, but I can't downgrade Spark.
Any idea how to fix it?
What is df? This doesn't look like a Spark error. The code from https://github.com/ThinkBigAnalytics/pyspark-distributed-kmodes works for me under Spark 2.1.0, even after changing this line of your code:
method = EnsembleKModes(n_clusters, max_iter)
model = method.fit(rdd)
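For reference, a sketch of the pieces the snippet leaves undefined (the import path is inferred from the traceback above; the parameter values are illustrative, not from the original post):

from pyspark_kmodes import EnsembleKModes   # package name as it appears in the traceback

n_clusters = 2    # illustrative; not given in the question
max_iter = 10     # illustrative

method = EnsembleKModes(n_clusters, max_iter)
model = method.fit(rdd)   # fit on the RDD built above rather than df.rdd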

Tensorflow FailedPreconditionError while trying to save frozen model

I'm training a CNN with word embeddings, and for some reason I get a FailedPreconditionError exception whenever I try to save a frozen version of the model for later use.
This happens despite the fact that I call sess.run(tf.global_variables_initializer()) just before training, and I have no problem training and checkpointing the model.
The problem occurs when I try to load the model from a checkpoint and save a frozen copy. The function I'm using is as follows:
import tensorflow as tf
from tensorflow.python.framework import graph_util

def freeze_model(checkpoint_path, model_save_path, output_node_names):
    checkpoint = tf.train.get_checkpoint_state(checkpoint_path)
    input_checkpoint = checkpoint.model_checkpoint_path

    saver = tf.train.import_meta_graph(input_checkpoint + '.meta', clear_devices=True)
    graph = tf.get_default_graph()
    input_graph_def = graph.as_graph_def()

    with tf.Session() as sess:
        saver.restore(sess, input_checkpoint)
        output_graph_def = graph_util.convert_variables_to_constants(
            sess,
            input_graph_def,
            output_node_names
        )
        with tf.gfile.GFile(model_save_path, "wb") as f:
            f.write(output_graph_def.SerializeToString())
The error I get is:
Traceback (most recent call last):
File "myproject/train.py", line 522, in <module>
tf.app.run()
File "/home/foo/anaconda2/lib/python2.7/site-packages/tensorflow/python/platform/app.py", line 44, in run
_sys.exit(main(_sys.argv[:1] + flags_passthrough))
File "myproject/train.py", line 518, in main
trainer.save_model(preprocessor)
File "myproject/train.py", line 312, in save_model
ut.freeze_model(self.checkpoint_dir, model_save_path, C.OUTPUT_NODE_NAMES)
File "/home/foo/anaconda2/lib/python2.7/site-packages/myproject/utils.py", line 224, in freeze_model
output_node_names
File "/home/foo/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/graph_util_impl.py", line 218, in convert_variables_to_constants
returned_variables = sess.run(variable_names)
File "/home/foo/anaconda2/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 767, in run
run_metadata_ptr)
File "/home/foo/anaconda2/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 965, in _run
feed_dict_string, options, run_metadata)
File "/home/foo/anaconda2/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 1015, in _do_run
target_list, options, run_metadata)
File "/home/foo/anaconda2/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 1035, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.FailedPreconditionError: Attempting to use uninitialized value embeddings/W
[[Node: embeddings/W/_20 = _Send[T=DT_FLOAT, client_terminated=false, recv_device="/job:localhost/replica:0/task:0/cpu:0", send_device="/job:localhost/replica:0/task:0/gpu:0", send_device_incarnation=1, tensor_name="edge_30_embeddings/W", _device="/job:localhost/replica:0/task:0/gpu:0"](embeddings/W)]]
[[Node: conv_maxpool_4/W/_17 = _Recv[_start_time=0, client_terminated=false, recv_device="/job:localhost/replica:0/task:0/cpu:0", send_device="/job:localhost/replica:0/task:0/gpu:0", send_device_incarnation=1, tensor_name="edge_26_conv_maxpool_4/W", tensor_type=DT_FLOAT, _device="/job:localhost/replica:0/task:0/cpu:0"]()]]
Turns out I was constructing a Saver object before I made a Session, so nothing from the session was being saved.
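A minimal sketch of the working order of operations (illustrative TF 1.x code, not the poster's actual training script):

import tensorflow as tf

graph = tf.Graph()
with graph.as_default():
    # Build the model and its variables first ...
    W = tf.get_variable('embeddings/W', shape=[100, 128])   # illustrative variable
    # ... then create the Saver, so it captures those variables.
    saver = tf.train.Saver()

with tf.Session(graph=graph) as sess:
    sess.run(tf.global_variables_initializer())
    # ... training loop ...
    saver.save(sess, './model')   # illustrative checkpoint path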