Re-using Queued Data in Python - python-2.7

I have a data stream that adds price information to a queue, where it is consumed while processing an order. Could someone advise how I could save the last price streamed and only send to the queue if the new price is (for instance) higher than the last, or vice versa?
Here is my code, thanks in advance for any help!
def stream_to_queue(self):
    response = self.connect_to_stream()
    if response.status_code != 200:
        return
    for line in response.iter_lines(1):
        if line:
            try:
                msg = json.loads(line)
            except Exception as e:
                print "Caught exception when converting message into json\n" + str(e)
                return
            if msg.has_key("instrument") or msg.has_key("tick"):
                print msg["tick"]
                instrument = msg["tick"]["instrument"]
                time = msg["tick"]["time"]
                bid = msg["tick"]["bid"]
                ask = msg["tick"]["ask"]
                stop = msg["tick"]["ask"]
                tev = TickEvent(instrument, time, bid, ask)
                self.events_queue.put(tev)
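One approach (a minimal sketch, untested against the original code: last_bid is an attribute name I introduced, which you would initialise to None in __init__) is to remember the previous price and compare it before queueing:

def stream_to_queue(self):
    response = self.connect_to_stream()
    if response.status_code != 200:
        return
    for line in response.iter_lines(1):
        if not line:
            continue
        msg = json.loads(line)
        if not msg.has_key("tick"):
            continue
        tick = msg["tick"]
        bid = tick["bid"]
        # Only queue the tick if the bid rose (flip the comparison for falls).
        if self.last_bid is None or bid > self.last_bid:
            tev = TickEvent(tick["instrument"], tick["time"], bid, tick["ask"])
            self.events_queue.put(tev)
        # Always remember the most recent price for the next comparison.
        self.last_bid = bid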

Related

pcap parsing in python2.7

To carry on from this question: https://stackoverflow.com/questions/9330686/parsing-pcap-in-python-2-6
I'm now trying to print the summary, but I'm still not sure what to include in the arguments to print_summary. Please see the code below:
def run_example():
    global total_packet_count, total_bytes_count, average_pkt_size
    try:
        dmp_file = sys.argv[1]
        fp_dmp_file = open(dmp_file)
    except Exception as e:
        print 'Error: please supply pcap filename!\n'
        return
    pcap = dpkt.pcap.Reader(fp_dmp_file)
    for ts, buf in pcap:
        eth = dpkt.ethernet.Ethernet(buf)
        ip = eth.data
        tcp = ip.data
        src_ip = socket.inet_ntoa(ip.src)
        src_port = str(ip.data.sport)
        dst_ip = socket.inet_ntoa(ip.dst)
        dst_port = str(ip.data.dport)
        if type(ip.data) == dpkt.tcp.TCP:
            protocol = 'tcp'
        elif type(ip.data) == dpkt.udp.UDP:
            protocol = 'udp'
        print_packet_info(ts, src_ip, src_port, dst_ip, dst_port, protocol, ip.len, ip.ttl)
    print_summary(len(total_packet_count), len(total_bytes_count), len(average_pkt_size))
    fp_dmp_file.close()

if __name__ == '__main__':
    run_example()
I managed to print the packet data but am still unable to print the summary. I guess I need to accumulate the count values in the globals to be able to print the summary.
Any help is much appreciated.
So firstly, we need to declare the global variables again at the top of the file, after the imports, so they can be used outside def run_example().
Then, after "dst_port", we can update our summary variables: the first one increments the packet count for the file, and the second one accumulates the packet sizes in bytes (this can be found in the dpkt manual). Lastly, the arguments I passed to print_summary weren't right. Instead we pass the variables we defined; for the average we divide total_bytes by total_packets, which gives us the average packet size.

Error while closing the python serial port

" i am trying to read data from the serial connection and doing some stuff if it matches my string but its giving me errors when i close the serial connection port"
" for some reason i do not see this error if i use the serial.readline() method "
import time
import serial
from Queue import Queue
from threading import Thread

class NonBlocking:
    def __init__(self, serial_connection, radio_serial_connection):
        self._s = serial_connection
        self._q = Queue()
        self.buf = bytearray()

        def _populateQueue(serial_connection, queue):
            if type(serial_connection) == str:
                return
            self.s = serial_connection
            while True:
                i = self.buf.find(b"\n")
                if i >= 0:
                    r = self.buf[:i + 1]
                    self.buf = self.buf[i + 1:]
                    queue.put(r)
                while True:
                    i = max(1, min(2048, self.s.in_waiting))
                    data = self.s.read(i)
                    i = data.find(b"\n")
                    if i >= 0:
                        r = self.buf + data[:i + 1]
                        self.buf[0:] = data[i + 1:]
                        a = r.split('\r\n')
                        for item in a:
                            if item:
                                queue.put(item)
                    else:
                        self.buf.extend(data)

        self._t = Thread(target=_populateQueue, args=(self._s, self._q))
        self._t.daemon = True
        self._t.start()

    def read_all(self, timeout=None):
        data = list()
        if self._q.empty():
            pass
        while not self._q.empty():
            data.append(self._q.get(block=timeout is not None, timeout=timeout))
        return data

class SerialCommands:
    def __init__(self, port, baudrate):
        self.serial_connection = serial.Serial(port, baudrate)
        self.queue_data = NonBlocking(self.serial_connection, '')

    def read_data(self):
        returned_info = self.queue_data.read_all()
        return returned_info

    def close_q(self):
        self.serial_connection.close()

class qLibrary:
    def __init__(self):
        self.q = None
        self.port = None

    def close_q_connection(self):
        self.q.close_q()

    def establish_connection_to_q(self, port, baudrate=115200, delay=2):
        self.delay = int(delay)
        self.port = port
        try:
            if not self.q:
                self.q = SerialCommands(self.port, int(baudrate))
        except IOError:
            raise AssertionError('Unable to open {0}'.format(port))

    def verify_event(self, data, timeout=5):
        timeout = int(timeout)
        data = str(data)
        # print data
        while timeout:
            try:
                to_analyze = self.q.read_data()
                for item in to_analyze:
                    print "item: ", item
                    if str(item).find(str(data)) > -1:
                        print "Found data: '{0}' in string: '{1}'".format(data, item)
            except:
                pass
            time.sleep(1)
            timeout -= 1

if __name__ == '__main__':
    q1 = qLibrary()
    q1.establish_connection_to_q('COM5')
    q1.verify_event("ATE")
    q1.close_q_connection()
" i expect the code to close the serial connection without any exceptions or errors "
the output is
Exception in thread Thread-1:
Traceback (most recent call last):
  File "C:\Python27\Lib\threading.py", line 801, in __bootstrap_inner
    self.run()
  File "C:\Python27\Lib\threading.py", line 754, in run
    self.__target(*self.__args, **self.__kwargs)
  File "C:/Program Files (x86)/serialtest1.py", line 27, in _populateQueue
    data = self.s.read(i)
  File "C:\Program Files (x86)\venv\lib\site-packages\serial\serialwin32.py", line 283, in read
    ctypes.byref(self._overlapped_read))
TypeError: byref() argument must be a ctypes instance, not 'NoneType'
If you define your serial port with no timeout it gets the default setting timeout=None, which means that when you call serial.read(x) the code will block until it has read x bytes.
If you never get those x bytes your code will get stuck in there waiting forever, or at least until you receive more data on the buffer to get the total number of bytes received equal to x.
If you mix that up with threading, I'm afraid you are quite likely closing the port while you are trying to read.
You can probably fix this issue just defining a sensible read timeout on your port or changing the way you read. The general advice is to set a timeout that works for your application and read at least the maximum number of bytes you expect. Reading your code, that seems to be what you wanted to do. If so, you forgot to set the timeout.
If you have a reason not to set a timeout or you want to keep your reading routine as it is, you can make your code work if you cancel reading before closing. You can do that with serial.cancel_read()
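For example, a minimal sketch of both fixes applied to the SerialCommands class above (cancel_read() requires pyserial 3.1 or later; either change alone may be enough for your case):

class SerialCommands:
    def __init__(self, port, baudrate):
        # A read timeout makes serial.read(x) return after at most 1 second
        # instead of blocking forever waiting for x bytes.
        self.serial_connection = serial.Serial(port, baudrate, timeout=1)
        self.queue_data = NonBlocking(self.serial_connection, '')

    def close_q(self):
        # Cancel any blocking read first, so the reader thread is not stuck
        # inside serial.read() at the moment the port goes away.
        self.serial_connection.cancel_read()
        self.serial_connection.close()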

Can't receive gRPC stream

I am trying to receive a stream from a gRPC server whose code is not accessible to me.
Here is the code of my gRPC client:
class Client():
    def __init__(self, grpcIP, grpcPort, params):
        self.channel = grpc.insecure_channel('%s:%d' % (grpcIP, grpcPort))
        grpc.channel_ready_future(self.channel).result()
        self.stub = pb2_grpc.DataCloneStub(self.channel)
        self.params = params
        self.host = grpcIP

    def StartSender(self):
        params = pb2.StartParameters(**self.params)
        try:
            res = self.stub.Start(params)
            print(type(res))
            for pr in res:
                print(pr.current_progress)
        except grpc.RpcError as e:
            print(e)
Here are snippets from proto file that is used.
Method:
rpc Start (StartParameters) returns (stream Progress) {}
Message in stream:
message Progress {
    double current_progress = 1;
    uint64 total_sent = 2;
    uint64 total_size = 3;
    string file_name = 4;
    Error error = 5;
}
As I understand it, self.stub.Start(params) should return an iterator yielding objects of type Progress. The problem is that it returns something of type grpc._channel._Rendezvous, and I can't iterate through the response. It doesn't catch any exceptions either.
Did someone experience such behavior? Is it possible that the issue comes not from the client side?
The class grpc._channel._Rendezvous is also an iterator; you can check the streaming RPC example here.
The code you posted is a correct implementation. If the program blocks at the for pr in res line, it is possible that the server is not sending any response (or is taking a long time), hence the client side is blocked.
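One way to confirm that from the client (a minimal sketch; the 10-second deadline is an arbitrary value I chose, and StartParameters/Start come from the question's code) is to pass a deadline to the call, so a silent server surfaces as DEADLINE_EXCEEDED instead of blocking forever:

def StartSender(self):
    params = pb2.StartParameters(**self.params)
    try:
        # The timeout (in seconds) puts a deadline on the whole stream.
        res = self.stub.Start(params, timeout=10)
        for pr in res:
            print(pr.current_progress)
    except grpc.RpcError as e:
        if e.code() == grpc.StatusCode.DEADLINE_EXCEEDED:
            # No Progress message arrived within the deadline:
            # the server side is likely not sending anything.
            print('server sent nothing within the deadline')
        else:
            print(e)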

Python Twisted sending large a file across network

I am trying to send a file across the network using Twisted with the LineReceiver protocol. The issue I am seeing is that when I read a binary file and try to send the chunks, they simply don't send.
Here is the sample code:
import json
import time
import threading
from twisted.internet import reactor, threads
from twisted.protocols.basic import LineReceiver
from twisted.internet import protocol

MaximumMsgSize = 15500
trySend = True
connectionToServer = None

class ClientInterfaceFactory(protocol.Factory):
    def buildProtocol(self, addr):
        return WoosterInterfaceProtocol(self._msgProcessor, self._logger)

class ClientInterfaceProtocol(LineReceiver):
    def connectionMade(self):
        connectionToServer = self

    def _DecodeMessage(self, rawMsg):
        header, body = json.loads(rawMsg)
        return (header, json.loads(body))

    def ProcessIncomingMsg(self, rawMsg, connObject):
        # Decode raw message.
        decodedMsg = self._DecodeMessage(rawMsg)
        self.ProccessTransmitJobToNode(decodedMsg, connObject)

    def _BuildMessage(self, id, msgBody = {}):
        msgs = []
        fullMsgBody = json.dumps(msgBody)
        msgBodyLength = len(fullMsgBody)
        totalParts = 1 if msgBodyLength <= MaximumMsgSize else \
            int(math.ceil(msgBodyLength / MaximumMsgSize))
        startPoint = 0
        msgBodyPos = 0
        for partNo in range(totalParts):
            msgBodyPos = (partNo + 1) * MaximumMsgSize
            header = {'ID' : id, 'MsgParts' : totalParts,
                      'MsgPart' : partNo }
            msg = (header, fullMsgBody[startPoint:msgBodyPos])
            jsonMsg = json.dumps(msg)
            msgs.append(jsonMsg)
            startPoint = msgBodyPos
        return (msgs, '')

    def ProccessTransmitJobToNode(self, msg, connection):
        rootDir = '../documentation/configs/Wooster'
        exportedFiles = ['consoleLog.txt', 'blob.dat']
        params = {
            'Status' : 'buildStatus',
            'TaskID' : 'taskID',
            'Name' : 'taskName',
            'Exports' : len(exportedFiles),
        }
        msg, statusStr = self._BuildMessage(101, params)
        connection.sendLine(msg[0])
        for filename in exportedFiles:
            with open(filename, "rb") as exportFileHandle:
                data = exportFileHandle.read().encode('base64')
            params = {
                ExportFileToMaster_Tag.TaskID : taskID,
                ExportFileToMaster_Tag.FileContents : data,
                ExportFileToMaster_Tag.Filename : filename
            }
            msgs, _ = self._BuildMessage(MsgID.ExportFileToMaster, params)
            for m in msgs:
                connection.sendLine(m)

    def lineReceived(self, data):
        threads.deferToThread(self.ProcessIncomingMsg, data, self)

def ConnectFailed(reason):
    print 'Connection failed..'
    reactor.callLater(20, reactor.callFromThread, ConnectToServer)

def ConnectToServer():
    print 'Connecting...'
    from twisted.internet.endpoints import TCP4ClientEndpoint
    endpoint = TCP4ClientEndpoint(reactor, 'localhost', 8181)
    deferItem = endpoint.connect(factory)
    deferItem.addErrback(ConnectFailed)

netThread = threading.Thread(target=reactor.run, kwargs={"installSignalHandlers": False})
netThread.start()
reactor.callFromThread(ConnectToServer)
factory = ClientInterfaceFactory()
protocol = ClientInterfaceProtocol()

while 1:
    time.sleep(0.01)
    if connectionToServer == None: continue
    if trySend == True:
        protocol.ProccessTransmitJobToNode(None, None)
        trySend = False
Is there something I am doing wrong? If a single write occurs then the file is sent; it's when the write is multi-part or there is more than one file that it struggles.
Note: I have updated the question with a crude piece of sample code in the hope it makes sense.
_BuildMessage returns a two-tuple: (msgs, '').
Your network code iterates over this:
msgs = self._BuildMessage(MsgID.ExportFileToMaster, params)
for m in msgs:
So your network code first tries to send a list of json encoded data and then tries to send the empty string. It most likely raises an exception because you cannot send a list of anything using sendLine. If you aren't seeing the exception, you've forgotten to enable logging. You should always enable logging so you can see any exceptions that occur.
Also, you're using time.sleep and you shouldn't do this in a Twisted-based program. If you're doing this to try to avoid overloading the receiver, you should use TCP's native backpressure instead by registering a producer which can receive pause and resume notifications. Regardless, time.sleep (and your loop over all the data) will block the entire reactor thread and prevent any progress from being made. The consequence is that most of the data will be buffered locally before being sent.
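For reference, a push producer looks roughly like this (a sketch, independent of the question's code; ChunkProducer is a name I made up, while IPushProducer and registerProducer are the Twisted APIs involved):

from zope.interface import implementer
from twisted.internet.interfaces import IPushProducer

@implementer(IPushProducer)
class ChunkProducer(object):
    """Streams chunks to a transport, pausing when TCP back-pressure kicks in."""
    def __init__(self, transport, chunks):
        self.transport = transport
        self.chunks = iter(chunks)
        self.paused = False

    def resumeProducing(self):
        # Write until the transport pauses us or we run out of data.
        self.paused = False
        for chunk in self.chunks:
            self.transport.write(chunk)
            if self.paused:
                return
        self.transport.unregisterProducer()

    def pauseProducing(self):
        self.paused = True

    def stopProducing(self):
        self.paused = True

# Registration: streaming=True marks this as a push producer.
# producer = ChunkProducer(transport, chunks)
# transport.registerProducer(producer, True)
# producer.resumeProducing()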
Also, your code calls LineReceiver.sendLine from a non-reactor thread. This has undefined results but you can probably count on it to not work.
This loop runs in the main thread:
while 1:
    time.sleep(0.01)
    if connectionToServer == None: continue
    if trySend == True:
        protocol.ProccessTransmitJobToNode(None, None)
        trySend = False
while the reactor runs in another thread:
netThread = threading.Thread(target=reactor.run, kwargs={"installSignalHandlers": False})
netThread.start()
ProccessTransmitJobToNode simply calls self.sendLine:
def ProccessTransmitJobToNode(self, msg, connection):
    rootDir = '../documentation/configs/Wooster'
    exportedFiles = ['consoleLog.txt', 'blob.dat']
    params = {
        'Status' : 'buildStatus',
        'TaskID' : 'taskID',
        'Name' : 'taskName',
        'Exports' : len(exportedFiles),
    }
    msg, statusStr = self._BuildMessage(101, params)
    connection.sendLine(msg[0])
You should probably remove the use of threading entirely from the application. Time-based events are better managed using reactor.callLater (your main-thread loop effectively generates a call to ProccessTransmitJobToNode one hundred times a second, modulo the effects of the trySend flag).
You may also want to take a look at https://github.com/twisted/tubes as a better way to manage large amounts of data with Twisted.
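For illustration, a single-threaded skeleton of the same program might look like this (a sketch under the assumption that the factory's buildProtocol returns ClientInterfaceProtocol, which the question's code does not currently do, and that the other protocol methods stay as above):

from twisted.internet import reactor
from twisted.internet.endpoints import TCP4ClientEndpoint
from twisted.protocols.basic import LineReceiver

class ClientInterfaceProtocol(LineReceiver):
    def connectionMade(self):
        # Kick off transmission from the reactor thread instead of polling
        # a trySend flag in a separate main-thread loop.
        self.ProccessTransmitJobToNode(None, self)

def ConnectToServer():
    endpoint = TCP4ClientEndpoint(reactor, 'localhost', 8181)
    deferItem = endpoint.connect(factory)
    # Already on the reactor thread, so callLater schedules the retry directly.
    deferItem.addErrback(lambda reason: reactor.callLater(20, ConnectToServer))

factory = ClientInterfaceFactory()
reactor.callWhenRunning(ConnectToServer)
reactor.run()  # the reactor owns the main thread; no while/time.sleep loop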

Django Model Objects Bulk Update

I have a reset_company_limits task for updating a company's limits; it runs daily in the morning. But somehow some company limits fail to update, and I am not able to find any error in my code.
def reset_company_limits(comp_id=None, reset_type=['daily', 'monthly']):
    if comp_id:
        company_accounts = CompanyAccount.objects.filter(id=comp_id, enabled=1)
    else:
        company_accounts = CompanyAccount.objects.filter(enabled=1)
    reset, rcache, cache_rec = ({} for l in xrange(3))
    error = []
    for rtype in reset_type:
        for account in company_accounts:
            try:
                counts = account.get_feature_reset_counts(feature_type=reset_type)
                limits = account.get_limits()
                recruiters = account.recruiter_set.filter(is_active=True).values('pk')
                for limit in limits:
                    key = RECRUITER_LOOKUP[limit.type]
                    if key in counts[rtype]:
                        if rtype == 'daily':
                            limit.daily = counts[rtype][key]
                        elif rtype == 'monthly':
                            limit.monthly = counts[rtype][key]
                        limit.save()
            except Exception, e:
                error.append(account.pk)
                print "Error occurred for company id %s :: %s" % (account.pk, e)
    return reset, rcache, cache_rec, error
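One thing that stands out (an observation, not a confirmed fix): inside for rtype in reset_type:, the code passes the whole reset_type list to get_feature_reset_counts instead of the current rtype, so the counts[rtype] lookup may not see the data it expects. A minimal sketch of that one change:

for rtype in reset_type:
    for account in company_accounts:
        try:
            # Pass the current period ('daily' or 'monthly'), not the whole list.
            counts = account.get_feature_reset_counts(feature_type=rtype)
            for limit in account.get_limits():
                key = RECRUITER_LOOKUP[limit.type]
                if key in counts[rtype]:
                    if rtype == 'daily':
                        limit.daily = counts[rtype][key]
                    elif rtype == 'monthly':
                        limit.monthly = counts[rtype][key]
                    limit.save()
        except Exception, e:
            error.append(account.pk)
            print "Error occurred for company id %s :: %s" % (account.pk, e)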