I am trying to build a basic messaging system, but I have hit a major roadblock. I can't get the window to close without it hanging and me having to kill it in the Task Manager. From what I've read online, it sounds like I need to call sys.exit(0) to shut down all the threads and connections. I have been stuck on this problem for days, so I would really appreciate an answer and an explanation of why it doesn't work! The meat of the problem is in the close_window() function. The code is runnable provided you have a basic server that accepts a connection. Thanks!
import wx
import socket
import threading
import sys

class oranges(wx.Frame):
    def __init__(self, parent, id):
        ## Unimportant stuff
        wx.Frame.__init__(self, parent, id, " Retro Message", size=(500, 500))
        self.frame = wx.Panel(self)
        self.input_box = wx.TextCtrl(self.frame, -1, pos=(15, 350), size=(455, 120), style=wx.NO_BORDER | wx.TE_MULTILINE)
        self.messaging_box = wx.TextCtrl(self.frame, -1, pos=(15, 15), size=(455, 285), style=wx.NO_BORDER | wx.TE_MULTILINE | wx.TE_READONLY)
        send_button = wx.Button(self.frame, label="Send", pos=(350, 315), size=(75, 40))
        self.Bind(wx.EVT_BUTTON, self.sender, send_button)
        self.Bind(wx.EVT_CLOSE, self.close_window)
        self.counter = 1
        self.socket_connect = socket.socket()
        self.setup()

    def sender(self, event):
        self.socket_connect.send(self.input_box.GetValue())
        self.input_box.Clear()
        self.Refresh()

    ## Important stuff
    def close_window(self, event):  # This is the function in question #
        self.counter = 0
        self.socket_connect.shutdown(socket.SHUT_RDWR)
        sys.exit(0)

    def setup(self):
        self.ip_enter = wx.TextEntryDialog(None, "Enter in the IP:", "Setup", "192.168.1.1")
        if self.ip_enter.ShowModal() == wx.ID_OK:
            self.offical_ip = self.ip_enter.GetValue()
            try:
                self.socket_connect.connect((self.offical_ip, 5003))
                self.username = "false"  # Tells the server just to give the user an IP username
                self.Thread1 = threading.Thread(target=self.listening_connect)
                self.Thread1.start()
            except socket.error:
                self.error_connect()
        else:
            sys.exit(0)

    def listening_connect(self):
        self.socket_connect.send(self.username)
        while self.counter != 0:
            data = self.socket_connect.recv(1024)
            self.messaging_box.AppendText(data)
            self.Refresh()
            if not data:
                break
        self.socket_connect.close()

    def error_connect(self):
        pop_ups = wx.MessageDialog(None, "Failed to Connect to Server!", 'Error', wx.OK)
        pop_ups.ShowModal()
        self.setup()

if __name__ == "__main__":
    app = wx.App(False)
    window = oranges(parent=None, id=-1)
    window.Show()
    app.MainLoop()
Here is a basic server program that should work with it (I am unable to test this exact version, but it is very similar to the real one):
import socket

HOST = '192.168.1.1'
PORT = 5003
s = socket.socket()
s.bind((HOST, PORT))
s.listen(1)
c, addr = s.accept()
while True:
    data = c.recv(1024)
    if not data:
        break
c.close()
You need to wait for the thread to end. Otherwise it will probably make the script hang. Why? The thread is separate from the GUI thread, so it doesn't get killed just because you closed down your wxPython application. Here is what I would recommend:
def close_window(self, event):
    self.Thread1.join()
    self.Destroy()
This makes the script wait for the thread to finish before closing the application. If you want the frame to disappear immediately, you can call self.Hide() before the join. Another method is to put some logic in your thread so you can send it a message telling it the application is shutting down and the thread needs to abort; a sketch of that approach follows.
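Here is a minimal sketch of that idea, assuming a threading.Event replaces the counter flag (stop_event and the wx.CallAfter wrapper are illustrative additions, not from the original code):

# Sketch: cooperative shutdown using a threading.Event instead of self.counter.
# stop_event and the wx.CallAfter call are illustrative, not from the original code.
import threading

# in __init__:
#   self.stop_event = threading.Event()

def listening_connect(self):
    self.socket_connect.send(self.username)
    while not self.stop_event.is_set():
        data = self.socket_connect.recv(1024)  # returns '' once shutdown() is called below
        if not data:
            break
        wx.CallAfter(self.messaging_box.AppendText, data)  # GUI updates belong on the main thread
    self.socket_connect.close()

def close_window(self, event):
    self.stop_event.set()                           # ask the listener thread to stop
    self.socket_connect.shutdown(socket.SHUT_RDWR)  # unblock the pending recv()
    self.Thread1.join()                             # now the join cannot hang
    self.Destroy()

The key point is that shutdown() unblocks the recv() call, so the thread actually gets a chance to observe the stop flag and exit before the join.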
You should probably check out the following Stack answer:
Is there any way to kill a Thread in Python?
In order to push real-time database info to the client, I use Flask-SocketIO on the server side to send updates over a WebSocket.
Here is a snippet of my view file:
from flask import render_template, current_app, request  # imports needed by this snippet
from ..models import Host
from flask_socketio import emit, disconnect
from threading import Thread

# socketio and the `main` blueprint are defined elsewhere in the app
thread = None

def background_thread(app=None):
    """
    Send host status information to the client.
    """
    with app.app_context():
        while True:
            # update host info at a fixed interval
            socketio.sleep(app.config['HOST_UPDATE_INTERVAL'])
            # socketio.sleep(5)
            all_hosts = dict([(host.id, host.status) for host in Host.query.all()])
            socketio.emit('update_host', all_hosts, namespace='/hostinfo')

@main.route('/', methods=['GET', 'POST'])
def index():
    all_hosts = Host.query.all()
    return render_template('dashboard.html', hosts=all_hosts, async_mode=socketio.async_mode)

@socketio.on('connect', namespace='/hostinfo')
def on_connect():
    global thread
    if thread is None:
        app = current_app._get_current_object()
        thread = socketio.start_background_task(target=background_thread, app=app)
    emit('my_response', {'data': 'connected'})

@socketio.on('disconnect', namespace='/hostinfo')
def on_disconnect():
    print 'Client disconnected...', request.sid

@socketio.on('my_ping', namespace="/hostinfo")
def ping_pong():
    emit('my_pong')
However, when I update the Host table in my database, Host.query.all() in the background thread still returns the old information. Why is that?
Thanks a lot to @miguelgrinberg. The background thread keeps reusing a single stale session, so on each iteration it just reads that session's cached data. Adding db.session.remove() at the end of the while True loop makes each iteration start with a clean session.
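For reference, a minimal sketch of the fixed loop, assuming db is the app's Flask-SQLAlchemy instance:

# Sketch of the corrected background loop; `db` is assumed to be the
# Flask-SQLAlchemy instance used by the app.
def background_thread(app=None):
    with app.app_context():
        while True:
            socketio.sleep(app.config['HOST_UPDATE_INTERVAL'])
            all_hosts = dict([(host.id, host.status) for host in Host.query.all()])
            socketio.emit('update_host', all_hosts, namespace='/hostinfo')
            db.session.remove()  # drop the cached session so the next query sees fresh data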
I am pretty new to Twisted, so I am sure this is a rookie mistake. I have built a simple server which receives a message from the client, and upon receipt of a message the server fires a callback that prints the message to the console.
For the first client, the server works as expected. Unfortunately, when I start up a second client I get the following error: twisted.internet.defer.AlreadyCalledError. It was my understanding that the factory would make a new instance of the deferred, i.e. the new deferred wouldn't have been called before?
Please see the code below. Any help would be very appreciated.
import sys

from twisted.internet.protocol import ServerFactory, Protocol
from twisted.internet import defer

class LockProtocol(Protocol):
    lockData = ''

    def dataReceived(self, data):
        self.lockData += data
        if self.lockData.endswith('??'):
            self.lockDataReceived(self.lockData)

    def lockDataReceived(self, lockData):
        self.factory.lockDataFinished(lockData)

class LockServerFactory(ServerFactory):
    protocol = LockProtocol

    def __init__(self):
        self.deferred = defer.Deferred()  # initialise deferred

    def lockDataFinished(self, lockData):
        self.deferred.callback(lockData)

    def clientConnectionFailed(self, connector, reason):
        self.deferred.errback(reason)

def main():
    HOST = '127.0.0.1'  # localhost
    PORT = 10001

    def got_lockData(lockData):
        print "We have received lockData. It is as follows:", lockData

    def lockData_failed(err):
        print >> sys.stderr, 'The lockData download failed.'
        errors.append(err)

    factory = LockServerFactory()
    from twisted.internet import reactor
    # Listen for TCP connections on a port, and use our factory
    # to make a protocol instance for each new connection
    port = reactor.listenTCP(PORT, factory)
    print 'Serving on %s' % port.getHost()
    # Set up callbacks
    factory.deferred.addCallbacks(got_lockData, lockData_failed)
    reactor.run()  # start the reactor

if __name__ == '__main__':
    main()
Notice that there is only one LockServerFactory ever created in your program:
factory = LockServerFactory()
However, as many LockProtocol instances are created as connections are accepted. If you have per-connection state, the place to put it is on LockProtocol.
It looks like your "lock data completed" event is not a one-off, so a Deferred is probably not the right abstraction for this job: a Deferred can only fire once, and the second client's data tries to fire it again, which is exactly the AlreadyCalledError you are seeing.
Instead of a LockServerFactory with a Deferred that fires when that event happens, perhaps you want a multi-use event handler, perhaps custom built:
class LockServerFactory(ServerFactory):
    protocol = LockProtocol

    def __init__(self, lockDataFinished):
        self.lockDataFinished = lockDataFinished

factory = LockServerFactory(got_lockData)
(Incidentally, notice that I've dropped clientConnectionFailed from this implementation: that's a method of ClientFactory. It will never be called on a server factory.)
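Put together, main() would change roughly like this (a sketch; got_lockData now fires once per completed message rather than through a single-shot Deferred):

# Sketch: wiring the callback-based factory into main().
# got_lockData is invoked every time a '??'-terminated message arrives.
def main():
    PORT = 10001

    def got_lockData(lockData):
        print "We have received lockData. It is as follows:", lockData

    factory = LockServerFactory(got_lockData)

    from twisted.internet import reactor
    reactor.listenTCP(PORT, factory)
    reactor.run()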
My Python project has multiple threads running. I want to add a gauge widget from the wxPython library to show progress, and I want the gauge to fill until my first thread completes. How do I achieve this? I am using Python 2.7 on Windows.
Use wx.CallAfter
def add_to_the_gauge(value):
    your_gauge.Value += value

...

# in some thread
def some_thread_running():
    wx.CallAfter(add_to_the_gauge, 2)
You need to post events from your worker thread to the main thread asking it to update the gauge, see this overview.
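A minimal sketch of that pattern (all names here are illustrative, not from the question; the worker reports percent-done and wx.CallAfter marshals the update onto the main thread):

# Sketch: a worker thread reporting progress to a wx.Gauge on the main thread.
import threading
import time
import wx

def do_work(gauge):
    for pct in range(1, 101):
        time.sleep(0.05)                   # stand-in for real work
        wx.CallAfter(gauge.SetValue, pct)  # marshal the update to the GUI thread

# inside your frame's __init__:
#   self.gauge = wx.Gauge(panel, range=100)
#   threading.Thread(target=do_work, args=(self.gauge,)).start()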
You can also use a simple approach: poll a child process from a timer. Remember to import the module (subprocess rather than os, since the object returned by os.popen has no poll() method):
import subprocess
and then put this on the frame:
def __init__(self, *a, **k):
    filename = 'game.exe'
    self.timer = wx.Timer(self)  # give the timer an owner so the binding works
    self.Bind(wx.EVT_TIMER, self.OnUpdateGauge, self.timer)
    self.timer.Start(100)  # fire every 100 ms
    self.proc = subprocess.Popen(filename)  # start the process

def OnUpdateGauge(self, event):
    if self.proc.poll() is None:  # poll() returns None while still running
        '''do something with the gauge'''
    else:  # process finished, so stop polling
        self.timer.Stop()
Hope this helps.
This is a Scrapy application, but the question is really about the Python language - experts can probably answer it immediately without knowing the framework at all.
I've got a class called CrawlWorker that knows how to talk to so-called "spiders" - schedule their crawls, and manage their lifecycle.
There's a TwistedRabbitClient that has-one CrawlWorker. The client only knows how to talk to the queue and hand off messages to the worker - it gets completed work back from the worker asynchronously by using the worker method connect_to_scrape below to connect to a signal emitted by a running spider:
def connect_to_scrape(self, callback):
    self._connect_to_signal(callback, signals.item_scraped)

def _connect_to_signal(self, callback, signal):
    if signal is signals.item_scraped:
        def _callback(item, response, sender, signal, spider):
            scrape_config = response.meta['scrape_config']
            delivery_tag = scrape_config.delivery_tag
            callback(item.to_dict(), delivery_tag)
    else:
        _callback = callback
    dispatcher.connect(_callback, signal=signal)
So the worker provides a layer of "work deserialization" for the Rabbit client, who doesn't know about spiders, responses, senders, signals, items (anything about the nature of the work itself) - only dicts that'll be published as JSON with their delivery tags.
So the callback below isn't registering properly (no errors either):
def publish(self, item, delivery_tag):
    self.log('item_scraped={0} {1}'.format(item, delivery_tag))
    publish_message = json.dumps(item)
    self._channel.basic_publish(exchange=self.publish_exchange,
                                routing_key=self.publish_key,
                                body=publish_message)
    self._channel.basic_ack(delivery_tag=delivery_tag)
But if I remove the if branch in _connect_to_signal and connect the callback directly (and modify publish to soak up all the unnecessary arguments), it works.
Anyone have any ideas why?
So, I figured out why this wasn't working, by re-stating it in a more general context:
import functools

from scrapy.signalmanager import SignalManager

SIGNAL = object()

class Sender(object):
    def __init__(self):
        self.signals = SignalManager(self)

    def wrap_receive(self, receive):
        @functools.wraps(receive)
        def wrapped_receive(message, data):
            message = message.replace('World', 'Victor')
            value = data['key']
            receive(message, value)
        return wrapped_receive

    def bind(self, receive):
        _receive = self.wrap_receive(receive)
        self.signals.connect(_receive, signal=SIGNAL,
                             sender=self, weak=False)

    def send(self):
        message = 'Hello, World!'
        data = {'key': 'value'}
        self.signals.send_catch_log(SIGNAL, message=message, data=data)

class Receiver(object):
    def __init__(self, sender):
        self.sender = sender
        self.sender.bind(self.receive)

    def receive(self, message, value):
        """Receive data from a Sender."""
        print 'Receiver received: {0} {1}.'.format(message, value)

if __name__ == '__main__':
    sender = Sender()
    receiver = Receiver(sender)
    sender.send()
This works if and only if weak=False.
The basic problem is that when connecting to the signal, weak=False needs to be specified. The likely reason: the dispatcher holds only a weak reference to the handler by default, and wrapped_receive is a local closure with no other strong reference, so it gets garbage-collected as soon as bind() returns and the connection silently disappears (which also explains why there is no error). Passing weak=False makes the dispatcher keep a strong reference instead.
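If you would rather keep the default weak references, an alternative sketch is to hold a strong reference to the wrapped handler yourself (the _receivers attribute is illustrative, not part of the original code):

# Sketch: keep a strong reference to the closure so weak=True still works.
def bind(self, receive):
    _receive = self.wrap_receive(receive)
    if not hasattr(self, '_receivers'):
        self._receivers = []
    self._receivers.append(_receive)  # prevents garbage collection of the closure
    self.signals.connect(_receive, signal=SIGNAL, sender=self)  # default weak=True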
We have a Django app running Gunicorn with sync workers that's deployed on Heroku. Our request response time shows several requests that hit 30s (and die), which is the default Gunicorn timeout.
What is the best way to log these requests and analyze the timeout? Gunicorn doesn't seem to provide a hook for catching these timeouts, at least not something that's obvious.
One rather rough way to do it is to have a "watchdog" timer that interrupts the process after, say, 25 seconds. Once you have an idea of which requests are slow, you can refine the data to figure out what's going on.
Example:
import signal

def timeout(_signum, _frame):
    print 'TIMEOUT'

signal.signal(signal.SIGALRM, timeout)
signal.alarm(1)  # send SIGALRM in 1 second

print 'waiting'
signal.pause()
print 'done'
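Applied to the Django app, a hedged sketch of that idea as middleware (all names are illustrative; 25 seconds is chosen so the alarm fires before Gunicorn's default 30-second kill, and SIGALRM is fine here because Heroku runs Linux and sync workers handle requests on the main thread):

# Sketch: middleware that logs a traceback shortly before Gunicorn's
# 30s timeout would kill the worker. Names are illustrative.
import signal
import traceback
import logging

logger = logging.getLogger(__name__)

def watchdog(signum, frame):
    # Log where the request is stuck; this runs inside the worker process
    logger.error('Request watchdog fired:\n%s', ''.join(traceback.format_stack(frame)))

class WatchdogMiddleware(object):
    def process_request(self, request):
        signal.signal(signal.SIGALRM, watchdog)
        signal.alarm(25)  # fire before Gunicorn's default 30s timeout

    def process_response(self, request, response):
        signal.alarm(0)  # cancel the alarm on a normal response
        return response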
Another approach is to fire off a Thread which pokes the main code after a certain amount of elapsed time. It has several caveats -- be sure to read the ActiveState link.
Here's one implementation by Aaron Swartz from ActiveState.com
import sys  # needed for sys.exc_info() below
import threading

class TimeoutError(Exception): pass

def timelimit(timeout):
    def internal(function):
        def internal2(*args, **kw):
            class Calculator(threading.Thread):
                def __init__(self):
                    threading.Thread.__init__(self)
                    self.result = None
                    self.error = None

                def run(self):
                    try:
                        self.result = function(*args, **kw)
                    except:
                        self.error = sys.exc_info()[0]

            c = Calculator()
            c.start()
            c.join(timeout)
            if c.isAlive():
                raise TimeoutError
            if c.error:
                raise c.error
            return c.result
        return internal2
    return internal
https://github.com/benoitc/gunicorn/pull/768/files added a worker_abort server hook, which is what I'm using in this case.
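For illustration, a minimal sketch of using that hook from a Gunicorn config file (what you log and where is up to you; the traceback dump is an assumption about what is useful here):

# gunicorn.conf.py -- sketch of logging a timed-out worker via the
# worker_abort server hook.
import traceback

def worker_abort(worker):
    # Called in the worker when it receives SIGABRT, e.g. on a request timeout
    worker.log.error("Worker %s aborted (likely timeout)", worker.pid)
    # Dump the current stack to see where the request was stuck
    for line in traceback.format_stack():
        worker.log.error(line.strip())

Start Gunicorn with the config file (e.g. gunicorn -c gunicorn.conf.py myapp.wsgi) and the hook fires whenever a worker is aborted.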