Django manage.py runserver graceful reloading

I am working on a Django project that integrates a webcam with OpenCV. For webcam access I use the following code. The webcam is released if I use Ctrl+C to end the running server, but if the server reloads itself after a code change, the webcam is not released properly and therefore becomes unavailable. How can I detect a hot reload so I can close the webcam properly?
I am aware of the option of disabling hot reloading, but that is rather inconvenient. Is there anything I can do programmatically?
import cv2

class VideoCamera(object):
    def __init__(self):
        self.video = None

    def __del__(self):
        # safety net: release the device if it is still open
        if self.video is not None and self.video.isOpened():
            self.video.release()

    def get_frame(self):
        try:
            self.video = cv2.VideoCapture(0)
            success, image = self.video.read()
            self.video.release()
            ret, jpeg = cv2.imencode('.jpg', image)
            return jpeg.tobytes()
        except (SystemExit, KeyboardInterrupt, Exception) as e:
            # release the device on any failure, including Ctrl+C
            self.video.release()
            raise e

A few ideas:
1) You could connect to the file_changed signal, which is sent when a file changes, before the reload takes place:
https://github.com/django/django/blob/9386586f31b8a0bccf59a1bff647cd829d4e79aa/django/utils/autoreload.py#L24
def notify_file_changed(self, path):
    results = file_changed.send(sender=self, file_path=path)
    logger.debug('%s notified as changed. Signal results: %s.', path, results)
    if not any(res[1] for res in results):
        trigger_reload(path)
2) Simply monkey-patch the trigger_reload function and inject the webcam-closing code (see the sketch below):
https://github.com/django/django/blob/9386586f31b8a0bccf59a1bff647cd829d4e79aa/django/utils/autoreload.py#L221
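For the second idea, a minimal sketch of what the monkey patch could look like, assuming the open cv2.VideoCapture handle is reachable through some module-level accessor; get_camera() here is a hypothetical helper, not part of Django or the question's code:
# Hypothetical sketch: wrap django.utils.autoreload.trigger_reload so the
# webcam is released just before the autoreloader restarts the process.
from django.utils import autoreload

_original_trigger_reload = autoreload.trigger_reload

def trigger_reload_with_cleanup(filename):
    camera = get_camera()  # hypothetical accessor for the shared VideoCapture
    if camera is not None and camera.isOpened():
        camera.release()
    _original_trigger_reload(filename)

autoreload.trigger_reload = trigger_reload_with_cleanup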

If your class is a singleton, which it appears to be, create an instance of it in its own module and add Django signal handlers to perform the cleanup. Since relying on __del__ is generally discouraged, I would recommend adding a system signal handler (for SIGINT, i.e. Ctrl+C) that forwards the event as a Django signal. In other words, listen for file_changed from django.utils.autoreload and for SIGINT.
To use the singleton in other files, import it with from my_app.singleton import my_singleton.
# my_app/singleton.py
class MySingleton:
    def __init__(self):
        """Init code here"""

    def close(self):
        """Shutdown code here"""

my_singleton = MySingleton()
Then register your app's signal handlers from AppConfig.ready(); as far as I know this is the standard place to do it.
# my_app/apps.py
from django.apps import AppConfig

class MyAppConfig(AppConfig):
    name = "my_app"

    def ready(self):
        import my_app.signals
In the signal handler file, import the singleton and close it on shutdown. The handler is called on both file_changed and the forwarded SIGINT signal.
# my_app/signals.py
from django.dispatch import receiver
from django.utils.autoreload import file_changed

from my_app.signal_definitions import system_shutdown_signal
from my_app.singleton import my_singleton

@receiver(file_changed)
@receiver(system_shutdown_signal)
def my_shutdown_handler(sender, **kwargs):
    my_singleton.close()
Register a handler to catch the SIGINT signal from the system.
# my_app/__init__.py
import signal
import sys

from my_app.signal_definitions import system_shutdown_signal

def _forward_to_django_shutdown_signal(signal, frame):
    print(f"Shutting down Django {sys.argv}")
    system_shutdown_signal.send("system")
    sys.exit(0)

signal.signal(signal.SIGINT, _forward_to_django_shutdown_signal)
The Django signal used to propagate the system SIGINT signal.
# my_app/signal_definitions.py
from django.dispatch import Signal
system_shutdown_signal = Signal()
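Tying this back to the question, the singleton itself could own the camera; a rough sketch, where the cv2 calls mirror the question's code and everything else is an assumption:
# my_app/singleton.py -- hypothetical adaptation of the question's VideoCamera
import cv2

class CameraSingleton:
    def __init__(self):
        self.video = cv2.VideoCapture(0)  # open the device once per process

    def get_frame(self):
        success, image = self.video.read()
        if not success:
            return None
        ret, jpeg = cv2.imencode('.jpg', image)
        return jpeg.tobytes()

    def close(self):
        # called by the file_changed / SIGINT handlers above
        if self.video is not None and self.video.isOpened():
            self.video.release()

my_singleton = CameraSingleton()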

Related

Tornado on pika consumer can't run

I want to build a monitoring system using RabbitMQ and Tornado. I can run the producer, and my consumer can consume the data from the queue, but the data can't be shown on the website.
This is just my experiment before I use the real sensor.
import pika
import tornado.ioloop
import tornado.web
import tornado.websocket
import logging
from threading import Thread

logging.basicConfig(lvl=logging.INFO)

clients = []
credentials = pika.credentials.PlainCredentials('ayub', 'ayub')
connection = pika.BlockingConnection(pika.ConnectionParameters('192.168.43.101',
                                                               5672,
                                                               '/',
                                                               credentials))
channel = connection.channel()

def threaded_rmq():
    channel.basic_consume('Queue',
                          on_message_callback=consumer_callback,
                          auto_ack=True,
                          exclusive=False,
                          consumer_tag=None,
                          arguments=None)
    channel.start_consuming()

def disconect_rmq():
    channel.stop_consuming()
    Connection.close()
    logging.info('Disconnected from broker')

def consumer_callback(ch, method, properties, body):
    for itm in clients:
        itm.write_message(body)

class SocketHandler(tornado.websocket.WebSocketHandler):
    def open(self):
        logging.info('websocket open')
        clients.remove(self)

    def close(self):
        logging.info('websocket closed')
        clients.remove(self)

class MainHandler(tornado.web.RequestHandler):
    def get(self):
        self.render("websocket.html")

application = tornado.web.Application([
    (r'/ws', SocketHandler),
    (r"/", MainHandler),
])

def startTornado():
    application.listen(8888)
    tornado.ioloop.IOLoop.instance().start()

def stopTornado():
    tornado.ioloop.IOLoop.instance().stop()

if __name__ == "__main__":
    logging.info('starting thread RMQ')
    threadRMQ = Thread(target=threaded_rmq)
    threadRMQ.start()

    logging.info('starting thread tornado')
    threadTornado = Thread(target=startTornado)
    threadTornado.start()

    try:
        raw_input("server ready")
    except SyntaxError:
        pass

    try:
        logging.info('disconnected')
        disconnect_rmq()
    except Exception, e:
        pass

    stopTornado()
But I got this error:
WARNING:tornado.access:404 GET /favicon.ico (192.168.43.10) 0.98ms
Please help me.
In your SocketHandler.open function you need to add the client, not remove it.
Also consider using a set for clients instead of a list, because the remove operation will be faster:
clients = set()
...
class SocketHandler(tornado.websocket.WebSocketHandler):
    def open(self):
        logging.info('websocket open')
        clients.add(self)

    def close(self):
        logging.info('websocket closed')
        clients.remove(self)
The message you get regarding favicon.ico is actually a warning and it's harmless (the browser requests an icon to show for the web application but won't complain if none is available).
You might also run into threading issues, because Tornado and Pika are running in different threads, so you will have to synchronize them; you can use Tornado's IOLoop.add_callback method for that, as in the sketch below.
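For example, a short sketch of handing each message over to the IOLoop thread instead of touching the clients collection from the Pika thread; this assumes the IOLoop singleton behaviour of older Tornado versions, where IOLoop.instance() refers to the same loop started in startTornado:
# consumer_callback runs on the Pika thread; add_callback is thread-safe and
# schedules the actual broadcast on Tornado's IOLoop thread.
main_loop = tornado.ioloop.IOLoop.instance()

def broadcast(body):
    for client in clients:
        client.write_message(body)

def consumer_callback(ch, method, properties, body):
    main_loop.add_callback(broadcast, body)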

RabbitMQ listener using pika in Django

I have a Django application and I want to consume messages from RabbitMQ. I want the listener to start consuming when I start the Django server. I am using the pika library to connect to RabbitMQ. Providing some code example will really help.
First you need to run your consumer when the Django project starts, e.g. from AppConfig.ready():
https://docs.djangoproject.com/en/2.0/ref/applications/#django.apps.AppConfig.ready
def ready(self):
    if not settings.IS_ACCEPTANCE_TESTING and not settings.IS_UNITTESTING:
        consumer = AMQPConsuming()
        consumer.daemon = True
        consumer.start()
Then, in any convenient place, define the consumer thread:
import threading

import pika
from django.conf import settings

class AMQPConsuming(threading.Thread):
    def callback(self, ch, method, properties, body):
        # do something
        pass

    @staticmethod
    def _get_connection():
        parameters = pika.URLParameters(settings.RABBIT_URL)
        return pika.BlockingConnection(parameters)

    def run(self):
        connection = self._get_connection()
        channel = connection.channel()
        channel.queue_declare(queue='task_queue6')
        print('Hello world! :)')
        channel.basic_qos(prefetch_count=1)
        # note: with pika >= 1.0 this is basic_consume(queue=..., on_message_callback=...)
        channel.basic_consume(self.callback, queue='task_queue6')
        channel.start_consuming()
This will help
http://www.rabbitmq.com/tutorials/tutorial-six-python.html
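Putting the two snippets together, the thread would typically be started from your AppConfig; a minimal sketch, where the settings flag and module path are assumptions carried over from the snippet above:
# my_app/apps.py -- hypothetical wiring of the consumer thread
from django.apps import AppConfig
from django.conf import settings

class MyAppConfig(AppConfig):
    name = 'my_app'

    def ready(self):
        # don't start the consumer while running the test suite
        if not getattr(settings, 'IS_UNITTESTING', False):
            from my_app.consumers import AMQPConsuming  # hypothetical module
            consumer = AMQPConsuming()
            consumer.daemon = True
            consumer.start()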

PyQt: Parent window not waiting until child window closes

With reference to the code shared below: my welcome object should wait until the first_time object completely finishes executing, but it instead goes ahead and closes itself before first_time finishes.
Code:
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from PySide.QtCore import QSettings
import sys
from PyQt4 import uic

# importing first configuration class
import configure as config_first

# loading initial settings
settings = QSettings('settings.ini', QSettings.IniFormat)

# loading the ui screens
form_class = uic.loadUiType("screens/firstscreen.ui")[0]

class welcome(QDialog, form_class):
    # this signal is emitted when first configuration is done and ready to go
    done_and_go_to_use = pyqtSignal()

    def __init__(self):
        super(welcome, self).__init__()
        self.setupUi(self)
        self.done_and_go_to_use.connect(self.close)
        self.ready_btn.clicked.connect(self.ready)

    def ready(self):
        if_configured = settings.value('isConfigured', False)
        if not if_configured:
            first_time = config_first.configureFirst(self)
            first_time.show()
        self.close()

app = QApplication(sys.argv)
p = welcome()
p.show()
app.exec_()
Below is the code for configure.py:
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import sqlite3
import csv
from PySide.QtCore import QSettings
from PyQt4 import uic

# loading initial settings
settings = QSettings('settings.ini', QSettings.IniFormat)

# loading ui screens
form_class = uic.loadUiType("screens/config_first.ui")[0]

# database connection
con = sqlite3.connect("local.db")
cur = con.cursor()
cur.execute("CREATE TABLE if not exists marks (student_id int, student_name varchar(200));")

class configureFirst(QDialog, form_class):
    done_configuring = pyqtSignal()
    try_again = pyqtSignal()

    def __init__(self, parent=None):
        super(configureFirst, self).__init__(parent)
        self.setupUi(self)
        self.ok_btn.clicked.connect(self.ok_clicked)
        self.cancel_btn.clicked.connect(self.cancel_clicked)
        self.try_again.connect(self.ok_clicked)
        self.done_configuring.connect(self.cancel_clicked)
        self.show()

    def ok_clicked(self):
        file_select = QFileDialog.getOpenFileName(self, "open file", "/")
        if file_select:
            with open(file_select, 'rb') as f:
                reader = csv.reader(f)
                ed = list(reader)
            for row in ed:
                if "name" not in row or "id" not in row:
                    cur.execute("Insert into marks Values (?,?);", (row[0], row[1]))
                    con.commit()
            settings.setValue("isConfigured", True)
            self.done_configuring.emit()
        else:
            # if recursion is used, the number of times it has to close increases
            # and leads to integration problems
            self.try_again.emit()

    def cancel_clicked(self):
        if_configured = settings.value("isConfigured")
        if if_configured:
            self.close()
        else:
            QMessageBox.critical(self, "PerfAnalyser", "You Need to Configure For PerfAnalyser To Work")

    def closeEvent(self, event):
        # this method is triggered when 'X' (the close button at the upper right corner) is clicked
        if_configured = settings.value("isConfigured")
        if if_configured:
            event.accept()
        else:
            QMessageBox.critical(self, "PerfAnalyser", "You Need to Configure For PerfAnalyser To Work")
            event.ignore()
Thanks for the help in advance ...
I will try to help out, since I notice few people have seen your post. I had this happen a long time ago, so I needed a reminder, but I was unable to get your code running; I also tried to recreate your ui files and the screens directory, without success. Maybe the following is still useful anyway.
In my working code, any time I needed to create a subwindow, I executed it as follows from the main window's module:
dlg = SubWindowModuleName.StartSub()
dlg.exec_()
This executes the subwindow and waits for it to close. Then, in the subwindow module (SubWindowModuleName in the above code, configure in your case), I did this:
class Ui_Dialog(object):
    def setupUi(self, Dialog):
        Dialog.setObjectName(_fromUtf8("Dialog"))
        Dialog.resize(982, 521)
        ...  # here I build the window (I noticed that you import .ui files, which is a much better way of doing this)
        ...

class StartSub(QtGui.QDialog, Ui_Dialog):
    def __init__(self, parent=None):
        QtGui.QDialog.__init__(self, parent)
        self.setupUi(self)
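Applied to the question's code, only the ready slot would need to change: blocking on the child dialog with exec_() instead of show(). A sketch under that assumption:
def ready(self):
    if_configured = settings.value('isConfigured', False)
    if not if_configured:
        first_time = config_first.configureFirst(self)
        # exec_() runs the dialog modally and only returns after it is closed
        first_time.exec_()
    self.close()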

Terminate All QThreads on GUI Close

I have a PyQt GUI that has one GUI thread and 24 "tester" threads. They work fine, but they still seem to stay up when I close the GUI. How can I gracefully close the threads to avoid Python crashing?
#!/usr/bin/python
# Standard Lib
from datetime import datetime
import logging
import os
import random
import sys
import time

# Third Party
from PyQt4 import QtGui, QtCore

# Local Kung Fu
stuff

class SSITesterThread(QtCore.QThread):
    # vars for updating gui using signals
    updateText = QtCore.pyqtSignal(str)
    updateColor = QtCore.pyqtSignal(str)
    updateSN = QtCore.pyqtSignal(str)

    def __init__(self, thread_number, port, path, parent=None):
        super(SSITesterThread, self).__init__(parent)
        self.delay = random.random()

    def run(self):
        self.ssitester()

    def ssitester(self):
        # stuff
        pass

class SSITestSuiteGUI(QtGui.QMainWindow):
    def __init__(self, parent=None):
        self._threads = []
        QtGui.QWidget.__init__(self, parent)
        # Init class from template and paths
        self.launch_tester_threads()

    def init_gui_nodes(self, com_ports_list):
        for num, port in zip(range(1, 25), range(0, 24)):
            label = getattr(self.ui, 'com_{}'.format(num))
            label.setText("COM Port: {}".format(com_ports_list[port]["COM"]))

    def launch_tester_threads(self):
        logging.info("Spinning up threads...")
        for num, com_port_chunk in zip(range(1, 25), self.com_ports_list):
            tester_thread = SSITesterThread(thread_number=num, port=com_port_chunk["COM"], path=self.vc_test_path)
            # get a reference to the associated textbox somehow...
            status_box = getattr(self.ui, 'status_{}'.format(num))
            tester_thread.updateText.connect(status_box.setText)
            status_box = getattr(self.ui, 'status_{}'.format(num))
            tester_thread.updateColor.connect(status_box.setStyleSheet)
            sn_label = getattr(self.ui, 'sn_{}'.format(num))
            tester_thread.updateSN.connect(sn_label.setText)
            sn_label.setText("S/N: None")
            tester_thread.start()
            self._threads.append(tester_thread)
        logging.info("Ready for tests.")

if __name__ == "__main__":
    app = QtGui.QApplication(sys.argv)
    test_suite = SSITestSuiteGUI()
    test_suite.show()
    # Close app only when window is closed.
    sys.exit(app.exec_())
I tried implementing this solution: https://gist.githubusercontent.com/metalman/10721983/raw/15c6f115f9918fee7c1b88d0a549d4cc59a5b346/qapplication_about_to_quit_signal.py
But I got this error:
AttributeError: 'function' object has no attribute '__pyqtSignature__'
Thanks for your time.
UPDATE:
I added the suggestion below as:
@QtCore.pyqtSlot()
def stop(self):
    return
in my SSITesterThread class, and it errors out when the various emits I use as signals in the thread suddenly try to access NoneType objects:
File "gui.py", line 75, in tester
    self.updateColor.emit("{}".format(thread_colors.green_alert))
AttributeError: 'NoneType' object has no attribute 'green_alert'
Did the fix work and this is a new problem? Because it seems like things still aren't shutting down gracefully.
It looks like you're importing your GUI from a form generated by Qt Designer. Try this:
self.ui.closeEvent = self.closeEvent
I believe your problem is that you're editing the wrong QMainWindow instance in the wrong place.
You probably just need to decorate the stop method as a slot:
@QtCore.pyqtSlot()
def stop(self):
    # do thread cleanup, stop thread
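A minimal sketch of one way to wire that up so the workers are asked to stop and are joined before the window goes away; the _running flag and the loop inside run() are assumptions about what ssitester() does:
class SSITesterThread(QtCore.QThread):
    def __init__(self, parent=None):
        super(SSITesterThread, self).__init__(parent)
        self._running = True

    def run(self):
        while self._running:
            # one iteration of the tester work would go here
            self.msleep(100)

    @QtCore.pyqtSlot()
    def stop(self):
        self._running = False

class SSITestSuiteGUI(QtGui.QMainWindow):
    def closeEvent(self, event):
        # ask each worker to finish, then block until it actually has
        for thread in self._threads:
            thread.stop()
            thread.wait()
        event.accept()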

Django celery task keep global state

I am currently developing a Django application based on django-tenants-schema. You don't need to look into the actual code of the module, but the idea is that it has a global setting for the current database connection defining which schema to use for the application tenant, e.g.
tenant = tenants_schema.get_tenant()
and for setting it:
tenants_schema.set_tenant(xxx)
For some of the tasks I would like them to remember the current global tenant selected during the instantiation, e.g. in theory:
class AbstractTask(Task):
    '''
    Run this method before returning the task future
    '''
    def before_submit(self):
        self.run_args['tenant'] = tenants_schema.get_tenant()

    '''
    This method is run before related .run() task method
    '''
    def before_run(self):
        tenants_schema.set_tenant(self.run_args['tenant'])
Is there an elegant way of doing it in celery?
Celery (as of 3.1) has signals you can hook into to do this. You can alter the kwargs that were passed in, and on the other side, undo your alterations before they're given to the actual task:
from celery import shared_task
from celery.signals import before_task_publish, task_prerun, task_postrun
from threading import local

current_tenant = local()

@before_task_publish.connect
def add_tenant_to_task(body=None, **unused):
    body['kwargs']['tenant_middleware.tenant'] = getattr(current_tenant, 'id', None)
    print 'sending tenant: {t}'.format(t=current_tenant.id)

@task_prerun.connect
def extract_tenant_from_task(kwargs=None, **unused):
    tenant_id = kwargs.pop('tenant_middleware.tenant', None)
    current_tenant.id = tenant_id
    print 'current_tenant.id set to {t}'.format(t=tenant_id)

@task_postrun.connect
def cleanup_tenant(**kwargs):
    current_tenant.id = None
    print 'cleaned current_tenant.id'

@shared_task
def get_current_tenant():
    # Here is where you would do work that relied on current_tenant.id being set.
    import time
    time.sleep(1)
    return current_tenant.id
And if you run the task (not showing logging from the worker):
In [1]: current_tenant.id = 1234; ct = get_current_tenant.delay(); current_tenant.id = 5678; ct.get()
sending tenant: 1234
Out[1]: 1234
In [2]: current_tenant.id
Out[2]: 5678
The signals are not called if no message is sent (when you call the task function directly, without delay() or apply_async()). If you want to filter on the task name, it is available as body['task'] in the before_task_publish signal handler, and the task object itself is available in the task_prerun and task_postrun handlers.
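For example, a sketch of skipping unrelated tasks in the publish handler; the dotted task name here is hypothetical:
@before_task_publish.connect
def add_tenant_to_task(body=None, **unused):
    # body['task'] holds the task's dotted name, so other tasks can be ignored
    if body.get('task') != 'myapp.tasks.get_current_tenant':
        return
    body['kwargs']['tenant_middleware.tenant'] = getattr(current_tenant, 'id', None)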
I am a Celery newbie, so I can't really tell if this is the "blessed" way of doing "middleware"-type stuff in Celery, but I think it will work for me.
I'm not sure what you mean here; is before_submit executed before the task is called by the client?
In that case I would rather use a with statement here:
from contextlib import contextmanager

@contextmanager
def set_tenant_db(tenant):
    prev_tenant = tenants_schema.get_tenant()
    try:
        tenants_schema.set_tenant(tenant)
        yield
    finally:
        tenants_schema.set_tenant(prev_tenant)

@app.task
def tenant_task(tenant=None):
    with set_tenant_db(tenant):
        do_actions_here()

tenant_task.delay(tenant=tenants_schema.get_tenant())
You can of course create a base task that does this automatically; you could apply the context in Task.__call__, for example, but I'm not sure that saves you much over just using the with statement explicitly.
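A rough sketch of that base-task variant, assuming the set_tenant_db context manager above and an app Celery instance:
class TenantTask(app.Task):
    abstract = True

    def __call__(self, *args, **kwargs):
        # pull out the tenant the caller attached and activate it for the run
        tenant = kwargs.pop('tenant', None)
        with set_tenant_db(tenant):
            return super(TenantTask, self).__call__(*args, **kwargs)

@app.task(base=TenantTask)
def tenant_task():
    do_actions_here()

tenant_task.delay(tenant=tenants_schema.get_tenant())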