Tornado on pika consumer can't run - python-2.7

I want to build a monitoring system using RabbitMQ and Tornado. I can run the producer, and my consumer can consume the data from the queue, but the data can't be shown on the website.
This is just an experiment before I use the real sensor:
import pika
import tornado.ioloop
import tornado.web
import tornado.websocket
import logging
from threading import Thread

logging.basicConfig(level=logging.INFO)

clients = []
credentials = pika.credentials.PlainCredentials('ayub', 'ayub')
connection = pika.BlockingConnection(pika.ConnectionParameters('192.168.43.101',
                                                               5672,
                                                               '/',
                                                               credentials))
channel = connection.channel()

def threaded_rmq():
    channel.basic_consume('Queue',
                          on_message_callback=consumer_callback,
                          auto_ack=True,
                          exclusive=False,
                          consumer_tag=None,
                          arguments=None)
    channel.start_consuming()

def disconnect_rmq():
    channel.stop_consuming()
    connection.close()
    logging.info('Disconnected from broker')

def consumer_callback(ch, method, properties, body):
    for itm in clients:
        itm.write_message(body)

class SocketHandler(tornado.websocket.WebSocketHandler):
    def open(self):
        logging.info('websocket open')
        clients.remove(self)

    def close(self):
        logging.info('websocket closed')
        clients.remove(self)

class MainHandler(tornado.web.RequestHandler):
    def get(self):
        self.render("websocket.html")

application = tornado.web.Application([
    (r'/ws', SocketHandler),
    (r"/", MainHandler),
])

def startTornado():
    application.listen(8888)
    tornado.ioloop.IOLoop.instance().start()

def stopTornado():
    tornado.ioloop.IOLoop.instance().stop()

if __name__ == "__main__":
    logging.info('starting thread RMQ')
    threadRMQ = Thread(target=threaded_rmq)
    threadRMQ.start()

    logging.info('starting thread tornado')
    threadTornado = Thread(target=startTornado)
    threadTornado.start()

    try:
        raw_input("server ready")
    except SyntaxError:
        pass

    try:
        logging.info('disconnected')
        disconnect_rmq()
    except Exception, e:
        pass
    stopTornado()
but I got this error:
WARNING:tornado.access:404 GET /favicon.ico (192.168.43.10) 0.98ms
Please help me.

In your SocketHandler.open function you need to add the client, not remove it.
Also, consider using a set for clients instead of a list, because the remove operation will be faster:
clients = set()
...
class SocketHandler(tornado.websocket.WebSocketHandler):
    def open(self):
        logging.info('websocket open')
        clients.add(self)

    def close(self):
        logging.info('websocket closed')
        clients.remove(self)
The message you get regarding favicon.ico is actually a warning, and it's harmless (the browser is requesting an icon to show for the web application, but won't complain if none is available).
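If you want to get rid of it anyway, you can point Tornado at a static directory; a sketch, assuming a static/ folder containing a favicon.ico next to your script (Tornado serves /favicon.ico from static_path automatically):
import os
application = tornado.web.Application(
    [
        (r'/ws', SocketHandler),
        (r"/", MainHandler),
    ],
    # assumes ./static/favicon.ico exists
    static_path=os.path.join(os.path.dirname(__file__), "static"),
)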
You might also run into threading issues, because Tornado and pika are running in different threads, so you will have to synchronize them; you can use Tornado's IOLoop.add_callback method for that.
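For example, rather than calling write_message directly from pika's thread, the callback could hand the message to the IOLoop; a minimal sketch, reusing the clients list from the question (IOLoop.add_callback is the one IOLoop method that is safe to call from another thread):
def consumer_callback(ch, method, properties, body):
    # called on the pika thread; schedule the write on Tornado's loop
    tornado.ioloop.IOLoop.instance().add_callback(broadcast, body)

def broadcast(body):
    # runs on the IOLoop thread, where write_message is safe to call
    for itm in clients:
        itm.write_message(body)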

Related

How to get the most up-to-date database in app_context thread? [duplicate]

In order to push real-time database info to the client, I use flask-socketio on the server side, pushing all real-time database info to the client over a websocket.
Here is a snippet of my view file:
from ..models import Host
from flask import current_app, render_template, request
from flask_socketio import emit, disconnect
from threading import Thread

thread = None

def background_thread(app=None):
    """
    send host status information to client
    """
    with app.app_context():
        while True:
            # update host info interval
            socketio.sleep(app.config['HOST_UPDATE_INTERVAL'])
            # socketio.sleep(5)
            all_hosts = dict([(host.id, host.status) for host in Host.query.all()])
            socketio.emit('update_host', all_hosts, namespace='/hostinfo')

@main.route('/', methods=['GET', 'POST'])
def index():
    all_hosts = Host.query.all()
    return render_template('dashboard.html', hosts=all_hosts, async_mode=socketio.async_mode)

@socketio.on('connect', namespace='/hostinfo')
def on_connect():
    global thread
    if thread is None:
        app = current_app._get_current_object()
        thread = socketio.start_background_task(target=background_thread, app=app)
    emit('my_response', {'data': 'connected'})

@socketio.on('disconnect', namespace='/hostinfo')
def on_disconnect():
    print 'Client disconnected...', request.sid

@socketio.on('my_ping', namespace="/hostinfo")
def ping_pong():
    emit('my_pong')
However, when I update my database's Host table, Host.query.all() still gets the old information. Why is that?
Thanks a lot to @miguelgrinberg. The background thread keeps reusing one old session, so each iteration just reads that session's cached state. Add db.session.remove() at the end of the while True loop and each iteration will start with a clean session.
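Concretely, the end of the loop would look like this (a sketch, assuming db is the app's Flask-SQLAlchemy instance):
with app.app_context():
    while True:
        socketio.sleep(app.config['HOST_UPDATE_INTERVAL'])
        all_hosts = dict([(host.id, host.status) for host in Host.query.all()])
        socketio.emit('update_host', all_hosts, namespace='/hostinfo')
        # drop the scoped session so the next query sees fresh data
        db.session.remove()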

Django manage.py runserver graceful reloading

I am working on a Django project which integrates a webcam and OpenCV. For webcam access I use the following code. The webcam is released if I use Ctrl + C to end a running server, but if the server reloads itself after a code change, the webcam is not released properly and therefore becomes unavailable. How can I detect hot reloading so I can close the webcam properly?
I am aware of the option of disabling hot reloading, but that is rather inconvenient. Is there anything I can do programmatically?
import cv2

class VideoCamera(object):
    def __init__(self):
        self.video = None

    def __del__(self):
        if self.video is not None and self.video.isOpened():
            self.video.release()

    def get_frame(self):
        try:
            self.video = cv2.VideoCapture(0)
            success, image = self.video.read()
            self.video.release()
            ret, jpeg = cv2.imencode('.jpg', image)
            return jpeg.tobytes()
        except (SystemExit, KeyboardInterrupt, Exception) as e:
            self.video.release()
            raise e
A few ideas:
1) You could connect to the file_changed signal that is triggered on file change, before the reload takes place:
https://github.com/django/django/blob/9386586f31b8a0bccf59a1bff647cd829d4e79aa/django/utils/autoreload.py#L24
def notify_file_changed(self, path):
    results = file_changed.send(sender=self, file_path=path)
    logger.debug('%s notified as changed. Signal results: %s.', path, results)
    if not any(res[1] for res in results):
        trigger_reload(path)
2) Simply monkey patch the trigger_reload function and inject the webcam-closing code:
https://github.com/django/django/blob/9386586f31b8a0bccf59a1bff647cd829d4e79aa/django/utils/autoreload.py#L221
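A minimal sketch of that second option, assuming a module-level camera object with a release() method (the name is a placeholder, not from the question):
# Hedged sketch: wrap django.utils.autoreload.trigger_reload so the assumed
# global camera is released before the development server restarts.
from django.utils import autoreload

_original_trigger_reload = autoreload.trigger_reload

def trigger_reload_with_cleanup(filename):
    camera.release()  # hypothetical global camera handle
    _original_trigger_reload(filename)

autoreload.trigger_reload = trigger_reload_with_cleanup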
If your class is a singleton, which it appears to be, create an instance of your class in its file and then add Django signal handlers to perform the clean-up. As it is generally not recommended to use __del__, I would recommend you add a handler for system signals (like SIGINT/Ctrl+C) which forwards them as a Django signal. That means listening for file_changed from django.utils.autoreload and for SIGINT.
To use the singleton in other files, import it with from my_app.singleton import my_singleton.
# my_app/singleton.py
class MySingleton:
    def __init__(self):
        """Init code here"""

    def close(self):
        """Shutdown code here"""

my_singleton = MySingleton()
Then, register your signal handlers for your app. This is the standard approach, AFAIK.
# my_app/apps.py
from django.apps import AppConfig

class MyAppConfig(AppConfig):
    name = "my_app"

    def ready(self):
        import my_app.signals
In the signal handler file itself, import the singleton and close it on shutdown. This is called on file_changed and on SIGINT.
# my_app/signals.py
from django.dispatch import receiver
from django.utils.autoreload import file_changed
from my_app.signal_definitions import system_shutdown_signal
from my_app.singleton import my_singleton

@receiver(file_changed)
@receiver(system_shutdown_signal)
def my_shutdown_handler(sender, **kwargs):
    my_singleton.close()
Register a handler to catch the SIGINT signal from the system.
# my_app/__init__.py
import signal
import sys

from my_app.signal_definitions import system_shutdown_signal

def _forward_to_django_shutdown_signal(signal, frame):
    print(f"Shutting down Django {sys.argv}")
    system_shutdown_signal.send("system")
    sys.exit(0)

signal.signal(signal.SIGINT, _forward_to_django_shutdown_signal)
The Django signal used to propagate the system SIGINT signal.
# my_app/signal_definitions.py
from django.dispatch import Signal

system_shutdown_signal = Signal()

Rabbitmq listener using pika in django

I have a Django application and I want to consume messages from RabbitMQ. I want the listener to start consuming when I start the Django server. I am using the pika library to connect to RabbitMQ. Providing some code example will really help.
First you need to run your consumer when the Django project starts, from your AppConfig.ready() hook (the class and app names below are placeholders):
https://docs.djangoproject.com/en/2.0/ref/applications/#django.apps.AppConfig.ready
from django.apps import AppConfig
from django.conf import settings

class MyAppConfig(AppConfig):
    name = 'my_app'

    def ready(self):
        if not settings.IS_ACCEPTANCE_TESTING and not settings.IS_UNITTESTING:
            consumer = AMQPConsuming()
            consumer.daemon = True
            consumer.start()
Then, in any convenient place:
import threading

import pika
from django.conf import settings

class AMQPConsuming(threading.Thread):
    def callback(self, ch, method, properties, body):
        # do something with the message body
        pass

    @staticmethod
    def _get_connection():
        parameters = pika.URLParameters(settings.RABBIT_URL)
        return pika.BlockingConnection(parameters)

    def run(self):
        connection = self._get_connection()
        channel = connection.channel()
        channel.queue_declare(queue='task_queue6')
        print('Hello world! :)')
        channel.basic_qos(prefetch_count=1)
        # consume from the queue declared above
        channel.basic_consume(self.callback, queue='task_queue6')
        channel.start_consuming()
This tutorial will also help:
http://www.rabbitmq.com/tutorials/tutorial-six-python.html
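To verify the consumer end to end, you could publish a test message with the same connection settings; a quick sketch, assuming the settings.RABBIT_URL used above:
import pika
from django.conf import settings

# publish one test message to the queue the consumer thread listens on
connection = pika.BlockingConnection(pika.URLParameters(settings.RABBIT_URL))
channel = connection.channel()
channel.queue_declare(queue='task_queue6')
channel.basic_publish(exchange='', routing_key='task_queue6', body='hello')
connection.close()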

How to get the "full" async result in Celery link_error callback

I have Celery 3.1.18 running with Django 1.6.11 and RabbitMQ 3.5.4, and I am trying to test my async task in a failure state (CELERY_ALWAYS_EAGER=True). However, I cannot get the proper "result" in the error callback. The example in the Celery docs shows:
@app.task(bind=True)
def error_handler(self, uuid):
    result = self.app.AsyncResult(uuid)
    print('Task {0} raised exception: {1!r}\n{2!r}'.format(
          uuid, result.result, result.traceback))
When I do this, my result is still "PENDING", result.result = '', and result.traceback = ''. But the actual result returned by my .apply_async call has the right "FAILURE" state and traceback.
My code (basically a Django REST Framework endpoint that parses a .tar.gz file, then sends a notification back to the user when the file is done parsing):
views.py:
from producer_main.celery import app as celery_app

@celery_app.task()
def _upload_error_simple(uuid):
    print uuid
    result = celery_app.AsyncResult(uuid)
    print result.backend
    print result.state
    print result.result
    print result.traceback
    msg = 'Task {0} raised exception: {1!r}\n{2!r}'.format(uuid,
                                                           result.result,
                                                           result.traceback)

class UploadNewFile(APIView):
    def post(self, request, repository_id, format=None):
        try:
            uploaded_file = self.data['files'][self.data['files'].keys()[0]]
            self.path = default_storage.save('{0}/{1}'.format(settings.MEDIA_ROOT,
                                                              uploaded_file.name),
                                             uploaded_file)
            print type(import_file)
            self.async_result = import_file.apply_async((self.path, request.user),
                                                        link_error=_upload_error_simple.s())
            print 'results from self.async_result:'
            print self.async_result.id
            print self.async_result.backend
            print self.async_result.state
            print self.async_result.result
            print self.async_result.traceback
            return Response()
        except (PermissionDenied, InvalidArgument, NotFound, KeyError) as ex:
            gutils.handle_exceptions(ex)
tasks.py:
from producer_main.celery import app
from utilities.general import upload_class

@app.task
def import_file(path, user):
    """Asynchronously import a course."""
    upload_class(path, user)
celery.py:
"""
As described in
http://celery.readthedocs.org/en/latest/django/first-steps-with-django.html
"""
from __future__ import absolute_import
import os
import logging
from celery import Celery
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'producer_main.settings')
from django.conf import settings
log = logging.getLogger(__name__)
app = Celery('producer') # pylint: disable=invalid-name
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS) # pragma: no cover
#app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request))
My backend is configured as such:
CELERY_ALWAYS_EAGER = True
CELERY_EAGER_PROPAGATES_EXCEPTIONS = False
BROKER_URL = 'amqp://'
CELERY_RESULT_BACKEND = 'redis://localhost'
CELERY_RESULT_PERSISTENT = True
CELERY_IGNORE_RESULT = False
When I run my unittest for the link_error state, I get:
Creating test database for alias 'default'...
<class 'celery.local.PromiseProxy'>
130ccf13-c2a0-4bde-8d49-e17eeb1b0115
<celery.backends.redis.RedisBackend object at 0x10aa2e110>
PENDING
None
None
results from self.async_result:
130ccf13-c2a0-4bde-8d49-e17eeb1b0115
None
FAILURE
Non .zip / .tar.gz file passed in.
Traceback (most recent call last):
So the task results are not available in my _upload_error_simple() method, but they are available from the self.async_result variable returned by apply_async...
I could not get the link and link_error callbacks to work, so I finally had to use the on_failure and on_success task methods described in the docs and this SO question. My tasks.py then looks like this:
class ErrorHandlingTask(Task):
    abstract = True

    def on_failure(self, exc, task_id, targs, tkwargs, einfo):
        msg = 'Import of {0} raised exception: {1!r}'.format(targs[0].split('/')[-1],
                                                             str(exc))

    def on_success(self, retval, task_id, targs, tkwargs):
        msg = "Upload successful. You may now view your course."

@app.task(base=ErrorHandlingTask)
def import_file(path, user):
    """Asynchronously import a course."""
    upload_class(path, user)
You appear to have _upload_error() as a bound method of your class - this is probably not what you want. Try making it a stand-alone task:
@celery_app.task(bind=True)
def _upload_error(self, uuid):
    result = celery_app.AsyncResult(uuid)
    msg = 'Task {0} raised exception: {1!r}\n{2!r}'.format(uuid,
                                                           result.result,
                                                           result.traceback)

class Whatever(object):
    ....
    self.async_result = import_file.apply_async((self.path, request.user),
                                                link=self._upload_success.s(
                                                    "Upload finished."),
                                                link_error=_upload_error.s())
In fact, there's no need for the self parameter since it's not used, so you could just do this:
@celery_app.task()
def _upload_error(uuid):
    result = celery_app.AsyncResult(uuid)
    msg = 'Task {0} raised exception: {1!r}\n{2!r}'.format(uuid,
                                                           result.result,
                                                           result.traceback)
Note the absence of bind=True and self.
Be careful with UUID instances!
If you try to get the status of a task using an id that is a UUID instance rather than a string, you will only get a PENDING status:
from uuid import UUID
from celery.result import AsyncResult
task_id = UUID('d4337c01-4402-48e9-9e9c-6e9919d5e282')
print(AsyncResult(task_id).state)
# PENDING
print(AsyncResult(str(task_id)).state)
# SUCCESS

Emit/Broadcast Messages on REST Call in Python With Flask and Socket.IO

Background
The purpose of this project is to create an SMS-based kill switch for a program I have running locally. The plan is to create a web socket connection between the local program and an app hosted on Heroku. Using Twilio, receiving an SMS will trigger a POST request to this app. If it comes from a number on my whitelist, the application should send a command to the local program to shut down.
Problem
What can I do to find a reference to the namespace so that I can broadcast a message to all connected clients from a POST request?
Right now I am simply creating a new web socket client, connecting it, and sending the message, because I can't figure out how to get access to the namespace object in a way that lets me call emit or broadcast.
Server Code
from gevent import monkey
from flask import Flask, Response, render_template, request
from socketio import socketio_manage
from socketio.namespace import BaseNamespace
from socketio.mixins import BroadcastMixin
from time import time
import twilio.twiml
from socketIO_client import SocketIO  # only necessary because of the hack solution
import socketIO_client

monkey.patch_all()

application = Flask(__name__)
application.debug = True
application.config['PORT'] = 5000

# White list
callers = {
    "+15555555555": "John Smith"
}

# Part of 'hack' solution
stop_namespace = None
socketIO = None

# Part of 'hack' solution
def on_connect(*args):
    global stop_namespace
    stop_namespace = socketIO.define(StopNamespace, '/chat')

# Part of 'hack' solution
class StopNamespace(socketIO_client.BaseNamespace):
    def on_connect(self):
        self.emit("join", 'server@email.com')
        print '[Connected]'

class ChatNamespace(BaseNamespace, BroadcastMixin):
    stats = {
        "people": []
    }

    def initialize(self):
        self.logger = application.logger
        self.log("Socketio session started")

    def log(self, message):
        self.logger.info("[{0}] {1}".format(self.socket.sessid, message))

    def report_stats(self):
        self.broadcast_event("stats", self.stats)

    def recv_connect(self):
        self.log("New connection")

    def recv_disconnect(self):
        self.log("Client disconnected")
        if self.session.has_key("email"):
            email = self.session['email']
            self.broadcast_event_not_me("debug", "%s left" % email)
            self.stats["people"] = filter(lambda e: e != email, self.stats["people"])
            self.report_stats()

    def on_join(self, email):
        self.log("%s joined chat" % email)
        self.session['email'] = email
        if not email in self.stats["people"]:
            self.stats["people"].append(email)
        self.report_stats()
        return True, email

    def on_message(self, message):
        message_data = {
            "sender": self.session["email"],
            "content": message,
            "sent": time() * 1000  # ms
        }
        self.broadcast_event_not_me("message", {"sender": self.session["email"], "content": message})
        return True, message_data

@application.route('/stop', methods=['GET', 'POST'])
def stop():
    '''Right here SHOULD simply be Namespace.broadcast("stop") or something.'''
    global socketIO
    if socketIO == None or not socketIO.connected:
        socketIO = SocketIO('http://0.0.0.0:5000')
        socketIO.on('connect', on_connect)
    global stop_namespace
    if stop_namespace == None:
        stop_namespace = socketIO.define(StopNamespace, '/chat')
    stop_namespace.emit("join", 'server@bayhill.com')
    stop_namespace.emit('message', 'STOP')
    return "Stop being processed."

@application.route('/', methods=['GET'])
def landing():
    return "This is Stop App"

@application.route('/socket.io/<path:remaining>')
def socketio(remaining):
    try:
        socketio_manage(request.environ, {'/chat': ChatNamespace}, request)
    except:
        application.logger.error("Exception while handling socketio connection",
                                 exc_info=True)
    return Response()
I borrowed code heavily from the chatzilla project, which is admittedly pretty different, because I am not really working with a browser.
Perhaps socketio was a bad choice for web sockets and I should have used Tornado, but this seemed like it would work well, and this setup let me easily separate the REST and web socket pieces.
I just use Flask-SocketIO for that.
from gevent import monkey
monkey.patch_all()

from flask import Flask
from flask.ext.socketio import SocketIO

app = Flask(__name__)
socketio = SocketIO(app)

@app.route('/trigger')
def trigger():
    socketio.emit('response',
                  {'data': 'someone triggered me'},
                  namespace='/global')
    return 'message sent via websocket'

if __name__ == '__main__':
    socketio.run(app)
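On the local-program side, a matching listener could reuse the socketIO_client package from the question; a hedged sketch, assuming the Flask-SocketIO server above runs on localhost:5000:
# Hypothetical client for the /global namespace targeted by /trigger above.
from socketIO_client import SocketIO, BaseNamespace

class GlobalNamespace(BaseNamespace):
    def on_response(self, data):
        # fires when the server emits the 'response' event on /global
        print 'server says:', data
        # trigger the local shutdown here

socketIO = SocketIO('localhost', 5000)
global_ns = socketIO.define(GlobalNamespace, '/global')
socketIO.wait()  # block and keep listening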