Within the application context, I can't seem to set objects in memcached. The logs indicate that I connect to memcached, but when I attempt to set an object, the set call returns 0 or False. Outside of the application context, I can connect to the same server IP and port and get and set objects without trouble. Here is my setup:
application/__init__.py
import logging
from flask import Flask, g
# init_elasticsearch / init_memcached come from the app's own modules (see cache/__init__.py below)

class App(Flask):
    def __init__(self):
        super(App, self).__init__(__name__)
        self.config.from_object('app.config')
        self.config.from_object('app.deployments.Prod')
        logging.basicConfig(filename=self.config['LOG_PATH'] + config.LOG_FILE,
                            level=logging.INFO, format=config.LOG_FORMAT,
                            datefmt='%m/%d/%Y %I:%M:%S')
        self.static_folder = config.STATICS
        self.before_request(self.init_dbs)
        self.teardown_request(self.teardown)
        self.after_request(self.teardown)
        try:
            self.init_session()
            self.init_login()
            self.init_templates()
        except Exception as e:
            logging.info(e)

    def init_dbs(self):
        g.ES = init_elasticsearch(hosts=self.config['ES_HOSTS'])
        g.MEMCACHED = init_memcached(host=self.config['MEMCACHED_HOST'],
                                     port=self.config['MEMCACHED_PORT'])
...
cache/__init__.py
from werkzeug.contrib.cache import MemcachedCache
import gevent
import logging

def init_memcached(host, port):
    memcached_connected = False
    while not memcached_connected:
        try:
            MEMCACHED = MemcachedCache([host + ':' + str(port)])
            memcached_connected = True
        except Exception as e:
            logging.info("Memcached not connected")
            logging.error(e)
            gevent.sleep(1)
    return MEMCACHED
controllers/page.py
from flask import Blueprint, request, render_template, url_for, flash, g, redirect
from flask.views import MethodView
from flask.ext.login import current_user
from json import dumps
from app import config
...

items = Blueprint(
    'items',
    __name__,
    template_folder=config.TEMPLATES,
)

class Item(MethodView):
    def get(self, item):
        result = g.MEMCACHED.get('item')
        if result is None:
            ...
            g.MEMCACHED.set('item', result, timeout=60)
        return render_template('item.html', result=result)

items.add_url_rule("/path/<item>", view_func=Item.as_view('item'))
I'm assuming this has something to do with using memcached through the g object. I'd prefer to set up the memcached connection once, as I'm doing with the databases, but memcached doesn't seem to respond in the same way.
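What I have in mind is something like this (untested sketch; it reuses the init_memcached helper above and keeps a single client on the app object rather than reconnecting per request):

class App(Flask):
    def __init__(self):
        super(App, self).__init__(__name__)
        # ... config as above ...
        # connect once at startup instead of on every request
        self.memcached = init_memcached(host=self.config['MEMCACHED_HOST'],
                                        port=self.config['MEMCACHED_PORT'])
        self.before_request(self.init_dbs)

    def init_dbs(self):
        # hand the shared client to each request through g
        g.MEMCACHED = self.memcached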
Related
I need to detect a duplicate session when a user opens a new tab. Since I am using this for a survey, I don't have any user data; I am working with anonymous users.
Reading through the documentation and various other threads, I understand that I need to send the client a piece of session data (a UUID) and check whether the user is already authenticated on each new connection.
My code is below:
from flask import Flask, render_template, session
from flask_session import Session
from flask_socketio import SocketIO, send, emit
from flask_login import LoginManager, UserMixin, current_user, login_user, logout_user, AnonymousUserMixin
import time, json, uuid, os

app = Flask(__name__)
app.config['SECRET_KEY'] = 'top-secret!'
app.config['SESSION_TYPE'] = 'filesystem'
login_manager = LoginManager(app)
login_manager.init_app(app)
Session(app)
socketio = SocketIO(app, cors_allowed_origins="*", logger=True, manage_session=False)

class User(UserMixin, object):
    def __init__(self, id=None):
        self.id = id

@login_manager.user_loader
def load_user(user_id):
    return User.get(user_id)

time_now = 0
msg = "Hello User. Please wait other users to join. Survey will start once minimum users will join. Max waiting time " \
      "is 5 min "
# connected_msg_json = json.dumps(connected_msg, indent=4)
client_count = 0

@socketio.on('message')
def handle_message(msg):
    print("Connected with the client with data " + msg)

@socketio.on('connect')
def test_connect():
    print("Connected")
    f = open('data.json')
    data = json.load(f)
    minUserCount = data['minimumNoOfUser']
    global client_count, time_now
    if current_user.is_authenticated:
        pass
    else:
        client_count += 1
        login_user(User(id=uuid.uuid1()))
    if client_count == 0:
        time_now = int(time.time())
    print("Total no of connected client " + str(client_count))
    print("About to send the time when first user connected " + str(time_now))
    send(time_now)
    if client_count > minUserCount:
        send("Continue", broadcast=True)

@socketio.on('disconnect')
def test_disconnect():
    print('Client disconnected')
    logout_user()
    global client_count
    client_count -= 1
    print("Total no of connected client " + str(client_count))
Since I need to make sure the survey opens only when a minimum number of unique users is present, I decided to log users in upon connection. If the user is already authenticated, I take that to mean the connection is a new tab.
Now I am not sure whether my code is wrong, the packages are incorrect, or both. I have tried to resolve the error, but I am stuck with this one -
ImportError: cannot import name 'ContextVar' from 'werkzeug.local' (/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/werkzeug/local.py)
This appears when I include the line
app.config['SESSION_TYPE'] = 'filesystem'
otherwise I get a different error about the secret key not being set.
My requirements.txt is:
Flask==2.0.2
Flask-Cors==3.0.10
Flask-SocketIO==4.3.1
gevent==21.8.0
gevent-websocket==0.10.1
greenlet==1.1.2
gunicorn==20.1.0
python-engineio==3.13.2
python-socketio==4.6.0
simple-websocket==0.5.0
websocket-client==1.2.1
websockets==10.1
Werkzeug==0.14.1
You need to update Werkzeug to 2.0+. Flask 2.0.2 expects a modern Werkzeug, and werkzeug.local only gained ContextVar in later releases, so the Werkzeug==0.14.1 pin in your requirements causes this ImportError.
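Untested here, but the straightforward fix would be (assuming nothing else in the project pins an older Werkzeug):

pip install --upgrade "Werkzeug>=2.0"

and then change the last line of requirements.txt to a matching pin such as Werkzeug==2.0.2.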
I want to build a monitoring system using RabbitMQ and Tornado. I can run the producer, and my consumer can consume the data from the queue, but the data can't be shown on the website.
This is just an experiment before I use the real sensor.
import pika
import tornado.ioloop
import tornado.web
import tornado.websocket
import logging
from threading import Thread

logging.basicConfig(level=logging.INFO)

clients = []
credentials = pika.credentials.PlainCredentials('ayub', 'ayub')
connection = pika.BlockingConnection(pika.ConnectionParameters('192.168.43.101',
                                                               5672,
                                                               '/',
                                                               credentials))
channel = connection.channel()

def threaded_rmq():
    channel.basic_consume('Queue',
                          on_message_callback=consumer_callback,
                          auto_ack=True,
                          exclusive=False,
                          consumer_tag=None,
                          arguments=None)
    channel.start_consuming()

def disconnect_rmq():
    channel.stop_consuming()
    connection.close()
    logging.info('Disconnected from broker')

def consumer_callback(ch, method, properties, body):
    for itm in clients:
        itm.write_message(body)

class SocketHandler(tornado.websocket.WebSocketHandler):
    def open(self):
        logging.info('websocket open')
        clients.remove(self)

    def close(self):
        logging.info('websocket closed')
        clients.remove(self)

class MainHandler(tornado.web.RequestHandler):
    def get(self):
        self.render("websocket.html")

application = tornado.web.Application([
    (r'/ws', SocketHandler),
    (r"/", MainHandler),
])

def startTornado():
    application.listen(8888)
    tornado.ioloop.IOLoop.instance().start()

def stopTornado():
    tornado.ioloop.IOLoop.instance().stop()

if __name__ == "__main__":
    logging.info('starting thread RMQ')
    threadRMQ = Thread(target=threaded_rmq)
    threadRMQ.start()

    logging.info('starting thread tornado')
    threadTornado = Thread(target=startTornado)
    threadTornado.start()

    try:
        raw_input("server ready")
    except SyntaxError:
        pass

    try:
        logging.info('disconnected')
        disconnect_rmq()
    except Exception, e:
        pass

    stopTornado()
but I got this error
WARNING:tornado.access:404 GET /favicon.ico (192.168.43.10) 0.98ms
please help me
In your SocketHandler.open function you need to add the client, not remove it.
Also consider using a set for clients instead of a list, because the remove operation will be faster:
clients = set()
...

class SocketHandler(tornado.websocket.WebSocketHandler):
    def open(self):
        logging.info('websocket open')
        clients.add(self)

    def on_close(self):  # note: Tornado's close callback is named on_close, not close
        logging.info('websocket closed')
        clients.remove(self)
The message you get regarding favicon.ico is actually a warning, and it's harmless (the browser is requesting an icon to show for the web application, but won't complain if none is available).
You might also run into threading issues because Tornado and Pika are running in different threads so you will have to synchronize them; you can use Tornado's IOLoop.add_callback method for that.
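For example, a rough sketch of that hand-off, reusing the global IOLoop from your code (the broadcast_message helper is illustrative):

def consumer_callback(ch, method, properties, body):
    # runs on the Pika thread: only schedule work here, don't touch clients
    tornado.ioloop.IOLoop.instance().add_callback(broadcast_message, body)

def broadcast_message(body):
    # runs on the Tornado IOLoop thread, where touching clients is safe
    for client in clients:
        client.write_message(body)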
I think the correct way to have instance variables in Flask is by adding users and sessions, but I'm trying to test a concept and I don't want to go through all of that just yet. I'm trying to have a web app load an image into a variable that can then have different image operations performed on it. Obviously you don't want to re-run the whole list of operations on the image on every request, because that would be horribly inefficient.
Is there a way of having an app.var in Flask that I can access from different routes? I've tried using the global context and Flask's current_app, but I get the impression that's not what they're for.
The code for my blueprint is:
import os
from flask import Flask, url_for, render_template, \
    g, send_file, Blueprint
from io import BytesIO
from PIL import Image, ImageDraw, ImageOps

home = Blueprint('home', __name__)

@home.before_request
def before_request():
    g.img = None
    g.user = None

@home.route('/')
def index():
    return render_template('home/index.html')

@home.route('/image')
def image():
    if g.img is None:
        root = os.path.dirname(os.path.abspath(__file__))
        filename = os.path.join(root, '../static/images/lena.jpg')
        g.img = Image.open(filename)
    img_bytes = BytesIO()
    g.img.save(img_bytes, 'jpeg')
    img_bytes.seek(0)
    return send_file(img_bytes, mimetype='image/jpg')

@home.route('/grayscale', methods=['POST'])
def grayscale():
    if g.img:
        print('POST grayscale request')
        g.img = ImageOps.grayscale(g.img)
        return "Grayscale operation successful"
    else:
        print('Grayscale called with no image loaded')
        return "Grayscale operation failed"
The /image route returns the image correctly, but I'd like to be able to call /grayscale, perform the operation, and then make another call to /image and have it return the modified image from memory without reloading it.
You could save a key in your session and use it to look up the image in a global dictionary. This might lead to trouble if you run multiple Flask application instances, but with a single one it is fine; with multiple workers you could use Redis instead. I haven't tried the following code, but it should show the concept.
from flask import session
from PIL import ImageOps
import uuid

app.config['SECRET_KEY'] = 'your secret key'

img_dict = {}

@home.route('/image')
def image():
    key = session.get('key')
    if key is None:
        session['key'] = key = str(uuid.uuid1())  # store as str so it serializes into the session
    img_dict[key] = yourimagedata

@home.route('/grayscale', methods=['POST'])
def grayscale():
    key = session.get('key')
    if key is None:
        print('Grayscale called with no image loaded')
        return "Grayscale operation failed"
    else:
        img = img_dict[key]
        print('POST grayscale request')
        img_dict[key] = ImageOps.grayscale(img)  # write the result back to the shared dict
        return "Grayscale operation successful"
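For the multi-worker case mentioned above, a hedged sketch of the same idea with Redis (untested; it assumes a local Redis server and the redis-py package, and stores raw image bytes rather than the PIL object):

import redis
from io import BytesIO
from PIL import Image

r = redis.Redis(host='localhost', port=6379)

def save_image(key, img):
    # serialize the PIL image to bytes before storing it in Redis
    buf = BytesIO()
    img.save(buf, 'jpeg')
    r.set(key, buf.getvalue())

def load_image(key):
    data = r.get(key)
    return Image.open(BytesIO(data)) if data is not None else None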
I have Celery 3.1.18 running with Django 1.6.11 and RabbitMQ 3.5.4, and I'm trying to test my async task in a failure state (CELERY_ALWAYS_EAGER=True). However, I cannot get the proper "result" in the error callback. The example in the Celery docs shows:
@app.task(bind=True)
def error_handler(self, uuid):
    result = self.app.AsyncResult(uuid)
    print('Task {0} raised exception: {1!r}\n{2!r}'.format(
        uuid, result.result, result.traceback))
When I do this, my result is still "PENDING", result.result = '', and result.traceback = ''. But the actual result returned by my .apply_async call has the right "FAILURE" state and traceback.
My code (basically a Django Rest Framework RESTful endpoint that parses a .tar.gz file, then sends a notification back to the user when the file is done parsing):
views.py:
from producer_main.celery import app as celery_app

@celery_app.task()
def _upload_error_simple(uuid):
    print uuid
    result = celery_app.AsyncResult(uuid)
    print result.backend
    print result.state
    print result.result
    print result.traceback
    msg = 'Task {0} raised exception: {1!r}\n{2!r}'.format(uuid,
                                                           result.result,
                                                           result.traceback)

class UploadNewFile(APIView):
    def post(self, request, repository_id, format=None):
        try:
            uploaded_file = self.data['files'][self.data['files'].keys()[0]]
            self.path = default_storage.save('{0}/{1}'.format(settings.MEDIA_ROOT,
                                                              uploaded_file.name),
                                             uploaded_file)
            print type(import_file)
            self.async_result = import_file.apply_async((self.path, request.user),
                                                        link_error=_upload_error_simple.s())
            print 'results from self.async_result:'
            print self.async_result.id
            print self.async_result.backend
            print self.async_result.state
            print self.async_result.result
            print self.async_result.traceback
            return Response()
        except (PermissionDenied, InvalidArgument, NotFound, KeyError) as ex:
            gutils.handle_exceptions(ex)
tasks.py:
from producer_main.celery import app
from utilities.general import upload_class

@app.task
def import_file(path, user):
    """Asynchronously import a course."""
    upload_class(path, user)
celery.py:
"""
As described in
http://celery.readthedocs.org/en/latest/django/first-steps-with-django.html
"""
from __future__ import absolute_import
import os
import logging
from celery import Celery
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'producer_main.settings')
from django.conf import settings
log = logging.getLogger(__name__)
app = Celery('producer') # pylint: disable=invalid-name
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS) # pragma: no cover
#app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request))
My backend is configured as such:
CELERY_ALWAYS_EAGER = True
CELERY_EAGER_PROPAGATES_EXCEPTIONS = False
BROKER_URL = 'amqp://'
CELERY_RESULT_BACKEND = 'redis://localhost'
CELERY_RESULT_PERSISTENT = True
CELERY_IGNORE_RESULT = False
When I run my unittest for the link_error state, I get:
Creating test database for alias 'default'...
<class 'celery.local.PromiseProxy'>
130ccf13-c2a0-4bde-8d49-e17eeb1b0115
<celery.backends.redis.RedisBackend object at 0x10aa2e110>
PENDING
None
None
results from self.async_result:
130ccf13-c2a0-4bde-8d49-e17eeb1b0115
None
FAILURE
Non .zip / .tar.gz file passed in.
Traceback (most recent call last):
So the task results are not available in my _upload_error_simple() method, but they are available from the returned self.async_result variable...
I could not get the link and link_error callbacks to work, so I finally had to use the on_failure and on_success task methods described in the docs and this SO question. My tasks.py then looks like:
from celery import Task

class ErrorHandlingTask(Task):
    abstract = True

    def on_failure(self, exc, task_id, targs, tkwargs, einfo):
        msg = 'Import of {0} raised exception: {1!r}'.format(targs[0].split('/')[-1],
                                                             str(exc))

    def on_success(self, retval, task_id, targs, tkwargs):
        msg = "Upload successful. You may now view your course."

@app.task(base=ErrorHandlingTask)
def import_file(path, user):
    """Asynchronously import a course."""
    upload_class(path, user)
You appear to have _upload_error() as a bound method of your class - this is probably not what you want. Try making it a stand-alone task:
@celery_app.task(bind=True)
def _upload_error(self, uuid):
    result = celery_app.AsyncResult(uuid)
    msg = 'Task {0} raised exception: {1!r}\n{2!r}'.format(uuid,
                                                           result.result,
                                                           result.traceback)

class Whatever(object):
    ....
    self.async_result = import_file.apply_async((self.path, request.user),
                                                link=self._upload_success.s(
                                                    "Upload finished."),
                                                link_error=_upload_error.s())
In fact there's no need for the self parameter since it's not used, so you could just do this:
@celery_app.task()
def _upload_error(uuid):
    result = celery_app.AsyncResult(uuid)
    msg = 'Task {0} raised exception: {1!r}\n{2!r}'.format(uuid,
                                                           result.result,
                                                           result.traceback)
note the absence of bind=True and self
Be careful with UUID instances!
If you try to get the status of a task with an id that is a UUID instance rather than a string, you will only ever get a PENDING status.
from uuid import UUID
from celery.result import AsyncResult
task_id = UUID('d4337c01-4402-48e9-9e9c-6e9919d5e282')
print(AsyncResult(task_id).state)
# PENDING
print(AsyncResult(str(task_id)).state)
# SUCCESS
Background
The purpose of this project is to create an SMS-based kill switch for a program I have running locally. The plan is to create a web socket connection between the local program and an app hosted on Heroku. Using Twilio, receiving an SMS will trigger a POST request to this app. If it comes from a number on my whitelist, the application should send a command to the local program to shut down.
Problem
What can I do to find a reference to the namespace so that I can broadcast a message to all connected clients from a POST request?
Right now I am simply creating a new web socket client, connecting it, and sending the message, because I can't figure out how to get access to the namespace object in a way that would let me call emit or broadcast.
Server Code
from gevent import monkey
from flask import Flask, Response, render_template, request
from socketio import socketio_manage
from socketio.namespace import BaseNamespace
from socketio.mixins import BroadcastMixin
from time import time
import twilio.twiml
from socketIO_client import SocketIO  # only necessary because of the hack solution
import socketIO_client

monkey.patch_all()

application = Flask(__name__)
application.debug = True
application.config['PORT'] = 5000

# White list
callers = {
    "+15555555555": "John Smith"
}

# Part of 'hack' solution
stop_namespace = None
socketIO = None

# Part of 'hack' solution
def on_connect(*args):
    global stop_namespace
    stop_namespace = socketIO.define(StopNamespace, '/chat')

# Part of 'hack' solution
class StopNamespace(socketIO_client.BaseNamespace):
    def on_connect(self):
        self.emit("join", 'server@email.com')
        print '[Connected]'

class ChatNamespace(BaseNamespace, BroadcastMixin):
    stats = {
        "people": []
    }

    def initialize(self):
        self.logger = application.logger
        self.log("Socketio session started")

    def log(self, message):
        self.logger.info("[{0}] {1}".format(self.socket.sessid, message))

    def report_stats(self):
        self.broadcast_event("stats", self.stats)

    def recv_connect(self):
        self.log("New connection")

    def recv_disconnect(self):
        self.log("Client disconnected")
        if self.session.has_key("email"):
            email = self.session['email']
            self.broadcast_event_not_me("debug", "%s left" % email)
            self.stats["people"] = filter(lambda e: e != email, self.stats["people"])
            self.report_stats()

    def on_join(self, email):
        self.log("%s joined chat" % email)
        self.session['email'] = email
        if not email in self.stats["people"]:
            self.stats["people"].append(email)
        self.report_stats()
        return True, email

    def on_message(self, message):
        message_data = {
            "sender": self.session["email"],
            "content": message,
            "sent": time() * 1000  # ms
        }
        self.broadcast_event_not_me("message", {"sender": self.session["email"], "content": message})
        return True, message_data

@application.route('/stop', methods=['GET', 'POST'])
def stop():
    '''Right here SHOULD simply be Namespace.broadcast("stop") or something.'''
    global socketIO
    if socketIO == None or not socketIO.connected:
        socketIO = SocketIO('http://0.0.0.0:5000')
        socketIO.on('connect', on_connect)
    global stop_namespace
    if stop_namespace == None:
        stop_namespace = socketIO.define(StopNamespace, '/chat')
    stop_namespace.emit("join", 'server@bayhill.com')
    stop_namespace.emit('message', 'STOP')
    return "Stop being processed."

@application.route('/', methods=['GET'])
def landing():
    return "This is Stop App"

@application.route('/socket.io/<path:remaining>')
def socketio(remaining):
    try:
        socketio_manage(request.environ, {'/chat': ChatNamespace}, request)
    except:
        application.logger.error("Exception while handling socketio connection",
                                 exc_info=True)
    return Response()
I borrowed code heavily from the chatzilla project, which is admittedly pretty different because I am not really working with a browser.
Perhaps socketio was a bad choice for web sockets and I should have used Tornado, but this seemed like it would work well, and this setup helped me easily separate the REST and web socket pieces.
I just use Flask-SocketIO for that.
from gevent import monkey
monkey.patch_all()

from flask import Flask
from flask_socketio import SocketIO

app = Flask(__name__)
socketio = SocketIO(app)

@app.route('/trigger')
def trigger():
    socketio.emit('response',
                  {'data': 'someone triggered me'},
                  namespace='/global')
    return 'message sent via websocket'

if __name__ == '__main__':
    socketio.run(app)
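On the local-program side, something like this should receive that broadcast (a hedged sketch using the socketIO_client package from the question; the host, port, and handler names are illustrative and mirror the example above):

from socketIO_client import SocketIO, BaseNamespace

class GlobalNamespace(BaseNamespace):
    def on_response(self, data):
        # 'response' events emitted on /global land here
        print(data['data'])  # replace with the local shutdown logic

socketIO = SocketIO('localhost', 5000)
socketIO.define(GlobalNamespace, '/global')
socketIO.wait()  # block and keep listening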