I'm trying to emit a message from a Celery task. However, the emitted message never reaches the client, even when I try to broadcast:
@celery.task(bind=True)
def search_pdf(self, path, keyword, room):  # , url):
    socketio.emit('message', {'msg': 'results' + ':' + 'message from a celery'}, namespace='/chat', broadcast=True)
    socketio.emit('message', {'msg': 'results' + ':' + 'message from a celery'}, broadcast=True)
While debugging it seems that the emit never reaches the client whenever I call it outside a socketio event handler, so there must be something I misunderstand about Flask-SocketIO. Can someone enlighten me?
For reference this is my init call within my create_app() function:
socketio.init_app(app, async_mode='eventlet', message_queue=app.config['CELERY_BROKER_URL'])
And this is my celery app function:
def create_celery_app(app=None):
    """
    Create a new Celery object and tie together the Celery config to the app's
    config. Wrap all tasks in the context of the application.

    :param app: Flask app
    :return: Celery app
    """
    app = app or create_app()
    # app.app_context().push()
    celery = Celery(app.import_name, broker=app.config['CELERY_BROKER_URL'],
                    include=CELERY_TASK_LIST)
    celery.conf.update(app.config)
    TaskBase = celery.Task

    class ContextTask(TaskBase):
        abstract = True

        def __call__(self, *args, **kwargs):
            with app.app_context():
                return TaskBase.__call__(self, *args, **kwargs)

    celery.Task = ContextTask
    return celery
I'm running Flask behind a gunicorn server with the eventlet worker class for flask-socketio. I'm also using docker-compose, with celery and redis containers providing the task queue. The app itself is based on Miguel Grinberg's example SocketIO chat app, which works fine on its own.
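For context, Flask-SocketIO's documentation on emitting from an external process suggests the Celery worker needs its own SocketIO instance that is connected only to the message queue (the worker never serves clients itself). A minimal sketch of that pattern, assuming the task module already has the celery instance from create_celery_app() and that the Redis URL matches the broker used as message_queue:

# In the Celery worker process (not the web app). The Redis URL is an
# assumption; it should match the message_queue passed to init_app().
from flask_socketio import SocketIO

# A "write-only" SocketIO instance: no app and no server, only a connection
# to the message queue that the web workers are also listening on.
external_socketio = SocketIO(message_queue='redis://redis:6379/0')

@celery.task(bind=True)
def search_pdf(self, path, keyword, room):
    # Anything emitted here is published to the queue and forwarded to
    # connected browsers by the eventlet web workers.
    external_socketio.emit('message',
                           {'msg': 'results:message from a celery task'},
                           namespace='/chat', room=room)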
I would like to run cron jobs from Flask using Celery, but I have an issue with the celery beat schedule: it seems that my task is never loaded, and I don't know where to start checking for the problem.
This is where I define my Flask app, in views.py:
from flask import Flask
from celery.schedules import crontab

app = Flask(__name__)
app.config.update(
    CELERY_BROKER_URL='redis://localhost:6379',
    CELERY_RESULT_BACKEND='redis://localhost:6379',
    CELERY_BEAT_SCHEDULE={
        'task-number-one': {
            'task': 'app.tasks.test',
            'schedule': crontab(minute="*"),
        }
    },
    CELERY_IMPORTS=('app.tasks'),
    CELERY_TASK_RESULT_EXPIRES=30,
    CELERY_TIMEZONE='UTC',
)
and this is where my celery object is created, in tasks.py:
from celery import Celery
from app.views import app
from celery import shared_task


def make_celery(app):
    celery = Celery(app.import_name, backend=app.config['CELERY_RESULT_BACKEND'],
                    broker=app.config['CELERY_BROKER_URL'])
    celery.conf.update(app.config)
    TaskBase = celery.Task

    class ContextTask(TaskBase):
        abstract = True

        def __call__(self, *args, **kwargs):
            with app.app_context():
                return TaskBase.__call__(self, *args, **kwargs)

    celery.Task = ContextTask
    return celery


celery_app = make_celery(app)


@celery_app.task()
def test():
    logger = test.get_logger()
    logger.info("Hello")
views.py and tasks.py are in the same directory, which is called app.
When I launch the celery worker, everything seems normal. But when I launch celery beat, it looks like my task is never sent by the schedule, and I don't know where to check.
Can you help me with this?
Best
I believe Celery beat tasks need to be configured at least after the @app.on_after_configure.connect signal is sent. You should be able to do the following in your tasks.py file.
from celery.schedules import crontab

celery_app.conf.CELERYBEAT_SCHEDULE = {
    "test-every-minute": {
        "task": "tasks.test",
        "schedule": crontab(minute="*"),
    },
}
Alternatively you can use this decorator syntax if your task is defined in the same file as your celery application instance.
@celery_app.on_after_configure.connect
def setup_periodic_tasks(sender, **kwargs):
    # test_two stands in for whatever task you want to schedule.
    sender.add_periodic_task(5, test_two.s(), name='test-every-5')
If your tasks are defined in a separate module you can use the @app.on_after_finalize.connect decorator after importing your tasks.
@app.on_after_finalize.connect
def setup_periodic_tasks(sender, **kwargs):
    from app.tasks import test
    sender.add_periodic_task(10.0, test.s(), name='test-every-10')
Celery Beat Entry Docs
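As a small sanity check (assuming the Celery instance is importable from the app.tasks module, as in the question), it can also help to confirm that the task name used in the schedule matches a name the worker actually registered; a mismatch is a common reason beat appears to send nothing:

# Lists the task names the running worker registered; the name in
# CELERY_BEAT_SCHEDULE ('app.tasks.test') must match one of them exactly.
celery -A app.tasks inspect registered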
I have a Flask app that connects to Cassandra. When I run this app under Gunicorn by invoking Gunicorn as a Flask-Script command (python manage.py gunicorn), it hangs. But when I run the same app from the command line as gunicorn manage:app, it succeeds. Why?
Explanation
Gunicorn forks off workers to handle incoming requests.
If the Cassandra session (connection pool) is created before the worker fork (e.g., during app creation using an application factory pattern), the workers will have problems using Cassandra. DataStax recommends that each worker get its own session, and so you need to defer session creation until after the fork.
This is a problem when you bundle Gunicorn and Flask together as a custom application, because the app (and its Cassandra session) is created in the parent process before the workers fork. When Gunicorn is invoked from the command line (without --preload), each worker imports and creates the Flask app only after the fork, so the problem does not appear.
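If you do need an eager connection, one alternative (not what the example below does) is Gunicorn's post_fork server hook, which runs inside each worker right after it is forked. A minimal sketch, assuming a gunicorn.conf.py loaded with gunicorn -c gunicorn.conf.py manage:app and an app that exposes a CassandraClient as app.cassandra:

# gunicorn.conf.py (assumed filename)
def post_fork(server, worker):
    # Runs once inside each freshly forked worker process, so every worker
    # builds its own Cassandra session instead of inheriting the parent's.
    from manage import app
    app.cassandra.connect()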
Example
To see the two behaviors, manually change bad=False to bad=True.
from cassandra.cluster import Cluster
from flask import Flask
from flask_script import Command, Manager
from gunicorn.app.base import BaseApplication
from gunicorn.six import iteritems


class CassandraClient:
    def __init__(self, bad=False):
        self.session = None
        self.cluster = None
        if bad:
            self.connect()

    def connect(self):
        self.cluster = Cluster(['127.0.0.1'])
        self.session = self.cluster.connect('keyspace')

    def execute(self):
        if not self.session:
            self.connect()
        query = '''
            select now()
            from system.local
        '''
        return self.session.execute(query)[0]


class GunicornApp(BaseApplication):
    '''
    Bundle Gunicorn and Flask together, so that we can call it as a
    flask-script command.

    http://docs.gunicorn.org/en/stable/custom.html
    '''
    def __init__(self, app, options=None):
        self.options = options or {}
        self.application = app
        super(GunicornApp, self).__init__()

    def load_config(self):
        config = dict(
            [(key, value) for key, value in iteritems(self.options)
             if key in self.cfg.settings and value is not None])
        for key, value in iteritems(config):
            self.cfg.set(key.lower(), value)

    def load(self):
        return self.application


class GunicornCommand(Command):
    '''
    Modeled off of flask_script.Server
    '''
    def __init__(self, app, options):
        self.app = app
        self.options = options

    def __call__(self, *args, **kwargs):
        GunicornApp(self.app, self.options).run()


app = Flask(__name__)
app.cassandra = CassandraClient()


@app.route('/')
def hello():
    return str(app.cassandra.execute())


if __name__ == '__main__':
    manager = Manager(app)
    gunicorn_options = {
        'bind': '127.0.0.1',
        'port': 8000,
        'workers': 4
    }
    manager.add_command("gunicorn", GunicornCommand(app, gunicorn_options))
    manager.run()
Versions
Flask==0.12.1
Flask-Script==2.0.5
gunicorn==19.7.1
cassandra-driver==3.8.1
References
http://docs.gunicorn.org/en/stable/custom.html
https://datastax.github.io/python-driver/faq.html
How to use Flask-Script and Gunicorn
https://groups.google.com/a/lists.datastax.com/forum/#!topic/python-driver-user/XuSjjWVnE9Y
I'm new to Celery. I have a task that is not working and I don't know why. I'm using RabbitMQ. Here is my code:
In settings.py:
BROKER_URL = "amqp://guest@localhost//"
tasks.py:
from celery.decorators import task
from celery.utils.log import get_task_logger

from hisoka.models import FeralSpirit, Fireball

logger = get_task_logger(__name__)


@task
def test_task():
    fireball = Fireball.objects.last()
    feral_spirit = FeralSpirit.objects.filter(fireball=fireball).last()
    counters = feral_spirit.increase_counter()
    logger.info(feral_spirit + "counters: " + counters)
The task is just a test; it's designed to increase a counter that is a field of the FeralSpirit model. It works correctly if I don't call the function with delay().
views.py:
class FireballDetail(ListView):
    def get_queryset(self, *args, **kwargs):
        test_task.delay()
        ...
I have a rabbitmq server running correctly (or at least it looks like that) on one terminal and the django localhost server on another terminal. Am I missing something obvious? I have a celery.py and a modified __init__ file, exactly following the documentation.
Most probably your celery worker is not running. Try:
celery -A {project_name} worker --loglevel=info -Q {queue_name}
Substitute the values of project_name and queue_name. If you haven't configured any custom queues, the default queue name is celery.
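For reference, the layout the question describes (a celery.py plus a modified __init__.py "exactly following the documentation") usually looks roughly like this; project_name is a placeholder, as in the command above, since the actual project name isn't shown:

# {project_name}/celery.py -- roughly the layout from the Celery/Django docs
from __future__ import absolute_import

import os

from celery import Celery
from django.conf import settings

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'project_name.settings')

app = Celery('project_name')
# Pull BROKER_URL and friends from the Django settings module.
app.config_from_object('django.conf:settings')
# Discover tasks.py modules in every installed app (e.g. hisoka.tasks).
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)


# {project_name}/__init__.py
from __future__ import absolute_import
from .celery import app as celery_app

The worker started with -A {project_name} only picks up test_task if it is loaded through this app, so it's worth double-checking that the worker banner lists hisoka.tasks.test_task among the registered tasks.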
I'm trying to flip a boolean flag for particular types of objects in my database using SQLAlchemy and celery beat. But how do I access my ORM from the tasks.py file?
from datetime import timedelta

from flask import current_app

from models import Book
from celery.decorators import periodic_task
from application import create_celery_app
# Message and mail are assumed to come from the app's Flask-Mail setup.

celery = create_celery_app()
# Create celery: http://flask.pocoo.org/docs/0.10/patterns/celery/


# This task works fine
@celery.task
def celery_send_email(to, subject, template):
    with current_app.app_context():
        msg = Message(
            subject,
            recipients=[to],
            html=template,
            sender=current_app.config['MAIL_DEFAULT_SENDER']
        )
        return mail.send(msg)


# This fails
@periodic_task(name='release_flag', run_every=timedelta(seconds=10))
def release_flag():
    with current_app.app_context():    # <<< Fails on this line
        books = Book.query.all()       # <<<< Fails here too
        for book in books:
            book.read = True
            book.save()
I'm using the celery command with an embedded beat to run this:
celery -A tasks worker -l INFO --beat
But I'm getting the following error:
raise RuntimeError('working outside of application context')
RuntimeError: working outside of application context
Which points back to the with current_app.app_context() line
If I remove the current_app.app_context() line I will get the following error:
RuntimeError: application not registered on db instance and no application bound to current context
Is there a particular way to access the flask-sqlalchemy orm for celery tasks? Or would there be a better approach to what I'm trying to do?
So far the only workaround that works is to add the following line after db.init_app(app) in my application factory:
db.app = app
I was following this repo to create my celery app https://github.com/mattupstate/overholt/blob/master/overholt/factory.py
You're getting that error because current_app requires an app context to work, but you're trying to use it to set up an app context. You need to use the actual app to set up the context, then you can use current_app.
with app.app_context():
    # do stuff that requires the app context
Or you can use a pattern such as the one described in the Flask docs to subclass celery.Task so it knows about the app context by default.
from celery import Celery


def make_celery(app):
    celery = Celery(app.import_name, broker=app.config['CELERY_BROKER_URL'])
    celery.conf.update(app.config)
    TaskBase = celery.Task

    class ContextTask(TaskBase):
        abstract = True

        def __call__(self, *args, **kwargs):
            with app.app_context():
                return TaskBase.__call__(self, *args, **kwargs)

    celery.Task = ContextTask
    return celery


celery = make_celery(app)
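With that pattern in place, the failing release_flag task from the question shouldn't need the current_app.app_context() block at all, since every task call already runs inside an app context. A rough sketch, keeping the question's Book model and its save() method and leaving the 10-second scheduling to a beat schedule entry (or to the periodic_task decorator as before):

from models import Book

# 'celery' is assumed to be the instance returned by make_celery(app) above.
@celery.task(name='release_flag')
def release_flag():
    # ContextTask pushes an app context around every call, so the
    # Flask-SQLAlchemy query works without current_app.app_context().
    books = Book.query.all()
    for book in books:
        book.read = True
        book.save()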
I'm trying to add some background tasks to my Flask app using Celery. I'm using blueprints with my Flask app like this:
db = SQLAlchemy()


def create_app():
    app = Flask(__name__)
    app.config.from_object('config')

    db.init_app(app)

    from .main import main as main_blueprint
    app.register_blueprint(main_blueprint)

    from .api_1_0 import api as api_1_0_blueprint
    app.register_blueprint(api_1_0_blueprint, url_prefix='/api/v1.0')

    return app
And I have run.py like this:
app = create_app()
app.run(port=7000, debug=True, threaded=True)
Then I have a separate file with some tasks:
import time

from celery import Celery
from flask import current_app

celery = Celery(__name__, broker=CELERY_BROKER_URL)


@celery.task
def send_async_sms(to, msg):
    app = current_app._get_current_object()
    with app.app_context():
        time.sleep(12)
        print "Sending to {0}: {1}".format(to, msg)
Without the two app-context lines, things work fine, but adding them causes the following problem:
ERROR/MainProcess] Task app.notify.send_async_sms[0d535fc4-7465-470e-9204-548ecca2c6e0] raised unexpected: RuntimeError('working outside of application context',)
What am I doing wrong here?
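For what it's worth, current_app only works while an application context is already pushed, and inside a plain Celery worker process there is none, so _get_current_object() has nothing to return. One way around that is to give the worker its own app plus the make_celery/ContextTask pattern from the earlier answer. A rough sketch, where myapp and myapp.celery_utils are made-up module names standing in for wherever create_app and make_celery live:

import time

from flask import current_app

# Assumptions: create_app is the factory shown above, and make_celery is the
# ContextTask helper from the earlier answer.
from myapp import create_app
from myapp.celery_utils import make_celery

celery = make_celery(create_app())

@celery.task
def send_async_sms(to, msg):
    # ContextTask has already pushed an app context, so current_app works here.
    time.sleep(12)
    print("Sending via {0} to {1}: {2}".format(current_app.name, to, msg))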