I want to set up docker-compose for my app, which contains Django, Celery (+ Beat), RabbitMQ and PostgreSQL. My problem is that the celery beat container does not work as intended: it never schedules the task that should run at a 10-second interval.
Docker logs: the celery worker starts fine, but celery beat wakes up in 5.00 minutes instead of ticking every 10 seconds.
celery-worker_1 | [2021-03-29 21:05:58,201: INFO/MainProcess] mingle: all alone
celery-worker_1 | [2021-03-29 21:05:58,225: WARNING/MainProcess] /usr/local/lib/python3.9/site-packages/celery/fixups/django.py:205: UserWarning: Using settings.DEBUG leads to a memory
celery-worker_1 | leak, never use this setting in production environments!
celery-worker_1 | warnings.warn('''Using settings.DEBUG leads to a memory
celery-worker_1 | [2021-03-29 21:05:58,225: INFO/MainProcess] celery@247ea26ee17b ready.
celery-beat_1 | [2021-03-29 21:06:00,525: DEBUG/MainProcess] Setting default socket timeout to 30
celery-beat_1 | [2021-03-29 21:06:00,526: INFO/MainProcess] beat: Starting...
celery-beat_1 | [2021-03-29 21:06:00,749: DEBUG/MainProcess] Current schedule:
celery-beat_1 | <ScheduleEntry: celery.backend_cleanup celery.backend_cleanup() <crontab: 0 4 * * * (m/h/d/dM/MY)>
celery-beat_1 | [2021-03-29 21:06:00,749: DEBUG/MainProcess] beat: Ticking with max interval->5.00 minutes
celery-beat_1 | [2021-03-29 21:06:00,751: DEBUG/MainProcess] beat: Waking up in 5.00 minutes.
Dockerfile
FROM python:3
ENV PYTHONUNBUFFERED=1
WORKDIR /usr/src/app
COPY requirements.txt ./
RUN pip install -r requirements.txt
docker-compose.yml
version: '3.8'

services:
  server:
    build: .
    command: python manage.py runserver 0.0.0.0:8000
    volumes:
      - .:/usr/src/app
    ports:
      - "8000:8000"
    env_file:
      - .env
    depends_on:
      - pgdb
      - rabbit
      - celery-worker
  celery-worker:
    build: .
    command: sh -c 'sleep 8; celery -A core worker -l INFO'
    env_file:
      - .env
    volumes:
      - .:/usr/src/app
    depends_on:
      - pgdb
      - rabbit
  celery-beat:
    build: .
    command: sh -c 'sleep 10; celery -A core beat -l debug'
    env_file:
      - .env
    volumes:
      - .:/usr/src/app
    depends_on:
      - pgdb
      - rabbit
      - celery-worker
  pgdb:
    image: postgres
    env_file:
      - .env
    volumes:
      - pgdata:/var/lib/postgresql/data/
    ports:
      - "5432:5432"
    restart: on-failure
  rabbit:
    image: "rabbitmq:3-management-alpine"
    env_file:
      - .env
    ports:
      - "5672:5672"
      - "15672:15672"
    restart: on-failure

volumes:
  pgdata: {}
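As an aside, the sleep workarounds in the worker and beat commands can be replaced with a healthcheck if your docker-compose version supports the long depends_on form; a sketch, assuming a rabbitmq 3.8+ image (which ships rabbitmq-diagnostics):

rabbit:
  image: "rabbitmq:3-management-alpine"
  healthcheck:
    test: ["CMD", "rabbitmq-diagnostics", "-q", "ping"]
    interval: 10s
    timeout: 5s
    retries: 5

celery-worker:
  depends_on:
    rabbit:
      condition: service_healthy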
settings.py
CELERY_BROKER_URL = 'amqp://admin:mypass@rabbit:5672/'
CELERY_RESULT_BACKEND = 'db+postgresql://news_user:news@pgdb/news'
CELERY_BEAT_SCHEDULE = {
    'scheduled_task': {
        'task': 'news.tasks.get_news',
        'schedule': 10.0,
    },
}
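Note that beat only sees CELERY_BEAT_SCHEDULE if the Celery app reads it from Django settings under the CELERY_ namespace; the debug log above lists only the default celery.backend_cleanup entry, which is why beat sleeps for its default 5.00-minute maximum interval. A minimal core/celery.py sketch, assuming the project package is named core (as in celery -A core):

# core/celery.py
import os

from celery import Celery

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'core.settings')

app = Celery('core')

# Read every CELERY_-prefixed setting, including CELERY_BEAT_SCHEDULE, from Django settings.
app.config_from_object('django.conf:settings', namespace='CELERY')

# Discover tasks.py modules in installed apps (e.g. news.tasks.get_news).
app.autodiscover_tasks()

The app also has to be imported when Django starts, typically via from .celery import app as celery_app in core/__init__.py.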
Related
I'm having a slight issue with spinning up a Dockerised stack of: Django, Redis, Celery on a server backbone of nginx and gunicorn. The Django application connects to an external PostgreSQL database.
I have the following docker-compose.yaml file:
version: '3.7'

services:
  web:
    build:
      context: ./django
      dockerfile: Dockerfile
    expose:
      - "8000"
    volumes:
      - django-static:/usr/src/app/static
      - django-uploads:/usr/src/app/uploads
    depends_on:
      - redis
    links:
      - redis:redis
    environment:
      ...
    command: /usr/src/app/wave.sh
  nginx:
    build:
      context: nginx
      dockerfile: Dockerfile
    restart: on-failure
    ports:
      - 80:80
      - 443:443
    volumes:
      - ssl_data:/etc/resty-auto-ssl
    environment:
      ALLOWED_DOMAINS: "${STAGING_ALLOWED_DOMAINS}"
      SITES: "${STAGING_SITES}"
  redis:
    restart: always
    image: redis:latest
    ports:
      - '6379:6379'
  celery:
    build: ./django
    command: celery --app=Wave.celery.app worker --loglevel=DEBUG
    environment:
      ...
    volumes:
      - './django:/usr/src/app/'
    links:
      - redis:redis
    depends_on:
      - web
      - redis
      - nginx
    restart: on-failure
  celery-beat:
    build: ./django
    command: celery --app=Wave.celery.app beat --loglevel=DEBUG
    environment:
      ...
    volumes:
      - './django:/usr/src/app/'
    links:
      - redis:redis
    depends_on:
      - web
      - redis
      - nginx
    restart: on-failure

volumes:
  ssl_data:
  django-static:
  django-uploads:
However, on bringing the stack up with:
docker-compose -f docker-compose.wave-api-staging.yml up --build --force-recreate
the celery and celery-beat services fail with the errors shown further below.
The wave.sh script is as follows:
#!/bin/sh
rm -rf celerybeat.pid
ln -s /run/shm /dev/shm
python3 manage.py collectstatic --no-input
python3 manage.py migrate
python3 manage.py shell < createsuperuser.py
pip3 freeze
/usr/local/bin/gunicorn Wave.wsgi:application --timeout 3600 --log-level=DEBUG -w 5 -b :8000
Which replaces the following command instruction in the web service:
command: bash -c "rm -rf celerybeat.pid && python3 manage.py makemigrations && python3 manage.py migrate --run-syncdb && python3 manage.py shell < createsuperuser.py && python3 manage.py runserver 0.0.0.0:8000"
celery.py:
import os
import datetime

from celery import Celery
from celery.schedules import crontab
from django.conf import settings

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Wave.settings')

app = Celery('Wave', include=['amq.v1.SurfTaskRunner.demand', 'amq.v1.SurfTaskRunner.periodic'])
app.config_from_object('django.conf:settings', namespace='CELERY')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)

@app.on_after_configure.connect
def setup_periodic_tasks(sender, **kwargs):
    sender.add_periodic_task(
        crontab(minute='6,16,26,36,46,56', hour='*', day_of_week='*', day_of_month='*'),
        dispatch_surfers_waivers_periodic_task.s(),
    )

@app.task(bind=True)
def dispatch_surfers_waivers_periodic_task(self):
    ...  # task body omitted in the question
Dockerfile:
FROM python:3.7.2-slim
RUN apt-get update && apt-get -y install cron && apt-get -y install nano
ENV PYTHONUNBUFFERED 1
RUN mkdir /usr/src/app
WORKDIR /usr/src/app
COPY ./requirements.txt /usr/src/app/requirements.txt
RUN pip install -r requirements.txt
COPY ./wave.sh /usr/src/app/wave.sh
RUN ["chmod", "+x", "/usr/src/app/wave.sh"]
COPY . /usr/src/app
I receive the following errors from the celery and celery beat services:
celery_1 | Error:
celery_1 | Unable to load celery application.
celery_1 | The module Wave was not found.
celery-beat_1 | Error:
celery-beat_1 | Unable to load celery application.
celery-beat_1 | The module Wave was not found.
I have tried reading and googling extensively, but so far have found zero clues as to what could be going wrong here. I'm at the point of completely ditching Celery if I can't get this to work.
However, the same process can be spun up locally without nginx, and with the standard python manage.py runserver command.
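One difference worth checking (an assumption, the logs do not confirm it): celery resolves --app=Wave.celery.app by importing Wave from the worker's working directory, so the bind mount './django:/usr/src/app/' must place the Wave package directly under /usr/src/app. A quick way to inspect from inside the failing container:

docker-compose -f docker-compose.wave-api-staging.yml run --rm celery sh -c 'pwd && ls && python -c "import Wave"'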
Update: here is my local development docker-compose.yml file:
version: '3.7'

services:
  django:
    build:
      context: ./django
      dockerfile: Dockerfile
    ports:
      - 8982:8982
    depends_on:
      - postgres
      - redis
    links:
      - postgres:postgres
      - redis:redis
    command: bash -c "rm -rf celerybeat.pid && python3 manage.py makemigrations && python3 manage.py migrate --run-syncdb && python3 manage.py shell < createsuperuser.py && python3 manage.py runserver 0.0.0.0:8982"
    volumes: ['./django:/usr/src/app/']
    environment:
      ...
  redis:
    restart: always
    image: redis:latest
    ports:
      - '6379:6379'
  celery:
    build: ./django
    command: celery -A Wave worker --loglevel=DEBUG
    environment:
      ...
    volumes:
      - './django:/usr/src/app/'
    links:
      - postgres:postgres
      - redis:redis
    depends_on:
      - django
      - postgres
      - redis
    restart: on-failure
  celery-beat:
    build: ./django
    command: celery -A Wave beat --loglevel=DEBUG
    environment:
      ...
    volumes:
      - './django:/usr/src/app/'
    links:
      - postgres:postgres
      - redis:redis
    depends_on:
      - django
      - postgres
      - redis
    restart: on-failure
  postgres:
    build:
      context: ./postgres
      dockerfile: Dockerfile
    ports:
      - 5433:5432
    expose:
      - 5432
    environment:
      - POSTGRES_DB=conveyor
      - POSTGRES_USER=conveyor
      - POSTGRES_PASSWORD=89$r55cP%fSDDghypoAsdd
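Worth noting: this working local file starts celery with celery -A Wave worker, while the failing staging file uses celery --app=Wave.celery.app worker. Aligning the staging commands with the form that is known to work locally is a low-risk thing to try (a sketch, everything else unchanged):

command: celery -A Wave worker --loglevel=DEBUG
command: celery -A Wave beat --loglevel=DEBUG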
I am building an app with Docker, Django and PostgreSQL. I was trying to parse through the rows of an Excel file (about 7000 rows) and got this error:
web_1 | [2019-09-26 23:39:34 +0000] [1] [CRITICAL] WORKER TIMEOUT (pid:11)
web_1 | [2019-09-26 23:39:34 +0000] [11] [INFO] Worker exiting (pid: 11)
web_1 | [2019-09-26 23:39:34 +0000] [12] [INFO] Booting worker with pid: 12
I searched for a solution and found a suggestion to increase the TIMEOUT, but I don't know where to set it.
Here are my yml files.
docker-compose.yml:
version: '3.7'

services:
  web:
    build: .
    # command: python /code/manage.py runserver 0.0.0.0:8000
    command: gunicorn bookstore_project.wsgi -b 0.0.0.0:8000  # new
    environment:
      - SECRET_KEY=<secret_key>
      - DEBUG=1
      - ENVIRONMENT=development
    volumes:
      - .:/code
    ports:
      - 8000:8000
    depends_on:
      - db
  db:
    image: postgres:11
    volumes:
      - postgres_data:/var/lib/postgresql/data/

volumes:
  postgres_data:
docker-compose-prod.yml:
version: '3.7'

services:
  web:
    build: .
    command: python /code/manage.py runserver 0.0.0.0:8000
    environment:
      - ENVIRONMENT=production
      - SECRET_KEY=<Secret_key>
      - DEBUG=0
    ports:
      - 8000:8000
    depends_on:
      - db
  db:
    image: postgres:11
heroku.yml:
setup:
  addons:
    - plan: heroku-postgresql
build:
  docker:
    web: Dockerfile
release:
  image: web
  command:
    - python manage.py collectstatic --noinput
run:
  web: gunicorn bookstore_project.wsgi
Any help is appreciated!
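For reference, gunicorn's worker timeout defaults to 30 seconds and is raised with the --timeout flag on the gunicorn command line itself; a sketch based on the commands above (120 is an arbitrary value):

# docker-compose.yml
command: gunicorn bookstore_project.wsgi -b 0.0.0.0:8000 --timeout 120

# heroku.yml
run:
  web: gunicorn bookstore_project.wsgi --timeout 120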
Sorry for my English. I have a Django project in which I want to use Celery for background tasks, and now I need to configure Docker for this library. This is my Dockerfile:
FROM python:3
MAINTAINER Alex2
RUN apt-get update
# Install wkhtmltopdf
RUN curl -L#o wk.tar.xz https://downloads.wkhtmltopdf.org/0.12/0.12.4/wkhtmltox-0.12.4_linux-generic-amd64.tar.xz \
&& tar xf wk.tar.xz \
&& cp wkhtmltox/bin/wkhtmltopdf /usr/bin \
&& cp wkhtmltox/bin/wkhtmltoimage /usr/bin \
&& rm wk.tar.xz \
&& rm -r wkhtmltox
RUN apt-get install -y cron
# for celery
ENV APP_USER user
ENV APP_ROOT /src
RUN groupadd -r ${APP_USER} \
&& useradd -r -m \
--home-dir ${APP_ROOT} \
-s /usr/sbin/nologin \
-g ${APP_USER} ${APP_USER}
# create directory for application source code
RUN mkdir -p /usr/django/app
COPY requirements.txt /usr/django/app/
WORKDIR /usr/django/app
RUN pip install -r requirements.txt
This is my docker-compose.dev:
version: '2.0'

services:
  web:
    build: .
    container_name: api_dev
    image: img/api_dev
    volumes:
      - .:/usr/django/app/
      - ./static:/static
    expose:
      - "8001"
    env_file: env/dev.env
    command: bash django_run.sh
  nginx:
    build: nginx
    container_name: ng_dev
    image: img/ng_dev
    ports:
      - "8001:8001"
    volumes:
      - ./nginx/dev_api.conf:/etc/nginx/conf.d/api.conf
      - .:/usr/django/app/
      - ./static:/static
    depends_on:
      - web
    links:
      - web:web
  db:
    image: postgres:latest
    container_name: pq01
    ports:
      - "5432:5432"
  redis:
    image: redis:latest
    container_name: rd01
    command: redis-server
    ports:
      - "8004:8004"
  celery:
    build: .
    container_name: cl01
    command: celery worker --app=myapp.celery
    volumes:
      - .:/usr/django/app/
    links:
      - db
      - redis
And I get this error:
cl01 | User information: uid=0 euid=0 gid=0 egid=0
cl01 |
cl01 | uid=uid, euid=euid, gid=gid, egid=egid,
cl01 | [2018-07-31 16:40:00,207: ERROR/MainProcess] consumer: Cannot connect to redis://redis:8004/0: Error 111 connecting to redis:8004. Connection refused..
cl01 | Trying again in 2.00 seconds...
cl01 |
cl01 | [2018-07-31 16:40:02,211: ERROR/MainProcess] consumer: Cannot connect to redis://redis:8004/0: Error 111 connecting to redis:8004. Connection refused..
cl01 | Trying again in 4.00 seconds...
cl01 |
cl01 | [2018-07-31 16:40:06,217: ERROR/MainProcess] consumer: Cannot connect to redis://redis:8004/0: Error 111 connecting to redis:8004. Connection refused..
cl01 | Trying again in 6.00 seconds...
I can't understand why it won't connect. My project settings file:
CELERY_BROKER_URL = 'redis://redis:8004/0'
CELERY_RESULT_BACKEND = 'redis://redis:8004/0'
Everything looks good, but maybe I have missed some setting in some file. Please help me solve this problem.
I think the port mapping causes the problem, so change the redis settings in the docker-compose.dev file as follows (ports option removed):
redis:
  image: redis:latest
  container_name: rd01
  command: redis-server
and in your settings.py
CELERY_BROKER_URL = 'redis://redis:6379/0'
CELERY_RESULT_BACKEND = 'redis://redis:6379/0'
You don't have to map the ports unless you are using them from your local environment. redis-server listens on port 6379 inside the container by default; containers on the same compose network reach each other on that container port, so the 8004:8004 mapping never made redis listen on 8004.
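If you do want redis reachable from the host on port 8004, map the host port onto the container port instead; in-network clients such as the celery container still connect to redis:6379:

redis:
  image: redis:latest
  container_name: rd01
  command: redis-server
  ports:
    - "8004:6379"  # host port 8004 -> container port 6379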
docker-compose.yml
version: '3'

services:
  # Django web server
  web:
    volumes:
      - "./app/back:/app"
      - "../front/public/static:/app/static"
      - "./phantomjs-2.1.1:/app/phantomjs"
    build:
      context: .
      dockerfile: dockerfile_django
    #command: python manage.py runserver 0.0.0.0:8080
    #command: ["uwsgi", "--ini", "/app/back/uwsgi.ini"]
    ports:
      - "8080:8080"
    links:
      - async
      - ws_server
      - mysql
      - redis
  async:
    volumes:
      - "./app/async_web:/app"
    build:
      context: .
      dockerfile: dockerfile_async
    ports:
      - "8070:8070"
  # Aiohttp web socket server
  ws_server:
    volumes:
      - "./app/ws_server:/app"
    build:
      context: .
      dockerfile: dockerfile_ws_server
    ports:
      - "8060:8060"
  # MySQL db
  mysql:
    image: mysql/mysql-server:5.7
    volumes:
      - "./db_mysql:/var/lib/mysql"
      - "./my.cnf:/etc/my.cnf"
    environment:
      MYSQL_ROOT_PASSWORD: root
      MYSQL_USER: user_b520
      MYSQL_PASSWORD: buzz_17KN
      MYSQL_DATABASE: dev_NT_pr
      MYSQL_PORT: 3306
    ports:
      - "3300:3306"
  # Redis
  redis:
    image: redis:4.0.6
    build:
      context: .
      dockerfile: dockerfile_redis
    volumes:
      - "./redis.conf:/usr/local/etc/redis/redis.conf"
    ports:
      - "6379:6379"
  # Celery worker
  celery:
    build:
      context: .
      dockerfile: dockerfile_celery
    command: celery -A backend worker -l info --concurrency=20
    volumes:
      - "./app/back:/app"
      - "../front/public/static:/app/static"
    links:
      - redis
  # Celery beat
  beat:
    build:
      context: .
      dockerfile: dockerfile_beat
    command: celery -A backend beat
    volumes:
      - "./app/back:/app"
      - "../front/public/static:/app/static"
    links:
      - redis
  # Flower monitoring
  flower:
    build:
      context: .
      dockerfile: dockerfile_flower
    command: celery -A backend flower
    volumes:
      - "./app/back:/app"
      - "../front/public/static:/app/static"
    ports:
      - "5555:5555"
    links:
      - redis
dockerfile_django
FROM python:3.4
RUN mkdir /app
WORKDIR /app
ADD app/back/requirements.txt /app
RUN pip3 install -r requirements.txt
# Apply migrations
CMD ["python", "manage.py", "migrate"]
#CMD python manage.py runserver 0.0.0.0:8080 & cron && tail -f /var/log/cron.log
CMD ["uwsgi", "--ini", "/app/uwsgi.ini"]
In the web container the migrations are applied and everything works.
I also added CMD ["python", "manage.py", "migrate"] to dockerfile_celery, dockerfile_flower and dockerfile_beat, but the migrations are not applied.
I restart the container using the command:
docker-compose up --force-recreate
How can I make the rest of the containers see the migrations?
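Note that only the last CMD in a Dockerfile takes effect, so in dockerfile_django above the CMD ["python", "manage.py", "migrate"] line is overridden by the uwsgi CMD; likewise, the command: entries in docker-compose.yml replace the image CMD entirely, which is why the migrate CMD added to the celery, beat and flower images never runs. A common pattern is a small entrypoint script that applies migrations and then execs the real command (a sketch, with a hypothetical entrypoint.sh):

#!/bin/sh
# entrypoint.sh (hypothetical): apply migrations, then hand over to the container's command
set -e
python manage.py migrate --noinput
exec "$@"

and in the Dockerfile:

COPY entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh
ENTRYPOINT ["/entrypoint.sh"]
CMD ["uwsgi", "--ini", "/app/uwsgi.ini"]

In practice migrations are usually run from a single service only; the other containers just need a shared, already-migrated database rather than their own migrate step.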
log
flower_1 | File "/usr/local/lib/python3.4/site-packages/MySQLdb/connections.py", line 292, in query
flower_1 | _mysql.connection.query(self, query)
flower_1 | django.db.utils.OperationalError: (1054, "Unknown column 'api_communities.is_closed' in 'field list'")
I have set up my celery app for Django and am trying to test a simple periodic task in my Django app.
I have used RabbitMQ as the message broker and have set broker_url by passing environment variables from the docker-compose.yml file as well.
I have the following structure in the docker-compose.yml file:
version: '3'

services:
  nginx:
    restart: always
    image: nginx:latest
    container_name: NGINX
    ports:
      - "8000:8000"
    volumes:
      - ./src:/src
      - ./config/nginx:/etc/nginx/conf.d
      - /static:/static
    depends_on:
      - web
  web:
    restart: always
    build: .
    container_name: DJANGO
    command: bash -c "python manage.py makemigrations && python manage.py migrate && gunicorn loop.wsgi -b 0.0.0.0:8000 --reload"
    depends_on:
      - db
    volumes:
      - ./src:/src
      - /static:/static
    expose:
      - "8000"
    links:
      - db
      - rabbit
  db:
    restart: always
    image: postgres:latest
    container_name: PSQL
  rabbit:
    hostname: rabbit
    image: rabbitmq:latest
    ports:
      # We forward this port for debugging purposes.
      - "5672:5672"
      # Here, we can access the rabbitmq management plugin.
      - "15672:15672"
    environment:
      - RABBITMQ_DEFAULT_USER=admin
      - RABBITMQ_DEFAULT_PASS=mypass
  # Celery worker
  worker:
    build: .
    command: bash -c "python manage.py celery worker -B --concurrency=1"
    volumes:
      - .:/src
    links:
      - db
      - rabbit
    depends_on:
      - rabbit
For testing purposes, I have created a scheduled task in settings.py as follows (scheduled every 10 seconds):
CELERYBEAT_SCHEDULE = {
    'schedule-task': {
        'task': 'myapp.tasks.test_print',
        'schedule': 10,  # in seconds
    },
}
and the tasks.py file inside myapp has the following code:
# -*- coding: utf-8 -*-
from celery.task import task

@task(ignore_result=True, max_retries=1, default_retry_delay=10)
def test_print():
    print("Print from celery task")
When I run the command docker-compose run web python manage.py celery worker -B --concurrency=1,
test_print is executed and its output is printed.
But when I run docker-compose up --build, the following output is printed and the task test_print is never executed:
rabbit_1 | =INFO REPORT==== 15-Jun-2017::13:05:30 ===
rabbit_1 | accepting AMQP connection <0.354.0> (172.18.0.7:51400 -> 172.18.0.3:5672)
rabbit_1 | =INFO REPORT==== 15-Jun-2017::12:54:25 ===
rabbit_1 | connection <0.421.0> (172.18.0.6:52052 -> 172.18.0.3:5672): user 'admin' authenticated and granted access to vhost '/'
I am new to docker. Any guidance will be very helpful.
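As an aside, manage.py celery comes from the legacy django-celery package; with a current Celery install, the equivalent worker-with-embedded-beat service would look roughly like this (a sketch, assuming the project module is loop, taken from loop.wsgi above, and aligning the worker's mount with the web service's):

worker:
  build: .
  command: celery -A loop worker -B --concurrency=1 -l info
  volumes:
    - ./src:/src
  depends_on:
    - rabbit

Note also that in the original file web mounts ./src:/src while worker mounts .:/src, so the two containers do not see the same code tree; that difference alone could explain why docker-compose run web ... works while the worker service does not.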