Set Up RabbitMQ on Docker with Django

I'm trying to set up a Docker container with Django. My Dockerfile is:
FROM python:3.7-alpine
MAINTAINER Freshness Productions
ENV PYTHONUNBUFFERED 1
COPY ./requirements.txt /requirements.txt
RUN apk add --update --no-cache postgresql-client
RUN apk add --update --no-cache --virtual .tmp-build-deps \
        gcc libc-dev linux-headers postgresql-dev
RUN pip install -r /requirements.txt
RUN apk del .tmp-build-deps
RUN mkdir /app
WORKDIR /app
COPY ./app /app
RUN adduser -D user
USER user
RUN rabbitmqctl add_user test testpass1
RUN rabbitmqctl add_vhost myvhost
RUN rabbitmqctl set_permissions -p myvhost test ".*" ".*" ".*"
RUN rabbitmq-server
And my docker-compose.yml is:
version: "3"
services:
app:
build:
context: .
image: &app app
ports:
- "8000:8000"
volumes:
- ./app:/app
command: >
sh -c "python manage.py wait_for_db &&
python manage.py migrate &&
python manage.py runserver 0.0.0.0:8000
export C_FORCE_ROOT='true'
celery -A app beat -l info --scheduler django_celery_beat.schedulers:DatabaseScheduler"
env_file: &envfile
- env.env
depends_on:
- db
- broker
db:
image: postgres:10-alpine
environment:
- POSTGRES_DB=app
- POSTGRES_USER=postgres
- POSTGRES_PASSWORD=supersecretpassword
worker:
build: .
image: *app
restart: "no"
env_file: *envfile
command: sh -c "celery -A app worker --loglevel=info"
volumes:
- ./app:/app
depends_on:
- broker
broker:
image: rabbitmq:3
env_file: *envfile
ports:
- 5672:5672
I don't see what I need to do - other than that I'm using two different images in my docker-compose. Does that matter? I get the error:
ERROR/MainProcess] consumer: Cannot connect to amqp://guest:**@127.0.0.1:5672//: [Errno 111] Connection refused.
when I try to run docker-compose up.
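For context on the error itself: inside Compose, every container has its own 127.0.0.1, so a Celery worker that falls back to the default amqp://guest@127.0.0.1:5672// can never reach RabbitMQ running in the broker container - the broker has to be addressed by its Compose service name. The rabbitmqctl/rabbitmq-server lines in the Dockerfile above also cannot work: python:3.7-alpine ships no RabbitMQ, and RUN executes only at build time; user and vhost setup belongs on the rabbitmq image itself (e.g. via RABBITMQ_DEFAULT_USER/RABBITMQ_DEFAULT_PASS, as in the answer below). A minimal sketch of pointing Celery at the broker service - the module path app/celery.py and the settings names are assumptions for illustration, not the asker's actual code:

# app/celery.py - a sketch; module path and names are assumed
import os

from celery import Celery

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "app.settings")

app = Celery("app")
app.config_from_object("django.conf:settings", namespace="CELERY")
app.autodiscover_tasks()

# In settings.py (or injected via env.env), use the Compose service name,
# not 127.0.0.1:
# CELERY_BROKER_URL = "amqp://test:testpass1@broker:5672/myvhost"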

Here is a configuration that works perfectly with RabbitMQ, Django, MySQL, Celery, and Celery Beat.
docker-compose.yml
version: "3.7"
services:
django:
container_name: django_python
image: python:3.6
command: bash -c "pip3 install -r requirements.txt && python manage.py runserver 0.0.0.0:8000"
volumes:
- ${APP_PATH}var/www/api.mylaser.fr/mylazer_backend:/app
- ${APP_PATH}var/www/static.mylaser.fr:/app/static
depends_on:
- mysql
working_dir: /app
environment:
- PYTHONUNBUFFERED=1
- MYSQL_HOST=mysql
- MYSQL_USER=${MYSQL_USER}
- MYSQL_PASSWORD=${MYSQL_PASSWORD}
- MYSQL_DATABASE=${MYSQL_DATABASE}
- CELERY_BROKER=${CELERY_BROKER}
mysql:
container_name: python_mysql
image: mariadb:latest
#ports:
# - "3306:3306"
volumes:
- ${APP_PATH}var/mysql:/var/lib/mysql
- ${APP_PATH}etc/mysql:/etc/mysql
environment:
- MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}
- MYSQL_DATABASE=${MYSQL_DATABASE}
- MYSQL_USER=${MYSQL_USER}
- MYSQL_PASSWORD=${MYSQL_PASSWORD}
- TZ=Europe/Paris
rabbitmq:
image: rabbitmq:3.8.2-management
container_name: rabbitmq
volumes:
- ${APP_PATH}etc/rabbitmq:/etc/rabbitmq/:rw
- ${APP_PATH}var/rabbitmq:/var/lib/rabbitmq/:rw
- ${APP_PATH}var/log/rabbitmq:/var/log/rabbitmq/:rw
environment:
HOSTNAME: ${RABBITMQ_HOSTNAME}
RABBITMQ_ERLANG_COOKIE: ${RABBITMQ_ERLANG_COOKIE}
RABBITMQ_DEFAULT_USER: ${RABBITMQ_DEFAULT_USER}
RABBITMQ_DEFAULT_PASS: ${RABBITMQ_DEFAULT_PASS}
ports:
- ${RABBITMQ_PORT}:5672
- ${RABBITMQ_MGT_PORT}:15672
worker:
image: python:3.6
container_name: worker
command: bash -c "pip3 install -r requirements.txt && celery -A ${PROJECT_NAME} worker -l info"
working_dir: /app
volumes:
- ${APP_PATH}var/www/api.mylaser.fr/mylazer_backend:/app
environment:
- PYTHONUNBUFFERED=1
- MYSQL_HOST=mysql
- MYSQL_USER=${MYSQL_USER}
- MYSQL_PASSWORD=${MYSQL_PASSWORD}
- MYSQL_DATABASE=${MYSQL_DATABASE}
- CELERY_BROKER=${CELERY_BROKER}
depends_on:
- mysql
- rabbitmq
beat-worker:
image: python:3.6
container_name: beat-worker
command: bash -c "pip3 install -r requirements.txt && celery -A ${PROJECT_NAME} beat -l info"
working_dir: /app
volumes:
- ${APP_PATH}var/www/api.mylaser.fr/mylazer_backend:/app
environment:
- PYTHONUNBUFFERED=1
- MYSQL_HOST=mysql
- MYSQL_USER=${MYSQL_USER}
- MYSQL_PASSWORD=${MYSQL_PASSWORD}
- MYSQL_DATABASE=${MYSQL_DATABASE}
- CELERY_BROKER=${CELERY_BROKER}
depends_on:
- mysql
- rabbitmq
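The CELERY_BROKER variable that this file injects into the django, worker, and beat-worker services would then be read in the project's Celery/Django settings, along these lines (a sketch; the settings module layout is an assumption):

# settings.py - consume the broker URL provided by docker-compose (sketch)
import os

# e.g. CELERY_BROKER=amqp://user:pass@rabbitmq:5672// in the env file
CELERY_BROKER_URL = os.environ.get("CELERY_BROKER", "amqp://rabbitmq:5672//")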

Related

Django + Celery + Nginx

I'm trying to run my Django application in Docker with Celery and Nginx.
Docker-compose
version: '3'
services:
  helpdesk_web:
    build:
      context: ./
      dockerfile: Dockerfile
    container_name: helpdesk_web
    volumes:
      - ./static:/usr/src/app/static
      - media:/usr/src/app/media
    ports:
      - "8000:8000"
      - "5678:5678"
    env_file:
      - ./.env
    restart: always
    depends_on:
      - helpdesk_db
      - helpdesk_redis
  helpdesk_db:
    image: postgres
    container_name: helpdesk_db
    volumes:
      - postgres_data:/var/lib/postgresql/data/
    env_file:
      - ./.env
    ports:
      - "5432:5432"
    environment:
      POSTGRES_DB: helpdesk_db
      POSTGRES_PASSWORD: itds
      POSTGRES_USER: itds
  nginx:
    build:
      context: ./docker/nginx
      dockerfile: Dockerfile
    container_name: helpdesk_nginx
    restart: on-failure
    depends_on:
      - helpdesk_web
      - helpdesk_db
    ports:
      - "80:80"
    volumes:
      - ./static:/usr/src/app/static
      - media:/usr/src/app/media
  helpdesk_redis:
    image: redis
    ports:
      - "6379:6379"
  helpdesk_celery:
    build:
      context: .
      dockerfile: Dockerfile
    command: celery -A helpdesk worker -l INFO --pool=solo
    depends_on:
      - helpdesk_web
      - helpdesk_redis
  helpdesk_celery-beat:
    build:
      context: .
      dockerfile: Dockerfile
    command: celery -A helpdesk beat -l INFO --scheduler django_celery_beat.schedulers:DatabaseScheduler
    depends_on:
      - helpdesk_web
      - helpdesk_redis
volumes:
  postgres_data:
  media:
Dockerfile
FROM python:3.10
WORKDIR /usr/src/app
ENV PYTHONDONTWRITEBYTECODE 1
ENV PYTHONUNBUFFERED 1
RUN pip install --upgrade pip
COPY requirements.txt .
RUN pip install -r requirements.txt
COPY . .
RUN chmod +x entrypoint.sh
ENTRYPOINT ["/usr/src/app/entrypoint.sh"]
entrypoint.sh
#!/bin/sh

if [ "$DATABASE" = "postgres" ]
then
    echo "Waiting for postgres..."
    while ! nc -z $SQL_HOST $SQL_PORT; do
        sleep 0.1
    done
    echo "PostgreSQL started"
fi

python manage.py migrate --no-input

exec gunicorn helpdesk.wsgi:application -b 0.0.0.0:8000 --workers=$WORKERS_COUNT
When I run docker-compose up I get an error. I have a dev docker-compose without nginx and it works fine, so it seems there is some problem between celery and nginx. I am a beginner with Docker, so I don't know what to do.
What am I missing?
EDIT №1:
It looks like the celery and celery-beat containers end up running gunicorn instead of their own commands.
The Compose command: overrides the Dockerfile CMD and is passed as arguments to the ENTRYPOINT. A typical setup is to have your entrypoint script do first-time setup, like waiting for a database, and then end with the special shell command exec "$@" to run the command passed to it as arguments.
#!/bin/sh
# Wait for the database, run migrations, set environment variables, ...
if [ "$DATABASE" = "postgres" ]; then ...; fi
python manage.py migrate --no-input

# Run the command passed as arguments, not a fixed command here
exec "$@"
In your Dockerfile, declare the default CMD you want the container to run, like gunicorn.
ENTRYPOINT ["/usr/src/app/entrypoint.sh"]
CMD gunicorn helpdesk.wsgi:application -b 0.0.0.0:8000 --workers=$WORKERS_COUNT
Now it will honor the Compose command:, or another command you docker-compose run.
version: '3.8'
services:
  web:
    build: .
    ports: ['8000:8000', '5678:5678']
    env_file: [.env]
    restart: always
    depends_on: [db, redis]
    # and the default image CMD
  celery:
    build: .
    command: celery -A helpdesk worker -l INFO --pool=solo
    depends_on: [redis]
  celery-beat:
    build: .
    command: celery -A helpdesk beat -l INFO --scheduler django_celery_beat.schedulers:DatabaseScheduler
    depends_on: [redis]
  db: { ... }
  redis: { image: redis }
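Since the celery-beat service runs with django_celery_beat.schedulers:DatabaseScheduler, its schedules live in the database rather than in code. For reference, registering one looks roughly like this - a sketch using django_celery_beat's documented models, where helpdesk.tasks.process_tickets is a hypothetical task path:

# Sketch: create a schedule for the DatabaseScheduler to pick up.
from django_celery_beat.models import IntervalSchedule, PeriodicTask

schedule, _ = IntervalSchedule.objects.get_or_create(
    every=10,
    period=IntervalSchedule.MINUTES,
)
PeriodicTask.objects.get_or_create(
    interval=schedule,
    name="Process open tickets",            # must be unique
    task="helpdesk.tasks.process_tickets",  # hypothetical task path
)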

How do you make a synced volume with Docker, Django and Gunicorn?

Googled solutions do not help; I feel like the problem is me using Gunicorn as the local server. I just can't get my volume to sync and update when I change local files - how do I do that? Forcing a rebuild of the volume every time sounds highly inefficient.
I tried using Watchtower as well, but had no luck.
compose.yml
services:
  back:
    container_name: blog-django
    build: ./blog-master
    command: gunicorn blog.wsgi:application --bind 0.0.0.0:8000
    expose:
      - 8000
    links:
      - db
    volumes:
      - .:/app
      - blog-django:/usr/src/app/
      - blog-static:/usr/src/app/static
    env_file: ./.env
    depends_on:
      db:
        condition: service_healthy
  nginx:
    container_name: blog-nginx
    build: ./nginx/
    ports:
      - "1337:80"
    volumes:
      - blog-static:/usr/src/app/static
    links:
      - back
    depends_on:
      - back
  db:
    container_name: blog-db
    image: postgres:14
    restart: always
    expose:
      - "5432"
    environment:
      - POSTGRES_DB=docker
      - POSTGRES_USER=docker
      - POSTGRES_PASSWORD=docker
    ports:
      - "5432:5432"
    volumes:
      - pgdata:/var/lib/postgresql/data/
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U docker"]
      interval: 5s
      timeout: 5s
      retries: 5
  mailhog:
    container_name: mailhog
    image: mailhog/mailhog
    #logging:
    #  driver: 'none' # disable saving logs
    expose:
      - 1025
    ports:
      - 1025:1025 # smtp server
      - 8025:8025 # web ui
volumes:
  blog-django:
  blog-static:
  pgdata:
Dockerfile
FROM python:3.9.6-alpine
WORKDIR /usr/src/app/
# set environment variables
ENV PYTHONDONTWRITEBYTECODE 1
ENV PYTHONUNBUFFERED 1
# install psycopg2 dependencies
RUN apk update \
    && apk add postgresql-dev gcc python3-dev musl-dev
RUN pip install --upgrade pip
COPY requirements.txt .
RUN pip install -r requirements.txt
COPY . .
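Two things stand out here. First, the back service stacks three volumes: the bind mount .:/app lands on /app, while the named volumes blog-django and blog-static cover /usr/src/app/ - the directory the image actually uses as WORKDIR - so local edits never reach the running code. Second, even with a correct bind mount, Gunicorn imports the application once at startup and will not pick up changes without its reloader. A development-only sketch using gunicorn.conf.py, the Python config file Gunicorn loads from the working directory by default (values are illustrative):

# gunicorn.conf.py - development settings (sketch)
bind = "0.0.0.0:8000"
workers = 2

# Restart workers whenever source files change; for development only.
reload = True

With that file in place the command can shrink to gunicorn blog.wsgi:application, and the bind mount should target /usr/src/app/ instead of /app.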

psql not found in script for trying to control startup and shutdown order in Docker Compose

I'm trying to make sure my Django app waits for my Postgres db to start, so I don't get the error django.db.utils.OperationalError: FATAL: the database system is starting up. I've read https://docs.docker.com/compose/startup-order/, and here is what I have so far.
docker-compose.yml
version: "3.9"
services:
db:
image: postgres
volumes:
- ./data/db:/var/lib/postgresql/data
healthcheck:
test: ["CMD-SHELL", "pg_isready -U postgres"]
interval: 5s
timeout: 5s
retries: 5
environment:
- POSTGRES_DB=${DB_NAME}
- POSTGRES_USER=${DB_USER}
- POSTGRES_PASSWORD=${DB_PASSWORD}
backend:
build: ./backend
command: python3 manage.py runserver
volumes:
- ./backend:/code
ports:
- "8000:8000"
command: ["./wait-for-it.sh", "db", "bash", "entrypoint.sh"]
depends_on:
- db
wait-for-it.sh
#!/bin/sh
# wait-for-it.sh

set -e

host="$1"
shift
cmd="$@"

# postgres
until PGPASSWORD=$DB_PASSWORD psql -h "$host" -U "postgres" -c '\q'; do
  >&2 echo "Postgres is unavailable - sleeping"
  sleep 1
done

>&2 echo "Postgres is up - executing command"
exec $cmd
Dockerfile
# syntax=docker/dockerfile:1
FROM python:3.9.6-alpine3.14
ENV PYTHONUNBUFFERED=1
WORKDIR /code
COPY requirements.txt /code/
RUN \
    apk add --no-cache postgresql-libs && \
    apk add --no-cache --virtual .build-deps gcc musl-dev postgresql-dev && \
    python3 -m pip install -r requirements.txt --no-cache-dir && \
    apk --purge del .build-deps
COPY . /code/
RUN chmod u+x ./wait-for-it.sh
EDIT #1:
This is my directory structure (screenshots of the root and backend directories omitted).
You are trying to combine several different solutions.
First of all, if you use pg_isready you don't need any custom wait-for-it.sh script, because pg_isready works great - so just remove your wait-for-it.sh file.
Also, if you use a healthcheck in docker-compose.yml, you don't need to run any check scripts manually before running your entrypoint.sh, but you do need to add a condition to the depends_on section. So change your docker-compose.yml to the following:
version: "3.9"
services:
db:
image: postgres
volumes:
- ./data/db:/var/lib/postgresql/data
healthcheck:
test: ["CMD-SHELL", "pg_isready -U ${DB_USER}"]
interval: 5s
timeout: 5s
retries: 5
environment:
- POSTGRES_DB=${DB_NAME}
- POSTGRES_USER=${DB_USER}
- POSTGRES_PASSWORD=${DB_PASSWORD}
backend:
build: ./backend
volumes:
- ./backend:/code
ports:
- "8000:8000"
command: entrypoint.sh
depends_on:
db:
condition: service_healthy
Note that I also changed the test command in the healthcheck section and removed the first command: entry from the backend service.
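As an alternative to shell-level checks, the waiting can also live in Django itself as a management command (this is what the wait_for_db call in the first question refers to). A minimal sketch, assuming an app that provides management/commands/wait_for_db.py:

# management/commands/wait_for_db.py - sketch of a wait_for_db command
import time

from django.core.management.base import BaseCommand
from django.db import connections
from django.db.utils import OperationalError


class Command(BaseCommand):
    help = "Pause until the default database accepts connections."

    def handle(self, *args, **options):
        self.stdout.write("Waiting for database...")
        while True:
            try:
                connections["default"].cursor()  # raises if db is not ready
                break
            except OperationalError:
                self.stdout.write("Database unavailable, retrying in 1s...")
                time.sleep(1)
        self.stdout.write(self.style.SUCCESS("Database available."))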

How to scale Heroku Django Celery app with Docker

I'm trying to deploy my Django app to Heroku and I have several doubts about dynos, workers, and the deploy configuration.
In my heroku.yml file I have two types of processes, one for the web and the other for Celery. I would like them both to have only one dyno each, but with several workers, and to be scalable if necessary.
heroku.yml:
build:
  docker:
    web: Dockerfile-django
    celery: Dockerfile-django
run:
  web: gunicorn project.wsgi --log-file -
  celery: celery -A project worker -B --loglevel=INFO
docker-compose.yml:
version: '3.7'
services:
  web:
    container_name: dilains_django_ctnr
    build:
      context: .
      dockerfile: Dockerfile-django
    restart: always
    env_file: ./project/project/.env
    command: python manage.py check
    command: python manage.py runserver 0.0.0.0:8000
    volumes:
      - ./project:/dilains
    depends_on:
      - postgres
      - redis
    ports:
      - 8000:8000
    networks:
      - dilains-ntwk
  redis:
    container_name: dilains_redis_ctnr
    build:
      context: .
      dockerfile: Dockerfile-redis
    volumes:
      - ./redis-data:/data
    ports:
      - 3679:3679
    networks:
      - dilains-ntwk
  celery:
    container_name: dilains_celery_ctnr
    build:
      context: .
      dockerfile: Dockerfile-django
    restart: always
    env_file: ./project/project/.env
    command: celery -A project worker -B --loglevel=INFO
    volumes:
      - ./project:/dilains
    depends_on:
      - redis
      - web
      - postgres
    networks:
      - dilains-ntwk
networks:
  dilains-ntwk:
    driver: bridge
Dockerfile-django:
FROM python:3.7-alpine
ENV PYTHONDONTWRITEBYTECODE 1
ENV PYTHONUNBUFFERED 1
RUN apk update && apk add --no-cache bash postgresql postgresql-dev gcc python3-dev musl-dev jpeg-dev zlib-dev libjpeg
RUN mkdir /dilains
COPY ./project /dilains/
COPY ./requirements.txt /dilains/
WORKDIR /dilains
RUN pip install -r requirements.txt
EXPOSE 8000
I tried to scale each process type to 4 workers with these commands:
$ heroku ps -a app_name
=== celery (Standard-1X): /bin/sh -c celery\ -A\ project\ worker\ -B\ --loglevel\=INFO (1)
celery.1: up 2020/10/23 08:05:31 +0200 (~ 41m ago)
=== web (Standard-1X): /bin/sh -c gunicorn\ project.wsgi\ --log-file\ - (1)
web.1: up 2020/10/23 08:05:40 +0200 (~ 41m ago)
$ heroku ps:scale web=1 worker=4 -a app_name
$ heroku ps:scale celery=1 worker=4 -a app_name
I'm paying for Standard-1X dynos, and the plan says: number of process types - unlimited; horizontal scaling - yes.
Could anybody help, please?
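One likely snag: heroku ps:scale operates on the process types declared in heroku.yml, which here are web and celery - there is no worker process type, so worker=4 has nothing to scale. Scaling the declared types would look like heroku ps:scale web=2 celery=2 -a app_name, where the number is the dyno count per process type; the number of Celery worker processes inside each dyno is controlled separately, e.g. with Celery's --concurrency option in the run: command.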

Issue Dockerising Django + Celery... Unable to load celery application

I'm having a slight issue with spinning up a Dockerised stack of Django, Redis, and Celery on a server backbone of nginx and gunicorn. The Django application connects to an external PostgreSQL database.
I have the following docker-compose.yaml file:
version: '3.7'
services:
  web:
    build:
      context: ./django
      dockerfile: Dockerfile
    expose:
      - "8000"
    volumes:
      - django-static:/usr/src/app/static
      - django-uploads:/usr/src/app/uploads
    depends_on:
      - redis
    links:
      - redis:redis
    environment:
      ...
    command: /usr/src/app/wave.sh
  nginx:
    build:
      context: nginx
      dockerfile: Dockerfile
    restart: on-failure
    ports:
      - 80:80
      - 443:443
    volumes:
      - ssl_data:/etc/resty-auto-ssl
    environment:
      ALLOWED_DOMAINS: "${STAGING_ALLOWED_DOMAINS}"
      SITES: "${STAGING_SITES}"
  redis:
    restart: always
    image: redis:latest
    ports:
      - '6379:6379'
  celery:
    build: ./django
    command: celery --app=Wave.celery.app worker --loglevel=DEBUG
    environment:
      ...
    volumes:
      - './django:/usr/src/app/'
    links:
      - redis:redis
    depends_on:
      - web
      - redis
      - nginx
    restart: on-failure
  celery-beat:
    build: ./django
    command: celery --app=Wave.celery.app beat --loglevel=DEBUG
    environment:
      ...
    volumes:
      - './django:/usr/src/app/'
    links:
      - redis:redis
    depends_on:
      - web
      - redis
      - nginx
    restart: on-failure
volumes:
  ssl_data:
  django-static:
  django-uploads:
However, the celery and celery-beat services throw errors (shown below) when I bring the stack up with:
docker-compose -f docker-compose.wave-api-staging.yml up --build --force-recreate
The wave.sh script is as follows:
#!/bin/sh
rm -rf celerybeat.pid
ln -s /run/shm /dev/shm
python3 manage.py collectstatic --no-input
python3 manage.py migrate
python3 manage.py shell < createsuperuser.py
pip3 freeze
/usr/local/bin/gunicorn Wave.wsgi:application --timeout 3600 --log-level=DEBUG -w 5 -b :8000
Which replaces the following command instruction in the web service:
command: bash -c "rm -rf celerybeat.pid && python3 manage.py makemigrations && python3 manage.py migrate --run-syncdb && python3 manage.py shell < createsuperuser.py && python3 manage.py runserver 0.0.0.0:8000"
celery.py:
import os
import datetime

from celery import Celery
from celery.schedules import crontab
from django.conf import settings

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Wave.settings')

app = Celery('Wave', include=['amq.v1.SurfTaskRunner.demand', 'amq.v1.SurfTaskRunner.periodic'])
app.config_from_object('django.conf:settings', namespace='CELERY')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)

@app.on_after_configure.connect
def setup_periodic_tasks(sender, **kwargs):
    sender.add_periodic_task(
        crontab(minute='6,16,26,36,46,56', hour='*', day_of_week='*', day_of_month='*'),
        dispatch_surfers_waivers_periodic_task.s(),
    )

@app.task(bind=True)
def dispatch_surfers_waivers_periodic_task(self):
    ...
Dockerfile:
FROM python:3.7.2-slim
RUN apt-get update && apt-get -y install cron && apt-get -y install nano
ENV PYTHONUNBUFFERED 1
RUN mkdir /usr/src/app
WORKDIR /usr/src/app
COPY ./requirements.txt /usr/src/app/requirements.txt
RUN pip install -r requirements.txt
COPY ./wave.sh /usr/src/app/wave.sh
RUN ["chmod", "+x", "/usr/src/app/wave.sh"]
COPY . /usr/src/app
I receive the following errors from the celery and celery beat services:
celery_1 | Error:
celery_1 | Unable to load celery application.
celery_1 | The module Wave was not found.
celery-beat_1 | Error:
celery-beat_1 | Unable to load celery application.
celery-beat_1 | The module Wave was not found.
I have read and googled extensively, but so far have found zero clues as to what could be going wrong here... I'm at the point of completely ditching Celery if I can't get this to work.
However, the same process can be spun up locally without nginx, and with the standard python manage.py runserver command.
Update: here is my local development docker-compose.yml file:
version: '3.7'
services:
  django:
    build:
      context: ./django
      dockerfile: Dockerfile
    ports:
      - 8982:8982
    depends_on:
      - postgres
      - redis
    links:
      - postgres:postgres
      - redis:redis
    command: bash -c "rm -rf celerybeat.pid && python3 manage.py makemigrations && python3 manage.py migrate --run-syncdb && python3 manage.py shell < createsuperuser.py && python3 manage.py runserver 0.0.0.0:8982"
    volumes: ['./django:/usr/src/app/']
    environment:
      ...
  redis:
    restart: always
    image: redis:latest
    ports:
      - '6379:6379'
  celery:
    build: ./django
    command: celery -A Wave worker --loglevel=DEBUG
    environment:
      ...
    volumes:
      - './django:/usr/src/app/'
    links:
      - postgres:postgres
      - redis:redis
    depends_on:
      - django
      - postgres
      - redis
    restart: on-failure
  celery-beat:
    build: ./django
    command: celery -A Wave beat --loglevel=DEBUG
    environment:
      ...
    volumes:
      - './django:/usr/src/app/'
    links:
      - postgres:postgres
      - redis:redis
    depends_on:
      - django
      - postgres
      - redis
    restart: on-failure
  postgres:
    build:
      context: ./postgres
      dockerfile: Dockerfile
    ports:
      - 5433:5432
    expose:
      - 5432
    environment:
      - POSTGRES_DB=conveyor
      - POSTGRES_USER=conveyor
      - POSTGRES_PASSWORD=89$r55cP%fSDDghypoAsdd
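On the original error: celery --app=Wave.celery.app (or -A Wave) can only succeed if a Wave package is importable from the container's working directory, /usr/src/app. Both compose files mount ./django over /usr/src/app/, so that host directory must actually contain the package; if the code sits one level deeper, or the bind-mounted directory is empty on the staging server, Celery reports exactly "The module Wave was not found". For reference, the layout Celery expects - a sketch of the standard Django/Celery arrangement, not the asker's confirmed tree:

# Expected layout under /usr/src/app (sketch):
#   manage.py
#   Wave/
#     __init__.py
#     settings.py
#     celery.py     <- defines app = Celery('Wave', ...)

# Wave/__init__.py - make the app importable as Wave.celery.app (sketch)
from .celery import app as celery_app

__all__ = ("celery_app",)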