Why do other docker containers not see the applied migrations? - django

docker-compose.yml
version: '3'
services:
  # Django web server
  web:
    volumes:
      - "./app/back:/app"
      - "../front/public/static:/app/static"
      - "./phantomjs-2.1.1:/app/phantomjs"
    build:
      context: .
      dockerfile: dockerfile_django
    #command: python manage.py runserver 0.0.0.0:8080
    #command: ["uwsgi", "--ini", "/app/back/uwsgi.ini"]
    ports:
      - "8080:8080"
    links:
      - async
      - ws_server
      - mysql
      - redis
  async:
    volumes:
      - "./app/async_web:/app"
    build:
      context: .
      dockerfile: dockerfile_async
    ports:
      - "8070:8070"
  # Aiohttp web socket server
  ws_server:
    volumes:
      - "./app/ws_server:/app"
    build:
      context: .
      dockerfile: dockerfile_ws_server
    ports:
      - "8060:8060"
  # MySQL db
  mysql:
    image: mysql/mysql-server:5.7
    volumes:
      - "./db_mysql:/var/lib/mysql"
      - "./my.cnf:/etc/my.cnf"
    environment:
      MYSQL_ROOT_PASSWORD: root
      MYSQL_USER: user_b520
      MYSQL_PASSWORD: buzz_17KN
      MYSQL_DATABASE: dev_NT_pr
      MYSQL_PORT: 3306
    ports:
      - "3300:3306"
  # Redis
  redis:
    image: redis:4.0.6
    build:
      context: .
      dockerfile: dockerfile_redis
    volumes:
      - "./redis.conf:/usr/local/etc/redis/redis.conf"
    ports:
      - "6379:6379"
  # Celery worker
  celery:
    build:
      context: .
      dockerfile: dockerfile_celery
    command: celery -A backend worker -l info --concurrency=20
    volumes:
      - "./app/back:/app"
      - "../front/public/static:/app/static"
    links:
      - redis
  # Celery beat
  beat:
    build:
      context: .
      dockerfile: dockerfile_beat
    command: celery -A backend beat
    volumes:
      - "./app/back:/app"
      - "../front/public/static:/app/static"
    links:
      - redis
  # Flower monitoring
  flower:
    build:
      context: .
      dockerfile: dockerfile_flower
    command: celery -A backend flower
    volumes:
      - "./app/back:/app"
      - "../front/public/static:/app/static"
    ports:
      - "5555:5555"
    links:
      - redis
dockerfile_django
FROM python:3.4
RUN mkdir /app
WORKDIR /app
ADD app/back/requirements.txt /app
RUN pip3 install -r requirements.txt
# Apply migrations
CMD ["python", "manage.py", "migrate"]
#CMD python manage.py runserver 0.0.0.0:8080 & cron && tail -f /var/log/cron.log
CMD ["uwsgi", "--ini", "/app/uwsgi.ini"]
In the web container the migrations are applied and everything works.
I also added CMD ["python", "manage.py", "migrate"] to dockerfile_celery, dockerfile_beat, and dockerfile_flower, but the migrations are not applied there.
I restart the containers using the command:
docker-compose up --force-recreate
How can I make the rest of the containers see the migrations?
log
flower_1 | File "/usr/local/lib/python3.4/site-packages/MySQLdb/connections.py", line 292, in query
flower_1 | _mysql.connection.query(self, query)
flower_1 | django.db.utils.OperationalError: (1054, "Unknown column 'api_communities.is_closed' in 'field list'")
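For what it's worth, two Docker details matter here: only the last CMD in a Dockerfile takes effect (so in dockerfile_django the migrate CMD is overridden by the uwsgi CMD, and the Compose command: would override both anyway), and migrations live in the shared MySQL database, not in each container, so they only need to run once. A sketch, assuming the service names above, of running migrate once before starting uwsgi in the web service:

web:
  build:
    context: .
    dockerfile: dockerfile_django
  # run migrations once, then start the app server; celery/beat/flower
  # talk to the same mysql service and see the applied migrations automatically
  command: sh -c "python manage.py migrate --noinput && uwsgi --ini /app/uwsgi.ini"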

Related

Project with django, docker, celery, redis giving error [ERROR/MainProcess] cannot connect to amqp://guest:**@127.0.0.1:5672//: [Errno 111] Connection refused

I'm trying to create a Django project with celery and redis for the messaging service using docker-compose. I'm getting Cannot connect to amqp://guest:**@127.0.0.1:5672. I'm not using guest as a user anywhere or 127.0.0.1:5672, and amqp is for RabbitMQ, but I'm not using RabbitMQ. So I don't know if my docker-compose volumes are not set correctly for celery to pick up the settings, where it is getting amqp from, or whether the broker is misconfigured.
docker-compose.yml:
version: '3'
# network
networks:
  data:
  management:
volumes:
  postgres-data:
  redis-data:
services:
  nginx:
    image: nginx
    ports:
      - "7001:80"
    volumes:
      - ./nginx.conf:/etc/nginx/conf.d/default.conf:ro
      - ../static:/static
    command: [nginx-debug, '-g', 'daemon off;']
    networks:
      - management
    depends_on:
      - web
  db:
    image: postgres:14
    restart: always
    volumes:
      - postgres-data:/var/lib/postgresql/data/
      - ../data:/docker-entrypoint-initdb.d # import SQL dump
    environment:
      - POSTGRES_DB=link_checker_db
      - POSTGRES_USER=link_checker
      - POSTGRES_PASSWORD=passw0rd
    networks:
      - data
    ports:
      - "5432:5432"
  web:
    image: link_checker_backend
    build:
      context: .
      dockerfile: Dockerfile
    environment:
      - DJANGO_LOG_LEVEL=ERROR
      - INITIAL_YAML=/code/initial.yaml
    volumes:
      - ../:/code
      - ../link_checker:/code/link_checker
      - ../link_checker_django/:/code/link_checker_django
      - ./settings.py:/code/link_checker_django/settings.py
    working_dir: /code
    command: >
      sh -c "
      python manage.py migrate --noinput &&
      python manage.py collectstatic --no-input &&
      python manage.py runserver 0.0.0.0:7000
      "
    networks:
      - data
      - management
    depends_on:
      - db
  redis:
    image: redis
    volumes:
      - redis-data:/data
    networks:
      - data
  celery-default:
    image: link_checker_backend
    volumes:
      - ../:/code
      - ../link_checker:/code/link_checker
      - ../link_checker_django/:/code/link_checker_django
      - ./settings.py:/code/link_checker_django/settings.py
    working_dir: /code/link_checker
    command: celery -A celery worker --pool=prefork --concurrency=30 -l DEBUG
    networks:
      - data
    depends_on:
      - db
      - redis
celery.py
import os

from celery import Celery

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "link_checker_django.settings")

app = Celery("link_checker")
app.config_from_object("django.conf:settings")
app.conf.task_create_missing_queues = True
app.autodiscover_tasks()
settings.py
BROKER_URL = "redis://redis:6379/0"
CELERY_ACCEPT_CONTENT = ["json"]
CELERY_TASK_SERIALIZER = "json"
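Note that the broker Celery falls back to when no broker setting is loaded at all is exactly amqp://guest@localhost:5672//, so the URL in the error usually means the Django settings (and hence BROKER_URL) are not being read, rather than a wrong value being read. A sketch of the namespaced configuration, assuming the settings are renamed with a CELERY_ prefix (CELERY_BROKER_URL = "redis://redis:6379/0"):

import os

from celery import Celery

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "link_checker_django.settings")

app = Celery("link_checker")
# with namespace="CELERY", only settings prefixed with CELERY_ are read
app.config_from_object("django.conf:settings", namespace="CELERY")
app.autodiscover_tasks()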
File structure:
link_checker_django/
  deploy/
    docker-compose.yml
  link_checker/
    celery.py
  link_checker_django/
    settings.py
  manage.py
Thanks for any help.
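One more observation: celery -A celery is fragile, because the module name celery can resolve to Celery's own package (or the local celery.py) instead of the project app. A sketch that names the package explicitly, assuming /code is the project root as in the compose file above:

celery-default:
  image: link_checker_backend
  working_dir: /code
  # celery resolves -A link_checker to link_checker/celery.py by convention
  command: celery -A link_checker worker --pool=prefork --concurrency=30 -l DEBUG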

Django + Celery + Nginx

I'm trying to run my Django application in Docker with Celery and Nginx.
Docker-compose
version: '3'
services:
  helpdesk_web:
    build:
      context: ./
      dockerfile: Dockerfile
    container_name: helpdesk_web
    volumes:
      - ./static:/usr/src/app/static
      - media:/usr/src/app/media
    ports:
      - "8000:8000"
      - "5678:5678"
    env_file:
      - ./.env
    restart: always
    depends_on:
      - helpdesk_db
      - helpdesk_redis
  helpdesk_db:
    image: postgres
    container_name: helpdesk_db
    volumes:
      - postgres_data:/var/lib/postgresql/data/
    env_file:
      - ./.env
    ports:
      - "5432:5432"
    environment:
      POSTGRES_DB: helpdesk_db
      POSTGRES_PASSWORD: itds
      POSTGRES_USER: itds
  nginx:
    build:
      context: ./docker/nginx
      dockerfile: Dockerfile
    container_name: helpdesk_nginx
    restart: on-failure
    depends_on:
      - helpdesk_web
      - helpdesk_db
    ports:
      - "80:80"
    volumes:
      - ./static:/usr/src/app/static
      - media:/usr/src/app/media
  helpdesk_redis:
    image: redis
    ports:
      - "6379:6379"
  helpdesk_celery:
    build:
      context: .
      dockerfile: Dockerfile
    command: celery -A helpdesk worker -l INFO --pool=solo
    depends_on:
      - helpdesk_web
      - helpdesk_redis
  helpdesk_celery-beat:
    build:
      context: .
      dockerfile: Dockerfile
    command: celery -A helpdesk beat -l INFO --scheduler django_celery_beat.schedulers:DatabaseScheduler
    depends_on:
      - helpdesk_web
      - helpdesk_redis
volumes:
  postgres_data:
  media:
Dockerfile
FROM python:3.10
WORKDIR /usr/src/app
ENV PYTHONDONTWRITEBYTECODE 1
ENV PYTHONUNBUFFERED 1
RUN pip install --upgrade pip
COPY requirements.txt .
RUN pip install -r requirements.txt
COPY . .
RUN chmod +x entrypoint.sh
ENTRYPOINT ["/usr/src/app/entrypoint.sh"]
entrypoint.sh
#!/bin/sh

if [ "$DATABASE" = "postgres" ]
then
    echo "Waiting for postgres..."
    while ! nc -z $SQL_HOST $SQL_PORT; do
        sleep 0.1
    done
    echo "PostgreSQL started"
fi

python manage.py migrate --no-input

exec gunicorn helpdesk.wsgi:application -b 0.0.0.0:8000 --workers=$WORKERS_COUNT
When I run docker-compose I get an error.
I have a dev docker-compose without nginx and it works fine. It seems there is some problem between celery and nginx. I am a beginner with Docker, so I don't know what to do.
What am I missing?
EDIT №1:
It's as if celery and celery-beat are being started as gunicorn.
The Compose command: overrides the Dockerfile CMD, and is passed as arguments to the ENTRYPOINT. A typical setup is to have your entrypoint script do first-time setup, like waiting for a database, but then end with the special shell command exec "$@" to run the command passed to it as arguments.
#!/bin/sh
# Wait for the database, run migrations, set environment variables, ...
if [ "$DATABASE" = "postgres" ]; then ...; fi
python manage.py migrate --no-input
# Run the command passed as arguments, not a fixed command here
exec "$@"
In your Dockerfile declare the default CMD you want the container to run, like Gunicorn.
ENTRYPOINT ["/usr/src/app/entrypoint.sh"]
CMD gunicorn helpdesk.wsgi:application -b 0.0.0.0:8000 --workers=$WORKERS_COUNT
Now it will honor the Compose command:, or another command you docker-compose run.
version: '3.8'
services:
  web:
    build: .
    ports: ['8000:8000', '5678:5678']
    env_file: [.env]
    restart: always
    depends_on: [db, redis]
    # and the default image CMD
  celery:
    build: .
    command: celery -A helpdesk worker -l INFO --pool=solo
    depends_on: [redis]
  celery-beat:
    build: .
    command: celery -A helpdesk beat -l INFO --scheduler django_celery_beat.schedulers:DatabaseScheduler
    depends_on: [redis]
  db: { ... }
  redis: { image: redis }
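With this split, one-off commands are wrapped by the same entrypoint setup; for example (hypothetical command, service name web as in the sketch above):

# entrypoint.sh runs first, then the given command replaces gunicorn
docker-compose run --rm web python manage.py createsuperuser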

How can I setup celery beat in docker?

in settings.py
CELERY_TIMEZONE = 'Europe/Minsk'
CELERY_TASK_TRACK_STARTED = True
CELERY_TASK_TIME_LIMIT = 30 * 60
CELERY_BROKER_URL = os.environ.get('CELERY_BROKER_URL')
CELERY_RESULT_BACKEND = os.environ.get('CELERY_BROKER_URL')
with the environment supplying CELERY_BROKER_URL=redis://redis:6379.
config/celery.py:
import os
from celery import Celery

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings')

app = Celery('config')
app.config_from_object('django.conf:settings', namespace='CELERY')
app.autodiscover_tasks()

app.conf.beat_schedule = {
    'pulling-games-to-database': {
        'task': 'gamehub.tasks.pull_games',
        'schedule': 604800.0,
    }
}
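The schedule above refers to gamehub.tasks.pull_games by its dotted name; the task body is not shown in the question, but as a hypothetical sketch it would be a shared task in gamehub/tasks.py:

# gamehub/tasks.py (hypothetical; the real task body is not shown above)
from celery import shared_task

@shared_task
def pull_games():
    # fetch games from the upstream source and save them to the database
    ...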
docker-compose.yml
version: '3'
services:
  db:
    build:
      context: ./docker/postgres
      dockerfile: Dockerfile
    env_file:
      - ./.env.db
    volumes:
      - ./docker/postgres/init.sql:/docker-entrypoint-initdb.d/init.sql
    restart: always
    ports:
      - '5432:5432'
  redis:
    image: redis
    ports:
      - '6379:6379'
  celery:
    build: .
    command: celery -A config worker -l info
    volumes:
      - .:/code
    depends_on:
      - db
      - redis
  celery-beat:
    build: .
    command: celery -A config beat -l info
    volumes:
      - .:/code
    depends_on:
      - db
      - redis
  app:
    build:
      context: ./
      dockerfile: Dockerfile
    env_file:
      - ./.env
    volumes:
      - ./:/usr/src/app
    depends_on:
      - db
      - redis
    ports:
      - '8000:8000'
    restart: always
  nginx:
    build:
      context: ./docker/nginx
      dockerfile: Dockerfile
    depends_on:
      - app
      - db
    ports:
      - '80:80'
When I run this with
sudo docker-compose build --no-cache
sudo docker-compose up
I do not see any errors, but I also do not see any celery output.
My task periodically puts data into the database. This data should be shown on the main page, but it is not.
I'm pretty sure the database is connected, because other functions work.
If you need to see anything else from my project, please let me know.
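A first debugging step, assuming the service names from the compose file above, is to check what the two celery containers actually log and whether the task is registered:

# follow the logs of the worker and beat containers
docker-compose logs -f celery celery-beat

# ask a running worker which tasks it has registered
docker-compose exec celery celery -A config inspect registered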
docker-compose.yml:
celeryworker:
  image: celeryworker
  ports: []
  command: /start-celeryworker
celerybeat:
  image: celerybeat
  ports: []
  command: /start-celerybeat
flower:
  image: flower
  ports:
    - "5545:5545"
  command: /start-flower
Dockerfile:
COPY ./compose/local/celery/worker/start /start-celeryworker
RUN sed -i 's/\r//' /start-celeryworker
RUN chmod +x /start-celeryworker
COPY ./compose/local/celery/beat/start /start-celerybeat
RUN sed -i 's/\r//' /start-celerybeat
RUN chmod +x /start-celerybeat
COPY ./compose/local/celery/flower/start /start-flower
RUN sed -i 's/\r//' /start-flower
RUN chmod +x /start-flower
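The /start-* scripts themselves are not shown; as a hypothetical sketch (cookiecutter-django style), /start-celerybeat might contain something like:

#!/bin/bash
set -o errexit
set -o nounset

# "config" is an assumed app/package name; substitute your own
exec celery -A config beat -l INFO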

Unable to Run Celery and celery beat using docker in django application (Unable to load celery application)

When I run my application without Docker it works perfectly, but in docker-compose I am getting this error:
| Error: Invalid value for '-A' / '--app':
| Unable to load celery application.
| The module sampleproject was not found.
my docker-compose file
app:
  container_name: myapp
  hostname: myapp
  build:
    context: .
    dockerfile: Dockerfile
  image: sampleproject
  tty: true
  command: >
    bash -c "
    python manage.py migrate &&
    python manage.py runserver 0.0.0.0:8000
    "
  env_file: .env
  ports:
    - "8000:8000"
  volumes:
    - .:/project
  depends_on:
    - database
    - redis
redis:
  image: redis:alpine
celery:
  build:
    context: ./
    dockerfile: Dockerfile
  command: celery -A sampleproject worker -l info
  depends_on:
    - database
    - redis
celery-beat:
  build: .
  command: celery -A sampleproject beat -l info
  depends_on:
    - database
    - redis
    - celery
my Dockerfile
FROM python:3.8
COPY requirements.txt requirements.txt
RUN pip install -r requirements.txt --no-cache-dir \
    && rm -rf requirements.txt
RUN mkdir /project
WORKDIR /project
My folder structure is something like this:
I had the same problem, and this is how we found the solution. In fact, celery is right to complain about not being able to run, as it needs an instance of the application.
For that, it is only necessary to add a volumes directive to the docker-compose.yaml file, pointing at the project folder, in the celery and celery-beat services.
Example:
app:
  container_name: myapp
  hostname: myapp
  build:
    context: .
    dockerfile: Dockerfile
  image: sampleproject
  tty: true
  command: >
    bash -c "
    python manage.py migrate &&
    python manage.py runserver 0.0.0.0:8000
    "
  env_file: .env
  ports:
    - "8000:8000"
  volumes:
    - .:/project
  depends_on:
    - database
    - redis
redis:
  image: redis:alpine
celery:
  build:
    context: ./
    dockerfile: Dockerfile
  command: celery -A sampleproject worker -l info
  volumes:
    - .:/project
  depends_on:
    - database
    - redis
celery-beat:
  build: .
  command: celery -A sampleproject beat -l info
  volumes:
    - .:/project
  depends_on:
    - database
    - redis
    - celery
So when the celery container starts, the project code is present in the mounted volume and celery can load the application without any problems.
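An alternative to bind-mounting the code is to bake it into the image, since the Dockerfile above never copies the project in; a minimal sketch, assuming manage.py lives in the build context root:

FROM python:3.8
COPY requirements.txt requirements.txt
RUN pip install -r requirements.txt --no-cache-dir
WORKDIR /project
# copy the project into the image so celery can import it without a volume
COPY . /project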

Issue Dockerising Django + Celery... Unable to load celery application

I'm having a slight issue with spinning up a Dockerised stack of: Django, Redis, Celery on a server backbone of nginx and gunicorn. The Django application connects to an external PostgreSQL database.
I have the following docker-compose.yaml file:
version: '3.7'
services:
  web:
    build:
      context: ./django
      dockerfile: Dockerfile
    expose:
      - "8000"
    volumes:
      - django-static:/usr/src/app/static
      - django-uploads:/usr/src/app/uploads
    depends_on:
      - redis
    links:
      - redis:redis
    environment:
      ...
    command: /usr/src/app/wave.sh
  nginx:
    build:
      context: nginx
      dockerfile: Dockerfile
    restart: on-failure
    ports:
      - 80:80
      - 443:443
    volumes:
      - ssl_data:/etc/resty-auto-ssl
    environment:
      ALLOWED_DOMAINS: "${STAGING_ALLOWED_DOMAINS}"
      SITES: "${STAGING_SITES}"
  redis:
    restart: always
    image: redis:latest
    ports:
      - '6379:6379'
  celery:
    build: ./django
    command: celery --app=Wave.celery.app worker --loglevel=DEBUG
    environment:
      ...
    volumes:
      - './django:/usr/src/app/'
    links:
      - redis:redis
    depends_on:
      - web
      - redis
      - nginx
    restart: on-failure
  celery-beat:
    build: ./django
    command: celery --app=Wave.celery.app beat --loglevel=DEBUG
    environment:
      ...
    volumes:
      - './django:/usr/src/app/'
    links:
      - redis:redis
    depends_on:
      - web
      - redis
      - nginx
    restart: on-failure
volumes:
  ssl_data:
  django-static:
  django-uploads:
However, I bring the stack up with:
docker-compose -f docker-compose.wave-api-staging.yml up --build --force-recreate
The wave.sh script is as follows:
#!/bin/sh
rm -rf celerybeat.pid
ln -s /run/shm /dev/shm
python3 manage.py collectstatic --no-input
python3 manage.py migrate
python3 manage.py shell < createsuperuser.py
pip3 freeze
/usr/local/bin/gunicorn Wave.wsgi:application --timeout 3600 --log-level=DEBUG -w 5 -b :8000
Which replaces the following command instruction in the web service:
command: bash -c "rm -rf celerybeat.pid && python3 manage.py makemigrations && python3 manage.py migrate --run-syncdb && python3 manage.py shell < createsuperuser.py && python3 manage.py runserver 0.0.0.0:8000"
celery.py:
import os
import datetime
from celery import Celery
from celery.schedules import crontab
from django.conf import settings

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Wave.settings')

app = Celery('Wave', include=['amq.v1.SurfTaskRunner.demand', 'amq.v1.SurfTaskRunner.periodic'])
app.config_from_object('django.conf:settings', namespace='CELERY')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)

@app.on_after_configure.connect
def setup_periodic_tasks(sender, **kwargs):
    sender.add_periodic_task(
        crontab(minute='6,16,26,36,46,56', hour='*', day_of_week='*', day_of_month='*'),
        dispatch_surfers_waivers_periodic_task.s(),
    )

@app.task(bind=True)
def dispatch_surfers_waivers_periodic_task(self):
Dockerfile:
FROM python:3.7.2-slim
RUN apt-get update && apt-get -y install cron && apt-get -y install nano
ENV PYTHONUNBUFFERED 1
RUN mkdir /usr/src/app
WORKDIR /usr/src/app
COPY ./requirements.txt /usr/src/app/requirements.txt
RUN pip install -r requirements.txt
COPY ./wave.sh /usr/src/app/wave.sh
RUN ["chmod", "+x", "/usr/src/app/wave.sh"]
COPY . /usr/src/app
I receive the following errors from the celery and celery beat services:
celery_1 | Error:
celery_1 | Unable to load celery application.
celery_1 | The module Wave was not found.
celery-beat_1 | Error:
celery-beat_1 | Unable to load celery application.
celery-beat_1 | The module Wave was not found.
I have read and googled extensively, but so far have found zero clues as to what could be going wrong here... I'm at the point of completely ditching Celery if I can't get this to work.
However, the same process can be spun up locally without nginx, and with the standard python manage.py runserver command.
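"The module Wave was not found" means the worker container cannot import a Wave package from its working directory. A quick check, assuming the compose file name used above, is to run a shell in the same image and try the import by hand:

# list the working directory and try importing the celery module directly
docker-compose -f docker-compose.wave-api-staging.yml run --rm celery \
    sh -c "pwd && ls && python3 -c 'import Wave.celery'"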
Update: here is my local development docker-compose.yml file:
version: '3.7'
services:
  django:
    build:
      context: ./django
      dockerfile: Dockerfile
    ports:
      - 8982:8982
    depends_on:
      - postgres
      - redis
    links:
      - postgres:postgres
      - redis:redis
    command: bash -c "rm -rf celerybeat.pid && python3 manage.py makemigrations && python3 manage.py migrate --run-syncdb && python3 manage.py shell < createsuperuser.py && python3 manage.py runserver 0.0.0.0:8982"
    volumes: ['./django:/usr/src/app/']
    environment:
      ...
  redis:
    restart: always
    image: redis:latest
    ports:
      - '6379:6379'
  celery:
    build: ./django
    command: celery -A Wave worker --loglevel=DEBUG
    environment:
      ...
    volumes:
      - './django:/usr/src/app/'
    links:
      - postgres:postgres
      - redis:redis
    depends_on:
      - django
      - postgres
      - redis
    restart: on-failure
  celery-beat:
    build: ./django
    command: celery -A Wave beat --loglevel=DEBUG
    environment:
      ...
    volumes:
      - './django:/usr/src/app/'
    links:
      - postgres:postgres
      - redis:redis
    depends_on:
      - django
      - postgres
      - redis
    restart: on-failure
  postgres:
    build:
      context: ./postgres
      dockerfile: Dockerfile
    ports:
      - 5433:5432
    expose:
      - 5432
    environment:
      - POSTGRES_DB=conveyor
      - POSTGRES_USER=conveyor
      - POSTGRES_PASSWORD=89$r55cP%fSDDghypoAsdd