Sorry for my English. I have a Django project in which I want to use Celery for background tasks, and I now need to configure Docker for it. This is my Dockerfile:
FROM python:3
MAINTAINER Alex2
RUN apt-get update
# Install wkhtmltopdf
RUN curl -L#o wk.tar.xz https://downloads.wkhtmltopdf.org/0.12/0.12.4/wkhtmltox-0.12.4_linux-generic-amd64.tar.xz \
&& tar xf wk.tar.xz \
&& cp wkhtmltox/bin/wkhtmltopdf /usr/bin \
&& cp wkhtmltox/bin/wkhtmltoimage /usr/bin \
&& rm wk.tar.xz \
&& rm -r wkhtmltox
RUN apt-get install -y cron
# for celery
ENV APP_USER user
ENV APP_ROOT /src
RUN groupadd -r ${APP_USER} \
&& useradd -r -m \
--home-dir ${APP_ROOT} \
-s /usr/sbin/nologin \
-g ${APP_USER} ${APP_USER}
# create directory for application source code
RUN mkdir -p /usr/django/app
COPY requirements.txt /usr/django/app/
WORKDIR /usr/django/app
RUN pip install -r requirements.txt
This is my docker-compose.dev:
version: '2.0'
services:
web:
build: .
container_name: api_dev
image: img/api_dev
volumes:
- .:/usr/django/app/
- ./static:/static
expose:
- "8001"
env_file: env/dev.env
command: bash django_run.sh
nginx:
build: nginx
container_name: ng_dev
image: img/ng_dev
ports:
- "8001:8001"
volumes:
- ./nginx/dev_api.conf:/etc/nginx/conf.d/api.conf
- .:/usr/django/app/
- ./static:/static
depends_on:
- web
links:
- web:web
db:
image: postgres:latest
container_name: pq01
ports:
- "5432:5432"
redis:
image: redis:latest
container_name: rd01
command: redis-server
ports:
- "8004:8004"
celery:
build: .
container_name: cl01
command: celery worker --app=myapp.celery
volumes:
- .:/usr/django/app/
links:
- db
- redis
and I get this error:
cl01 | User information: uid=0 euid=0 gid=0 egid=0
cl01 |
cl01 | uid=uid, euid=euid, gid=gid, egid=egid,
cl01 | [2018-07-31 16:40:00,207: ERROR/MainProcess] consumer: Cannot connect to redis://redis:8004/0: Error 111 connecting to redis:8004. Connection refused..
cl01 | Trying again in 2.00 seconds...
cl01 |
cl01 | [2018-07-31 16:40:02,211: ERROR/MainProcess] consumer: Cannot connect to redis://redis:8004/0: Error 111 connecting to redis:8004. Connection refused..
cl01 | Trying again in 4.00 seconds...
cl01 |
cl01 | [2018-07-31 16:40:06,217: ERROR/MainProcess] consumer: Cannot connect to redis://redis:8004/0: Error 111 connecting to redis:8004. Connection refused..
cl01 | Trying again in 6.00 seconds...
I can't understand why it won't connect. My project settings file:
CELERY_BROKER_URL = 'redis://redis:8004/0'
CELERY_RESULT_BACKEND = 'redis://redis:8004/0'
Everything looks fine to me, but maybe I'm missing a setting in some file. Please help me solve this problem.
I think the port mapping causes the problem. Redis listens on its default port 6379 inside the container; the ports: option only publishes a container port to the host, it does not change the port Redis listens on. So, change the redis settings in the docker-compose.dev file as follows (ports option removed):
redis:
image: redis:latest
container_name: rd01
command: redis-server
and in your settings.py
CELERY_BROKER_URL = 'redis://redis:6379/0'
CELERY_RESULT_BACKEND = 'redis://redis:6379/0'
You don't have to map the ports unless you are using them from your local environment.
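To keep settings.py and the compose file from drifting apart, you can also read the broker URL from an environment variable. A minimal sketch, assuming you add a CELERY_BROKER_URL entry to env/dev.env (the variable name and fallback are my assumptions, not from the original post):
import os

# Fall back to the compose service name and Redis's default port
CELERY_BROKER_URL = os.environ.get('CELERY_BROKER_URL', 'redis://redis:6379/0')
CELERY_RESULT_BACKEND = os.environ.get('CELERY_RESULT_BACKEND', CELERY_BROKER_URL)
That way, if the Redis service name or port ever changes, only the env file needs updating.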
Related
I'm trying to run my Django application in Docker with Celery and Nginx.
Docker-compose
version: '3'
services:
helpdesk_web:
build:
context: ./
dockerfile: Dockerfile
container_name: helpdesk_web
volumes:
- ./static:/usr/src/app/static
- media:/usr/src/app/media
ports:
- "8000:8000"
- "5678:5678"
env_file:
- ./.env
restart: always
depends_on:
- helpdesk_db
- helpdesk_redis
helpdesk_db:
image: postgres
container_name: helpdesk_db
volumes:
- postgres_data:/var/lib/postgresql/data/
env_file:
- ./.env
ports:
- "5432:5432"
environment:
POSTGRES_DB: helpdesk_db
POSTGRES_PASSWORD: itds
POSTGRES_USER: itds
nginx:
build:
context: ./docker/nginx
dockerfile: Dockerfile
container_name: helpdesk_nginx
restart: on-failure
depends_on:
- helpdesk_web
- helpdesk_db
ports:
- "80:80"
volumes:
- ./static:/usr/src/app/static
- media:/usr/src/app/media
helpdesk_redis:
image: redis
ports:
- "6379:6379"
helpdesk_celery:
build:
context: .
dockerfile: Dockerfile
command: celery -A helpdesk worker -l INFO --pool=solo
depends_on:
- helpdesk_web
- helpdesk_redis
helpdesk_celery-beat:
build:
context: .
dockerfile: Dockerfile
command: celery -A helpdesk beat -l INFO --scheduler django_celery_beat.schedulers:DatabaseScheduler
depends_on:
- helpdesk_web
- helpdesk_redis
volumes:
postgres_data:
media:
Dockerfile
FROM python:3.10
WORKDIR /usr/src/app
ENV PYTHONDONTWRITEBYTECODE 1
ENV PYTHONUNBUFFERED 1
RUN pip install --upgrade pip
COPY requirements.txt .
RUN pip install -r requirements.txt
COPY . .
RUN chmod +x entrypoint.sh
ENTRYPOINT ["/usr/src/app/entrypoint.sh"]
entrypoint.sh
#! /bin/sh
if [ "$DATABASE" = "postgres" ]
then
echo "Waiting for postgres..."
while ! nc -z $SQL_HOST $SQL_PORT; do
sleep 0.1
done
echo "PostgreSQL started"
fi
python manage.py migrate --no-input
exec gunicorn helpdesk.wsgi:application -b 0.0.0.0:8000 --workers=$WORKERS_COUNT
When I run docker-compose up I get an error (see the edit below).
I have a dev docker-compose without nginx and it works fine, so it seems there is some problem between celery and nginx. I am a beginner with Docker, so I don't know what to do.
What am I missing?
EDIT №1:
It looks like the celery and celery-beat containers end up running gunicorn instead of their own commands.
The Compose command: overrides the Dockerfile CMD, and is passed as arguments to the ENTRYPOINT. A typical setup is to have your entrypoint script do first-time setup, like waiting for a database, and then end with the special shell command exec "$@" to run the command that was passed to it as arguments.
#!/bin/sh
# Wait for the database, run migrations, set environment variables, ...
if [ "$DATABASE" = "postgres" ]; then ...; fi
python manage.py migrate --no-input
# Run the command passed as arguments, not a fixed command here
exec "$#"
In your Dockerfile, declare the default CMD you want the container to run, like Gunicorn.
ENTRYPOINT ["/usr/src/app/entrypoint.sh"]
CMD gunicorn helpdesk.wsgi:application -b 0.0.0.0:8000 --workers=$WORKERS_COUNT
Now it will honor the Compose command:, or any other command you pass to docker-compose run.
version: '3.8'
services:
web:
build: .
ports: ['8000:8000', '5678:5678']
env_file: [.env]
restart: always
depends_on: [db, redis]
# and the default image CMD
celery:
build: .
command: celery -A helpdesk worker -l INFO --pool=solo
depends_on: [redis]
celery-beat:
build: .
command: celery -A helpdesk beat -l INFO --scheduler django_celery_beat.schedulers:DatabaseScheduler
depends_on: [redis]
db: { ... }
redis: { image: redis }
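For reference, the -A helpdesk argument in the worker and beat commands expects a Celery application module inside the project. A typical helpdesk/celery.py following the stock Django/Celery integration looks like this (a sketch; only the project name is taken from the commands above):
import os

from celery import Celery

# Make Django settings importable before the app configures itself
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'helpdesk.settings')

app = Celery('helpdesk')
# Read all CELERY_-prefixed settings from Django's settings module
app.config_from_object('django.conf:settings', namespace='CELERY')
# Find tasks.py modules in all installed apps
app.autodiscover_tasks()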
I am trying to fire up a separate Redis container to act as the broker for Celery. Can someone help me understand why the Docker user is not able to open the UNIX socket? I have even tried running as root, but it doesn't seem to work. Please find below the Dockerfile, docker-compose file, and redis.conf file.
Dockerfile:
FROM centos/python-36-centos7
USER root
ENV DockerHOME=/home/django
RUN mkdir -p $DockerHOME
ENV PYTHONDONTWRITEBYTECODE 1
ENV PYTHONUNBUFFERED 1
ENV PATH=/home/django/.local/bin:$PATH
COPY ./oracle-instantclient18.3-basiclite-18.3.0.0.0-3.x86_64.rpm /home/django
COPY ./oracle.conf /home/django
RUN yum install -y dnf
RUN dnf install -y libaio libaio-devel
RUN rpm -i /home/django/oracle-instantclient18.3-basiclite-18.3.0.0.0-3.x86_64.rpm && \
cp /home/django/oracle.conf /etc/ld.so.conf.d/ && \
ldconfig && \
ldconfig -p | grep client64
COPY ./requirements /home/django/requirements
WORKDIR /home/django
RUN pip install --upgrade pip
RUN pip install --no-cache-dir -r ./requirements/development.txt
COPY . .
RUN chmod 777 /home/django
EXPOSE 8000
ENTRYPOINT ["/bin/bash", "-e", "docker-entrypoint.sh"]
Docker-compose file:
version: '3.8'
services:
app:
build: .
volumes:
- .:/django
- cache:/var/run/redis
image: app_name:django
container_name: app_name
ports:
- 8000:8000
depends_on:
- db
- redis
db:
image: postgres:10.0-alpine
volumes:
- postgres_data:/var/lib/postgresql/data
ports:
- 5432:5432
environment:
- POSTGRES_USER=app_name
- POSTGRES_PASSWORD=app_password
- POSTGRES_DB=app_db
labels:
description : "Postgres Database"
container_name: app_name-db-1
redis:
image: redis:alpine
command: redis-server /etc/redis/redis.conf
restart: unless-stopped
ports:
- 6379:6379
volumes:
- ./redis/data:/var/lib/redis
- ./redis/redis-server.log:/var/log/redis/redis-server.log
- cache:/var/run/redis/
- ./redis/redis.conf:/etc/redis/redis.conf
container_name: redis
healthcheck:
test: redis-cli ping
interval: 1s
timeout: 3s
retries: 30
volumes:
postgres_data:
cache:
static-volume:
docker-entrypoint.sh:
# run migration first
python manage.py migrate
python manage.py preload_sites -uvasas -l
python manage.py preload_endpoints -uvasas -l
python manage.py collectstatic --noinput
#start celery
export C_FORCE_ROOT='true'
celery multi start 1 -A realm -l INFO -c4
# start the server
python manage.py runserver 0:8000
redis.conf
unixsocket /var/run/redis/redis.sock
unixsocketperm 770
logfile /var/log/redis/redis-server.log
I am new to Docker, so apologies if I have missed something obvious or have not followed some of the best practices.
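For reference, once the socket permissions are sorted out, Celery can reach Redis over the socket instead of TCP using kombu's redis+socket transport. A minimal sketch for the Django settings, assuming the app container mounts the cache volume at /var/run/redis as in the compose file above:
# settings.py: broker over the shared unix socket (path from redis.conf above)
CELERY_BROKER_URL = 'redis+socket:///var/run/redis/redis.sock'
CELERY_RESULT_BACKEND = 'redis+socket:///var/run/redis/redis.sock'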
Hi, I am dockerizing my Django/Postgres application. I specify the user as admin, but I still end up connecting to the Postgres DB as root and getting errors.
Dockerfile
FROM ubuntu:20.04
RUN apt update && apt install python3-pip python3-dev -y
ENV PYTHONUNBUFFERED 1
ENV PYTHONDONTWRITEBYTECODE 1
WORKDIR /code
COPY requirements.txt /code/
RUN pip3 install --upgrade pip
RUN apt-get install libffi-dev
RUN pip3 install cffi
RUN pip3 install -r requirements.txt
COPY ./entrypoint.sh .
RUN sed -i 's/\r$//g' /code/entrypoint.sh
RUN chmod +x /code/entrypoint.sh
COPY . /code
RUN python3 manage.py collectstatic --no-input
ENTRYPOINT ["/code/entrypoint.sh"]
entrypoint.sh
#!/bin/sh
if [ "$DATABASE" = "postgres" ]
then
echo "Waiting for postgres..."
while ! nc -z $SQL_HOST $SQL_PORT; do
sleep 0.1
done
echo "PostgreSQL started"
fi
exec "$#"
docker-compose.yml
version: "3.3"
services:
db_new:
image: postgres:12.0-alpine
container_name: db_new
ports:
- 5432:5432
volumes:
- postgres_data:/var/lib/postgresql/data/
environment:
- POSTGRES_USER=admin
- POSTGRES_PASSWORD=admin
- POSTGRES_DB=docker2
redis:
image: "redis:alpine"
web:
restart: always
container_name: web
build:
context: .
dockerfile: Dockerfile
command: bash -c "/usr/local/bin/daphne -b 0.0.0.0 -p 8000 setup.asgi:application"
volumes:
- static_volume:/code/static/
- media_volume:/code/media/
ports:
- "8000:8000"
env_file:
- ./.env
depends_on:
- db_new
- redis
celery:
build: .
command: /usr/local/bin/celery -A setup worker -l info
depends_on:
- db_new
- redis
celery-beat:
build: .
command: /usr/local/bin/celery -A setup beat -l info --scheduler django_celery_beat.schedulers:DatabaseScheduler
depends_on:
- db_new
- redis
nginx:
build: ./nginx
volumes:
- static_volume:/code/static/
- media_volume:/code/media/
ports:
- 80:80
depends_on:
- web
volumes:
postgres_data:
static_volume:
media_volume:
Error:
2021-12-17 10:49:19.602 UTC [27] FATAL: password authentication failed for user "root"
db_new | 2021-12-17 10:49:19.602 UTC [27] DETAIL: Role "root" does not exist.
db_new | Connection matched pg_hba.conf line 95: "host all all all md5"
Please help me with this. Thanks for your help.
When I specify the user as admin, why is it connecting as the root user? What am I missing?
Could you show us your .env file and the Django settings file?
You should have something like this in your Django settings file:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'docker2',
'USER': 'admin',
'PASSWORD': 'admin',
'HOST': 'db_new',
'PORT': '5432',
}
}
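If the credentials live in .env (as the env_file: entries suggest), the settings file would normally pull them from the environment instead of hard-coding them. A sketch with illustrative variable names (the SQL_* names are my assumptions, not from the original post):
import os

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': os.environ.get('SQL_DATABASE', 'docker2'),
        'USER': os.environ.get('SQL_USER', 'admin'),
        'PASSWORD': os.environ.get('SQL_PASSWORD', 'admin'),
        # 'db_new' is the compose service name, reachable on the compose network
        'HOST': os.environ.get('SQL_HOST', 'db_new'),
        'PORT': os.environ.get('SQL_PORT', '5432'),
    }
}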
I can't get my Django app to communicate with my Postgres database using Docker. Here is my docker-compose.yml:
version: '3'
services:
web:
container_name: web
build:
context: ./web
dockerfile: Dockerfile
command: python manage.py runserver 0.0.0.0:8000
volumes:
- ./web/:/usr/src/web/
ports:
- 8000:8000
- 3000:3000
- 35729:35729
env_file:
- database.env
stdin_open: true
depends_on:
- database
database:
container_name: database
image: postgres
volumes:
- database-data:/var/lib/postgresql/data/
ports:
- 5432:5432
volumes:
database-data:
Here is my database.env:
# database.env
POSTGRES_USERNAME=admin
POSTGRES_PASSWORD=pass
POSTGRES_DBNAME=db
POSTGRES_HOST=database
POSTGRES_PORT=5432
PGUSER=admin
PGPASSWORD=pass
PGDATABASE=db
PGHOST=database
PGPORT=5432
DATABASE=db
SQL_HOST=database
SQL_PORT=5432
And here is my Dockerfile:
# pull official base image
FROM python:3.8.3-alpine
# set work directory
WORKDIR /usr/src/web
# set environment variables
ENV PYTHONDONTWRITEBYTECODE 1
ENV PYTHONUNBUFFERED 1
# install psycopg2 dependencies
RUN apk update \
&& apk add postgresql-dev gcc python3-dev musl-dev
RUN apk add zlib-dev jpeg-dev gcc musl-dev
# install nodejs
RUN apk add --update nodejs nodejs-npm
# copy project
ADD . .
# install dependencies
RUN pip install --upgrade pip
RUN pip install -r requirements.txt
# run entrypoint.sh
ENTRYPOINT ["sh", "/usr/src/web/entrypoint.sh"]
And here is my entrypoint.sh:
#!/bin/sh
if [ "$DATABASE" = "db" ]
then
echo "Waiting for postgres..."
while ! nc -z $SQL_HOST $SQL_PORT; do
sleep 10
done
echo "PostgreSQL started"
fi
exec "$#"
I build the containers with: docker-compose up -d --build
Then I run: docker-compose exec web npm start --prefix ./front/
I can access the frontend at http://localhost:3000
But when I run docker logs database I get this:
2021-01-18 06:31:49.207 UTC [1] LOG: database system is ready to accept connections
2021-01-18 06:31:51.640 UTC [32] FATAL: password authentication failed for user "admin"
2021-01-18 06:31:51.640 UTC [32] DETAIL: Role "admin" does not exist.
Connection matched pg_hba.conf line 99: "host all all all md5"
Here is the status:
37ee3e314d52 web "sh /usr/src/web/ent…" About a minute ago Up About a minute 0.0.0.0:3000->3000/tcp, 0.0.0.0:8000->8000/tcp, 5432/tcp web
65dfeae57a94 postgres "docker-entrypoint.s…" About a minute ago Up About a minute 0.0.0.0:5432->5432/tcp database
Could you help me?
Thank you very much!
It seems like the Postgres user you are using doesn't exist. Note that the postgres image does not recognize POSTGRES_USERNAME or POSTGRES_DBNAME from your database.env; the variables it reads are POSTGRES_USER, POSTGRES_PASSWORD, and POSTGRES_DB. You can add those environment variables to the database service in docker-compose so the role and database get created (as below), or write a script that creates them on first start. Also note these variables only take effect when the data volume is first initialized, so if database-data already exists you need to remove it (for example with docker-compose down -v) before they apply.
version: '3'
services:
web:
container_name: web
build:
context: ./web
dockerfile: Dockerfile
command: python manage.py runserver 0.0.0.0:8000
volumes:
- ./web/:/usr/src/web/
ports:
- 8000:8000
- 3000:3000
- 35729:35729
env_file:
- database.env
stdin_open: true
depends_on:
- database
database:
container_name: database
image: postgres
volumes:
- database-data:/var/lib/postgresql/data/
ports:
- 5432:5432
environment:
- POSTGRES_USER=admin
- POSTGRES_PASSWORD=pass
- POSTGRES_DB=db
volumes:
database-data:
For more about the postgres image's environment variables, see the image's documentation.
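A quick way to verify the role was actually created is to connect from inside the web container with the same credentials, for example with a short Python check (a sketch; it assumes psycopg2 is installed, which Django needs for Postgres anyway):
import os

import psycopg2

# Uses the same variables as database.env; defaults mirror the compose file
conn = psycopg2.connect(
    dbname=os.environ.get('PGDATABASE', 'db'),
    user=os.environ.get('PGUSER', 'admin'),
    password=os.environ.get('PGPASSWORD', 'pass'),
    host=os.environ.get('PGHOST', 'database'),
    port=os.environ.get('PGPORT', '5432'),
)
print('connected as', conn.get_dsn_parameters().get('user'))
conn.close()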
I'm trying to deploy my Django app to Heroku and I have several doubts about dynos, workers, and deploy configuration.
In my heroku.yml file I have two process types, one for the web and the other for Celery. I would like each of them to have only one dyno but several workers, and to be scalable if necessary.
heroku.yml:
build:
docker:
web: Dockerfile-django
celery: Dockerfile-django
run:
web: gunicorn project.wsgi --log-file -
celery: celery -A project worker -B --loglevel=INFO
docker-compose.yml:
version: '3.7'
services:
web:
container_name: dilains_django_ctnr
build:
context: .
dockerfile: Dockerfile-django
restart: always
env_file: ./project/project/.env
# note: YAML does not allow two command: keys here; only the last one takes effect
command: python manage.py runserver 0.0.0.0:8000
volumes:
- ./project:/dilains
depends_on:
- postgres
- redis
ports:
- 8000:8000
networks:
- dilains-ntwk
redis:
container_name: dilains_redis_ctnr
build:
context: .
dockerfile: Dockerfile-redis
volumes:
- ./redis-data:/data
ports:
- 3679:3679
networks:
- dilains-ntwk
celery:
container_name: dilains_celery_ctnr
build:
context: .
dockerfile: Dockerfile-django
restart: always
env_file: ./project/project/.env
command: celery -A project worker -B --loglevel=INFO
volumes:
- ./project:/dilains
depends_on:
- redis
- web
- postgres
networks:
- dilains-ntwk
networks:
dilains-ntwk:
driver: bridge
Dockerfile-django:
FROM python:3.7-alpine
ENV PYTHONDONTWRITEBYTECODE 1
ENV PYTHONUNBUFFERED 1
RUN apk update && apk add --no-cache bash postgresql postgresql-dev gcc python3-dev musl-dev jpeg-dev zlib-dev libjpeg
RUN mkdir /dilains
COPY ./project /dilains/
COPY ./requirements.txt /dilains/
WORKDIR /dilains
RUN pip install -r requirements.txt
EXPOSE 8000
I tried to scale each process type to 4 workers with the following commands:
$ heroku ps -a app_name
=== celery (Standard-1X): /bin/sh -c celery\ -A\ project\ worker\ -B\ --loglevel\=INFO (1)
celery.1: up 2020/10/23 08:05:31 +0200 (~ 41m ago)
=== web (Standard-1X): /bin/sh -c gunicorn\ project.wsgi\ --log-file\ - (1)
web.1: up 2020/10/23 08:05:40 +0200 (~ 41m ago)
$ heroku ps:scale web=1 worker=4 -a app_name
$ heroku ps:scale celery=1 worker=4 -a app_name
I'm paying for Standard-1X dynos, whose description says: number of process types - unlimited; horizontal scaling - yes.
Could anybody help, please?