How to run startapp from inside docker-compose - Django

I have a problem with the steps in testdriven.io's The Definitive Guide to Celery and Django. When I want to start a new tdd app, I execute these commands:
```bash
$ docker-compose exec web bash
(container)$ ./manage.py startapp tdd
```
It creates an app that is read-only, and PyCharm can't change the read-only status. Some answers say this is because I created the app as the root user, but I can't work out how to solve it. Here are the docker-compose file and the Dockerfile.
```yaml
version: '3.3'

services:
  web:
    build:
      context: .
      dockerfile: ./compose/local/django/Dockerfile
    image: testdriven_web
    # '/start' is the shell script used to run the service
    command: /start
    # this volume maps the files and folders on the host to the container,
    # so if we change code on the host, code in the docker container will also be changed
    volumes:
      - .:/app
    ports:
      - 8010:8000
    # env_file is used to manage the env variables of our project
    env_file:
      - ./.env/.dev-sample
    depends_on:
      - redis
      - db

  db:
    image: postgres:14-alpine
    volumes:
      - postgres_data:/var/lib/postgresql/data/
    environment:
      - POSTGRES_DB=hello_django
      - POSTGRES_USER=hello_django
      - POSTGRES_PASSWORD=hello_django

  redis:
    image: redis:7-alpine

  celery_worker:
    build:
      context: .
      dockerfile: ./compose/local/django/Dockerfile
    image: testdriven_celery_worker
    command: /start-celeryworker
    volumes:
      - .:/app
    env_file:
      - ./.env/.dev-sample
    depends_on:
      - redis
      - db

  celery_beat:
    build:
      context: .
      dockerfile: ./compose/local/django/Dockerfile
    image: testdriven_celery_beat
    command: /start-celerybeat
    volumes:
      - .:/app
    env_file:
      - ./.env/.dev-sample
    depends_on:
      - redis
      - db

  flower:
    build:
      context: .
      dockerfile: ./compose/local/django/Dockerfile
    image: testdriven_celery_flower
    command: /start-flower
    volumes:
      - .:/app
    env_file:
      - ./.env/.dev-sample
    ports:
      - 5557:5555
    depends_on:
      - redis
      - db

volumes:
  postgres_data:
```
And the Dockerfile
```dockerfile
FROM python:3.10-slim-buster

ENV PYTHONUNBUFFERED 1
ENV PYTHONDONTWRITEBYTECODE 1

RUN apt-get update \
  # dependencies for building Python packages
  && apt-get install -y build-essential \
  # psycopg2 dependencies
  && apt-get install -y libpq-dev \
  # Translations dependencies
  && apt-get install -y gettext \
  # Additional dependencies
  && apt-get install -y procps \
  # cleaning up unused files
  && apt-get purge -y --auto-remove -o APT::AutoRemove::RecommendsImportant=false \
  && rm -rf /var/lib/apt/lists/*

# Requirements are installed here to ensure they will be cached.
COPY ./requirements.txt /requirements.txt
RUN pip install -r /requirements.txt

COPY ./compose/local/django/entrypoint /entrypoint
RUN sed -i 's/\r$//g' /entrypoint
RUN chmod +x /entrypoint

COPY ./compose/local/django/start /start
RUN sed -i 's/\r$//g' /start
RUN chmod +x /start

COPY ./compose/local/django/celery/worker/start /start-celeryworker
RUN sed -i 's/\r$//g' /start-celeryworker
RUN chmod +x /start-celeryworker

COPY ./compose/local/django/celery/beat/start /start-celerybeat
RUN sed -i 's/\r$//g' /start-celerybeat
RUN chmod +x /start-celerybeat

COPY ./compose/local/django/celery/flower/start /start-flower
RUN sed -i 's/\r$//g' /start-flower
RUN chmod +x /start-flower

WORKDIR /app

ENTRYPOINT ["/entrypoint"]
```
I tried creating a user in the Dockerfile, using this issue as a base, but I was unsuccessful: https://github.com/testdrivenio/django-on-docker/issues/10
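For reference, the kind of change I attempted looks roughly like this (a sketch based on the linked issue; `app` and UID/GID 1000 are placeholders and would need to match the host user that owns the project directory):

```dockerfile
# hypothetical non-root user; UID/GID 1000 must match the host user so files
# created in the bind-mounted /app directory stay writable on the host
RUN groupadd --gid 1000 app \
    && useradd --uid 1000 --gid app --create-home app
USER app
```

As a one-off workaround, ownership of an already-created app can also be fixed from the host, or the command can be run as the host user in the first place:

```bash
# fix ownership of files that were created as root inside the container
sudo chown -R "$(id -u):$(id -g)" tdd
# or run startapp as the host UID/GID to begin with
docker-compose exec -u "$(id -u):$(id -g)" web python manage.py startapp tdd
```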

Related

Volume doesn't work in a docker container and data is forgotten

I'm trying to build Django + Docker + PostgreSQL
Dockerfile
```dockerfile
FROM python:3.10.0-alpine

WORKDIR /usr/src/app

ENV PYTHONDONTWRITEBYTECODE 1
ENV PYTHONUNBUFFERED 1

RUN pip install --upgrade pip
RUN apk update
RUN apk add postgresql-dev gcc python3-dev musl-dev

COPY ./requirements.txt .
RUN pip install -r requirements.txt

COPY ./entrypoint.sh .
RUN sed -i 's/\r$//g' /usr/src/app/entrypoint.sh
RUN chmod +x /usr/src/app/entrypoint.sh

COPY . .

ENTRYPOINT ["/usr/src/app/entrypoint.sh"]
```
docker-compose.yml
```yaml
version: '3.9'

services:
  web:
    build: .
    command: gunicorn pets.wsgi:application --bind 0.0.0.0:8000
    expose:
      - 8000
    env_file:
      - .env
    depends_on:
      - db
  db:
    image: postgres:12.0-alpine
    volumes:
      - postgres_data:/var/lib/postgresql/data/
    env_file:
      - .env
  nginx:
    build: ./nginx
    ports:
      - 1337:80
    depends_on:
      - web

volumes:
  postgres_data:
```
entrypoint.sh
```sh
#!/bin/sh

if [ "$DATABASE" = "postgres" ]
then
    echo "Waiting for postgres..."
    while ! nc -z $POSTGRES_HOST $POSTGRES_PORT; do
        sleep 0.1
    done
    echo "PostgreSQL started"
fi

cd pets
python manage.py flush --no-input
python manage.py migrate

# hand off to the container command
exec "$@"
```
So, when the container starts, I use `docker exec -it <container_id> python manage.py createsuperuser` to create a superuser and then add some data to the database.
But when I use `docker-compose stop` and then run it again with the same ID, the container forgets the written data and everything has to be repeated.
Shouldn't the volume solve this issue?
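A named volume does survive `docker-compose stop`/`up`, so one thing worth checking is the entrypoint itself: `manage.py flush` removes all data from the database, and since the entrypoint runs on every container start, it would wipe the volume-backed data each time regardless of the volume. A sketch of the same script without the destructive step:

```sh
#!/bin/sh
# same entrypoint, minus the flush that empties the database on every start;
# flush can still be run manually when a reset is actually wanted

if [ "$DATABASE" = "postgres" ]
then
    echo "Waiting for postgres..."
    while ! nc -z "$POSTGRES_HOST" "$POSTGRES_PORT"; do
        sleep 0.1
    done
    echo "PostgreSQL started"
fi

cd pets
python manage.py migrate

exec "$@"
```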

python collectstatic command is not run in Docker Compose and GitLab

I am trying to run `python manage.py collectstatic` in Docker, but nothing works: my Python project is missing some icons, and this command should solve the issue, but I can't figure out where to place it. I have read several questions here but no luck.
Below is my docker-compose.ci.stag.yml file:
version: "3.7"
services:
web:
build:
context: .
dockerfile: Dockerfile.staging
cache_from:
- gingregisr*ty.azurecr.io/guio-tag:tag
image: gingregisrty.azurecr.io/guio-tag:tag
expose:
- 7000
env_file: .env
Then my docker-compose.staging.yml file:
```yaml
version: '3.5'

# sudo docker login -p <password> -u <username>
services:
  api:
    container_name: api
    image: gingregisrty.azurecr.io/guio-tag:tag
    ports:
      - 7000:7000
    restart: unless-stopped
    env_file:
      - .env
    networks:
      - app-network
  watchtower:
    image: containrrr/watchtower
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - /root/.docker/config.json:/config.json
    command: --interval 30
    environment:
      - WATCHTOWER_CLEANUP=true
    networks:
      - app-network
  nginx-proxy:
    container_name: nginx-proxy
    image: jwilder/nginx-proxy:0.9
    restart: always
    ports:
      - 443:443
      - 90:90
    volumes:
      - certs:/etc/nginx/certs
      - html:/usr/share/nginx/html
      - vhost:/etc/nginx/vhost.d
      - /var/run/docker.sock:/tmp/docker.sock:ro
    depends_on:
      - api
    networks:
      - app-network
  nginx-proxy-letsencrypt:
    image: jrcs/letsencrypt-nginx-proxy-companion
    env_file:
      - .env.prod.proxy-companion
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
      - certs:/etc/nginx/certs
      - html:/usr/share/nginx/html
      - vhost:/etc/nginx/vhost.d
      - acme:/etc/acme.sh
    depends_on:
      - nginx-proxy
    networks:
      - app-network

networks:
  app-network:
    driver: bridge

volumes:
  certs:
  html:
  vhost:
  acme:
```
Then my Dockerfile.staging file:
```dockerfile
# ./django-docker/app/Dockerfile
FROM python:3.7.5-buster

# set work directory
WORKDIR /opt/app

# Add current directory code to working directory
ADD . /opt/app/

# set environment variables
# Prevents Python from writing pyc files to disc.
ENV PYTHONDONTWRITEBYTECODE 1
# Prevents Python from buffering stdout and stderr.
ENV PYTHONUNBUFFERED 1

# Copy firebase file
# COPY afro-mobile-test-firebase-adminsdk-cspoa.json

# Install system dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
    tzdata \
    python3-setuptools \
    python3-pip \
    python3-dev \
    python3-venv \
    git \
    && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*

# install environment dependencies
RUN pip3 install --upgrade pip

# Install project dependencies
RUN pip3 install -r requirements.txt

EXPOSE 7000

# copy project
COPY . /opt/app/

CMD ["bash", "start-app.sh"]
```
Then my start-app.sh file:
```bash
#!/bin/bash
# Run migrations
python manage.py migrate

# run tests
# python manage.py test

# run collectstatic
python manage.py collectstatic
echo 'COLLECT STATIC DONE ********'
echo $PORT

# Start server
# python manage.py runserver 0.0.0.0:$PORT
gunicorn server.wsgi:application --bind 0.0.0.0:$PORT
```
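One detail worth noting about the script above (an observation, not part of the original post): when existing files would be overwritten, `collectstatic` asks for interactive confirmation, and inside a container there is usually no TTY to answer it, so the non-interactive flag is the usual fix:

```bash
# --noinput (alias --no-input) skips the interactive confirmation prompt
python manage.py collectstatic --noinput
```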
I'm using GitLab CI to automate the pipeline, so here is my gitlab.yml build script:
```yaml
# Build and Deploy to Azure.
build-dev:
  stage: build
  before_script:
    - export IMAGE=$CI_REGISTRY/$CI_PROJECT_NAMESPACE/$CI_PROJECT_NAME
  script:
    - apk add --no-cache bash
    - chmod +x ./setup_env.sh
    - bash ./setup_env.sh
    - docker login $AZ_REGISTRY_IMAGE -u $AZ_USERNAME_REGISTRY -p $AZ_PASSWORD_REGISTRY
    - docker pull $AZ_REGISTRY_IMAGE/guio-tag:tag || true
    - docker-compose -f docker-compose.ci.stag.yml build
    - docker push $AZ_REGISTRY_IMAGE/guio-tag:tag
  only:
    - develop
    - test-branch
```
The build runs successfully, but I'm sure `python manage.py collectstatic` is not run. How best can I do this?
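Since start-app.sh runs as the container's CMD, collectstatic only executes when a container starts, never during the GitLab build stage. If the goal is to bake the static files into the image instead, a rough sketch of the usual alternative is a RUN step near the end of Dockerfile.staging; this assumes the settings collectstatic reads are resolvable at build time, which .env-only configuration would not provide:

```dockerfile
# hypothetical build-time collectstatic; requires DJANGO_SETTINGS_MODULE and
# any settings it reads to be available during `docker build`
RUN python3 manage.py collectstatic --noinput
```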

testdriven.io: The Definitive Guide to Celery and Django. All container logs are "Waiting for PostgreSQL to become available..."

I've followed each step of part 1, chapter 4, and literally copy-pasted the code as shown.
docker-compose is able to build the containers, but I always get "Waiting for PostgreSQL to become available..." logged from all the other containers, as shown below. (screenshot: docker-compose logs)
Following is the output of docker ps -a, from which I can see that all containers are running on their respective ports. (screenshot: docker ps -a)
I checked the logs of the db container and it shows as running. (screenshot: postgres container logs)
But I'm unable to open the Django server on port 8010, nor view the Flower server on port 5557, because the container logs keep printing "Waiting for PostgreSQL to become available...".
Someone please help; this issue is killing me. I've viewed the logs of each container and they all show as running, yet I can't reach the Django and Flower servers.
Let me know if you guys need more info.
Thanks!
Things I tried: checked if the DB is up and running at the correct port; checked if Redis is running; checked the logs of each running container, which all point to the same message, "Waiting for PostgreSQL to become available...". The psycopg2-binary package was used.
Using psycopg2 instead worked for me.
This issue may be related to Mac M1.
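In case it helps, that swap is a one-line change in requirements.txt (the pin below just mirrors the psycopg2-binary version listed further down and is only an example); note that plain psycopg2 builds from source, so the image needs the libpq headers and a C compiler available:

```text
# requirements.txt: build psycopg2 from source instead of the binary wheel
psycopg2==2.9.5
```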
I had the same problem with this tutorial (on Mac M1) and it took me many hours. The solution for me was finally to use "alpine" instead of "buster". Here is my Dockerfile:
```dockerfile
FROM alpine:3.17.1

ENV PYTHONUNBUFFERED 1
ENV PYTHONDONTWRITEBYTECODE 1

RUN apk update \
  && apk add postgresql-dev gcc python3-dev musl-dev py3-pip bash

# Requirements are installed here to ensure they will be cached.
COPY ./requirements.txt /requirements.txt
RUN pip install -r /requirements.txt

COPY ./compose/local/django/entrypoint /entrypoint
RUN sed -i 's/\r$//g' /entrypoint
RUN chmod +x /entrypoint

COPY ./compose/local/django/start /start
RUN sed -i 's/\r$//g' /start
RUN chmod +x /start

COPY ./compose/local/django/celery/worker/start /start-celeryworker
RUN sed -i 's/\r$//g' /start-celeryworker
RUN chmod +x /start-celeryworker

COPY ./compose/local/django/celery/beat/start /start-celerybeat
RUN sed -i 's/\r$//g' /start-celerybeat
RUN chmod +x /start-celerybeat

COPY ./compose/local/django/celery/flower/start /start-flower
RUN sed -i 's/\r$//g' /start-flower
RUN chmod +x /start-flower

WORKDIR /app

ENTRYPOINT ["/entrypoint"]
```
requirements.txt:
```text
django==4.1.4
celery==5.2.7
redis==4.3.4
flower==1.2.0
psycopg2-binary==2.9.5
```
And, though maybe not directly related, I added startup conditions and health checks to the docker-compose file, so you don't need the postgres "ready" polling in the entrypoint script.
Here is my docker-compose.yml:
```yaml
version: '3.8'

services:
  web:
    build:
      context: .
      dockerfile: ./compose/local/django/Dockerfile
    image: django_celery_example_web
    command: /start
    volumes:
      - .:/app
    ports:
      - 8010:8000
    env_file:
      - ./.env/.dev-sample
    depends_on:
      redis:
        condition: service_started
      db:
        condition: service_healthy

  db:
    image: postgres:14.6-alpine
    volumes:
      - postgres_data:/var/lib/postgresql/data/
    environment:
      - POSTGRES_DB=hello_django
      - POSTGRES_USER=hello_django
      - POSTGRES_PASSWORD=hello_django
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U hello_django"]
      interval: 5s
      timeout: 5s
      retries: 5

  redis:
    image: redis:7-alpine
    networks:
      - default

  celery_worker:
    build:
      context: .
      dockerfile: ./compose/local/django/Dockerfile
    image: django_celery_example_celery_worker
    command: /start-celeryworker
    volumes:
      - .:/app
    env_file:
      - ./.env/.dev-sample
    networks:
      - default
    depends_on:
      redis:
        condition: service_started
      db:
        condition: service_healthy

  celery_beat:
    build:
      context: .
      dockerfile: ./compose/local/django/Dockerfile
    image: django_celery_example_celery_beat
    command: /start-celerybeat
    volumes:
      - .:/app
    env_file:
      - ./.env/.dev-sample
    depends_on:
      redis:
        condition: service_started
      db:
        condition: service_healthy

  flower:
    build:
      context: .
      dockerfile: ./compose/local/django/Dockerfile
    image: django_celery_example_celery_flower
    command: /start-flower
    volumes:
      - .:/app
    env_file:
      - ./.env/.dev-sample
    ports:
      - 5557:5555
    depends_on:
      redis:
        condition: service_started
      db:
        condition: service_healthy

volumes:
  postgres_data:
```
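Two notes on the file above. The long form of `depends_on` with `condition` needs a reasonably recent Docker Compose; as far as I know, the v3 file schema originally dropped it and it came back with the newer Compose Spec. And as a quick way to confirm the health check is passing (a sketch; the `$(docker-compose ps -q db)` expansion assumes the `db` service from this file):

```bash
# prints "healthy" once pg_isready succeeds inside the db container
docker inspect --format '{{.State.Health.Status}}' "$(docker-compose ps -q db)"
```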

How to incorporate django-tailwind into Dockerized cookiecutter-django project?

Apologies for what is likely a very simple question; I am pretty new to Docker and am struggling to integrate django-tailwind into my Docker project, which was created using cookiecutter-django.
I have tried altering my Dockerfile and local.yml to follow the example Dockerfiles in the example app on the django-tailwind GitHub repo, but haven't been able to make it work. The instructions beyond the example app are not very detailed.
I have quoted my Dockerfile and local.yml below; would someone be able to advise on what changes I need to make in order to incorporate django-tailwind, or point me in the direction of information that would help me do it myself?
Dockerfile:
```dockerfile
ARG PYTHON_VERSION=3.9-slim-bullseye

# define an alias for the specific python version used in this file.
FROM python:${PYTHON_VERSION} as python

# Python build stage
FROM python as python-build-stage

ARG BUILD_ENVIRONMENT=local

# Install apt packages
RUN apt-get update && apt-get install --no-install-recommends -y \
  # dependencies for building Python packages
  build-essential \
  # psycopg2 dependencies
  libpq-dev

# Requirements are installed here to ensure they will be cached.
COPY ./requirements .

# Create Python Dependency and Sub-Dependency Wheels.
RUN pip wheel --wheel-dir /usr/src/app/wheels \
  -r ${BUILD_ENVIRONMENT}.txt

# Python 'run' stage
FROM python as python-run-stage

ARG BUILD_ENVIRONMENT=local
ARG APP_HOME=/app

ENV PYTHONUNBUFFERED 1
ENV PYTHONDONTWRITEBYTECODE 1
ENV BUILD_ENV ${BUILD_ENVIRONMENT}

WORKDIR ${APP_HOME}

# Install required system dependencies
RUN apt-get update && apt-get install --no-install-recommends -y \
  # psycopg2 dependencies
  libpq-dev \
  # Translations dependencies
  gettext \
  # cleaning up unused files
  && apt-get purge -y --auto-remove -o APT::AutoRemove::RecommendsImportant=false \
  && rm -rf /var/lib/apt/lists/*

# All absolute dir copies ignore workdir instruction. All relative dir copies are wrt to the workdir instruction
# copy python dependency wheels from python-build-stage
COPY --from=python-build-stage /usr/src/app/wheels /wheels/

# use wheels to install python dependencies
RUN pip install --no-cache-dir --no-index --find-links=/wheels/ /wheels/* \
  && rm -rf /wheels/

COPY ./compose/production/django/entrypoint /entrypoint
RUN sed -i 's/\r$//g' /entrypoint
RUN chmod +x /entrypoint

COPY ./compose/local/django/start /start
RUN sed -i 's/\r$//g' /start
RUN chmod +x /start

COPY ./compose/local/django/celery/worker/start /start-celeryworker
RUN sed -i 's/\r$//g' /start-celeryworker
RUN chmod +x /start-celeryworker

COPY ./compose/local/django/celery/beat/start /start-celerybeat
RUN sed -i 's/\r$//g' /start-celerybeat
RUN chmod +x /start-celerybeat

COPY ./compose/local/django/celery/flower/start /start-flower
RUN sed -i 's/\r$//g' /start-flower
RUN chmod +x /start-flower

# copy application code to WORKDIR
COPY . ${APP_HOME}

ENTRYPOINT ["/entrypoint"]
```
local.yml
```yaml
version: '3'

volumes:
  uniqued_local_postgres_data: {}
  uniqued_local_postgres_data_backups: {}

services:
  django: &django
    build:
      context: .
      dockerfile: ./compose/local/django/Dockerfile
    image: uniqued_local_django
    container_name: uniqued_local_django
    platform: linux/x86_64
    depends_on:
      - postgres
      - redis
    volumes:
      - .:/app:z
    env_file:
      - ./.envs/.local/.django
      - ./.envs/.local/.postgres
    ports:
      - "8000:8000"
    command: /start

  postgres:
    build:
      context: .
      dockerfile: ./compose/production/postgres/Dockerfile
    image: uniqued_production_postgres
    container_name: uniqued_local_postgres
    volumes:
      - uniqued_local_postgres_data:/var/lib/postgresql/data:Z
      - uniqued_local_postgres_data_backups:/backups:z
    env_file:
      - ./.envs/.local/.postgres

  docs:
    image: uniqued_local_docs
    container_name: uniqued_local_docs
    platform: linux/x86_64
    build:
      context: .
      dockerfile: ./compose/local/docs/Dockerfile
    env_file:
      - ./.envs/.local/.django
    volumes:
      - ./docs:/docs:z
      - ./config:/app/config:z
      - ./uniqued:/app/uniqued:z
    ports:
      - "9000:9000"
    command: /start-docs

  redis:
    image: redis:6
    container_name: uniqued_local_redis

  celeryworker:
    <<: *django
    image: uniqued_local_celeryworker
    container_name: uniqued_local_celeryworker
    depends_on:
      - redis
      - postgres
    ports: []
    command: /start-celeryworker

  celerybeat:
    <<: *django
    image: uniqued_local_celerybeat
    container_name: uniqued_local_celerybeat
    depends_on:
      - redis
      - postgres
    ports: []
    command: /start-celerybeat

  flower:
    <<: *django
    image: uniqued_local_flower
    container_name: uniqued_local_flower
    ports:
      - "5555:5555"
    command: /start-flower
```
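For what it's worth, the two pieces django-tailwind needs are Node/npm inside the image (its `tailwind` management commands shell out to npm) and something to run the Tailwind watcher in development. Below is a rough sketch under those assumptions, not a tested recipe; the NodeSource major version and the `tailwind` service name are placeholders. First, in the `python-run-stage` of the Dockerfile:

```dockerfile
# sketch: install Node.js/npm so django-tailwind's management commands
# (python manage.py tailwind install / start / build) can invoke npm
RUN apt-get update && apt-get install --no-install-recommends -y curl \
    && curl -fsSL https://deb.nodesource.com/setup_18.x | bash - \
    && apt-get install --no-install-recommends -y nodejs \
    && rm -rf /var/lib/apt/lists/*
```

Then a development service in local.yml that reuses the `&django` anchor to run the watcher:

```yaml
# sketch: hypothetical service name; inherits build/volumes/env from &django
tailwind:
  <<: *django
  image: uniqued_local_tailwind
  container_name: uniqued_local_tailwind
  ports: []
  command: python manage.py tailwind start
```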

Unable to open unix socket in redis - Permission denied while firing up docker container

I am trying to fire up a separate Redis container which will work as a broker for Celery. Can someone help me understand why the docker user is not able to open the UNIX socket? I have even tried making the user root, but it doesn't seem to work. Please find below the Dockerfile, docker-compose file, and redis.conf file.
Dockerfile:
```dockerfile
FROM centos/python-36-centos7
USER root

ENV DockerHOME=/home/django
RUN mkdir -p $DockerHOME

ENV PYTHONDONTWRITEBYTECODE 1
ENV PYTHONUNBUFFERED 1
ENV PATH=/home/django/.local/bin:$PATH

COPY ./oracle-instantclient18.3-basiclite-18.3.0.0.0-3.x86_64.rpm /home/django
COPY ./oracle.conf /home/django

RUN yum install -y dnf
RUN dnf install -y libaio libaio-devel
RUN rpm -i /home/django/oracle-instantclient18.3-basiclite-18.3.0.0.0-3.x86_64.rpm && \
    cp /home/django/oracle.conf /etc/ld.so.conf.d/ && \
    ldconfig && \
    ldconfig -p | grep client64

COPY ./requirements /home/django/requirements
WORKDIR /home/django
RUN pip install --upgrade pip
RUN pip install --no-cache-dir -r ./requirements/development.txt
COPY . .
RUN chmod 777 /home/django
EXPOSE 8000
ENTRYPOINT ["/bin/bash", "-e", "docker-entrypoint.sh"]
```
Docker-compose file:
```yaml
version: '3.8'

services:
  app:
    build: .
    volumes:
      - .:/django
      - cache:/var/run/redis
    image: app_name:django
    container_name: app_name
    ports:
      - 8000:8000
    depends_on:
      - db
      - redis
  db:
    image: postgres:10.0-alpine
    volumes:
      - postgres_data:/var/lib/postgresql/data
    ports:
      - 5432:5432
    environment:
      - POSTGRES_USER=app_name
      - POSTGRES_PASSWORD=app_password
      - POSTGRES_DB=app_db
    labels:
      description: "Postgres Database"
    container_name: app_name-db-1
  redis:
    image: redis:alpine
    command: redis-server /etc/redis/redis.conf
    restart: unless-stopped
    ports:
      - 6379:6379
    volumes:
      - ./redis/data:/var/lib/redis
      - ./redis/redis-server.log:/var/log/redis/redis-server.log
      - cache:/var/run/redis/
      - ./redis/redis.conf:/etc/redis/redis.conf
    container_name: redis
    healthcheck:
      test: redis-cli ping
      interval: 1s
      timeout: 3s
      retries: 30

volumes:
  postgres_data:
  cache:
  static-volume:
```
docker-entrypoint.sh:
```bash
# run migration first
python manage.py migrate
python manage.py preload_sites -uvasas -l
python manage.py preload_endpoints -uvasas -l
python manage.py collectstatic --noinput

# start celery
export C_FORCE_ROOT='true'
celery multi start 1 -A realm -l INFO -c4

# start the server
python manage.py runserver 0:8000
```
redis.conf:
```text
unixsocket /var/run/redis/redis.sock
unixsocketperm 770
logfile /var/log/redis/redis-server.log
```
I am new to Docker, so apologies if I have missed something obvious or have not followed some best practices.
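For what it's worth (an illustration, not a verified fix): the official redis images run redis-server as the unprivileged redis user, while the directory behind a freshly created named volume such as /var/run/redis starts out owned by root, so the server cannot create the socket there; and with unixsocketperm 770, any client must also run as the redis user or share its group. A sketch of preparing the directory before dropping privileges, assuming the redis:alpine image (which, to my knowledge, ships su-exec):

```yaml
redis:
  image: redis:alpine
  # create the socket directory and hand it to the redis user while still
  # root, then drop privileges before starting the server
  command: >
    sh -c "mkdir -p /var/run/redis
           && chown redis:redis /var/run/redis
           && exec su-exec redis redis-server /etc/redis/redis.conf"
```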