EDIT 22/05/2022
Docker version 20.10.14
docker-compose version 1.25.0
I deleted all containers/images again and rebuilt using docker-compose -f docker-compose.preprod.yml build --force-rm --no-cache, but I still observe the same issue: the code is not up to date in the web container.
I run a Django app with Docker and cannot get code updates applied to my web container.
I've tried deleting all containers (docker rm -f ID ; docker system prune) and images (docker rmi -f ID ; docker image prune) related to my app and rebuilding with docker-compose -f docker-compose.preprod.yml build
Then I run docker-compose -f docker-compose.preprod.yml up, but when I connect to my running web container (docker exec -it web sh) and read the updated files, I see that the updates have not been applied...
What should I do to get my updates applied?
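For reference, the full clean-up and rebuild sequence described above is roughly the following (the IDs are placeholders for the containers and images belonging to the app):

docker rm -f <container-id>    # for each app container
docker system prune
docker rmi -f <image-id>       # for each app image
docker image prune
docker-compose -f docker-compose.preprod.yml build --force-rm --no-cache
docker-compose -f docker-compose.preprod.yml up

Here is my Dockerfile.preprod, followed by my docker-compose.preprod.yml: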
# Pull the official base image
FROM python:3.8.3-alpine
# Set a work directory
WORKDIR /usr/src/app
# Set environment variables
ENV PYTHONDONTWRITEBYTECODE 1
ENV PYTHONUNBUFFERED 1
# install psycopg2 dependencies
RUN apk update && apk add postgresql-dev gcc g++ python3-dev musl-dev
RUN apk --update add libxml2-dev libxslt-dev libffi-dev musl-dev libgcc openssl-dev curl postgresql-client
RUN apk add jpeg-dev zlib-dev freetype-dev lcms2-dev openjpeg-dev tiff-dev tk-dev tcl-dev nano
RUN pip3 install psycopg2 psycopg2-binary
# install xgettext for i18n
RUN apk add gettext
# Install dependencies
COPY requirements/ requirements/
RUN pip install --upgrade pip && pip install -r requirements/preprod.txt
# Copy the entrypoint.sh file
COPY entrypoint.preprod.sh .
# Copy the initdata sql file
COPY initdata.preprod.sql .
# Copy the project's files
COPY . .
RUN chmod +x entrypoint.preprod.sh
version: '3.7'
services:
web:
restart: always
container_name: ecrf_web
build:
context: ./app
dockerfile: Dockerfile.preprod
command: gunicorn core.wsgi:application --bind 0.0.0.0:8000
volumes:
- app_volume:/usr/src/app
- static_volume:/usr/src/app/static
- media_volume:/usr/src/app/media
expose:
- 8000
env_file:
- ./.env.preprod
entrypoint: [ "/usr/src/app/entrypoint.preprod.sh" ]
depends_on:
- redis
healthcheck:
test: [ "CMD", "curl", "-f", "http://localhost:8000/" ]
interval: 30s
timeout: 10s
retries: 50
redis:
container_name: ecrf_redis
image: "redis:alpine"
celery:
container_name: ecrf_celery
build:
context: ./app
dockerfile: Dockerfile.preprod
command: celery -A core worker -l info
volumes:
- app_volume:/usr/src/app
env_file:
- ./.env.preprod
depends_on:
- web
- redis
celery-beat:
container_name: ecrf_celery-beat
build:
context: ./app
dockerfile: Dockerfile.preprod
command: celery -A core beat -l info
volumes:
- app_volume:/usr/src/app
env_file:
- ./.env.preprod
depends_on:
- web
- redis
nginx:
container_name: ecrf_nginx
build: ./nginx
restart: always
volumes:
- static_volume:/usr/src/app/static
- media_volume:/usr/src/app/media
ports:
- 1370:80
depends_on:
- web
volumes:
static_volume:
media_volume:
app_volume:
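One thing worth checking here: /usr/src/app in the web container is mounted from the named volume app_volume, and a named volume keeps whatever content it already has across image rebuilds, so docker exec shows the volume's state rather than the freshly built image. A quick way to compare the two (a sketch; the image tag follows Compose's default <project>_web naming, and manage.py stands in for any updated file):

# the file as built into the image (bypasses the volume):
docker run --rm --entrypoint cat <project>_web /usr/src/app/manage.py
# the file as the running container sees it (through app_volume):
docker exec ecrf_web cat /usr/src/app/manage.py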
Related
I have the following docker-compose file, which builds and starts 4 containers. One of them is a Django container, for which I mount /var/run/docker.sock as a volume so that the Django container can access the host's Docker engine.
version: '3.8'
services:
web:
build:
context: ./app
dockerfile: Dockerfile.prod
command: gunicorn hello_django.wsgi:application --bind 0.0.0.0:8000
volumes:
- static_volume:/home/app/web/staticfiles
- media_volume:/home/app/web/mediafiles
- /var/run/docker.sock:/var/run/docker.sock
expose:
- 8000
env_file:
- ./.env.prod
depends_on:
- postgresdb
restart: always
postgresdb:
container_name: postgresdb
image: timescale/timescaledb:latest-pg11
volumes:
- ./:/imports
- postgres_data:/var/lib/postgresql/data/
command: 'postgres -cshared_preload_libraries=timescaledb'
ports:
- "5432:5432"
env_file:
- ./.env.prod.db
restart: always
nginx:
build: ./nginx
volumes:
- static_volume:/home/app/web/staticfiles
- media_volume:/home/app/web/mediafiles
ports:
- 80:80
depends_on:
- web
restart: always
volttron1:
container_name: volttron1
hostname: volttron1
build:
context: ./volttron
dockerfile: Dockerfile
image: volttron/volttron:develop
volumes:
- ./volttron/platform_config.yml:/platform_config.yml
- ./volttron/configs:/home/volttron/configs
- ./volttron/volttronThingCerts:/home/volttron/volttronThingCerts
environment:
- CONFIG=/home/volttron/configs
- LOCAL_USER_ID=1000
network_mode: host
restart: always
mem_limit: 700m
cpus: 1.5
volumes:
postgres_data:
static_volume:
media_volume:
The content of Dockerfile.prod for the Django web container is the following:
###########
# BUILDER #
###########
# pull official base image
FROM python:3.9.6-alpine as builder
# set work directory
WORKDIR /usr/src/app
# set environment variables
ENV PYTHONDONTWRITEBYTECODE 1
ENV PYTHONUNBUFFERED 1
# install psycopg2 dependencies
RUN apk update && apk add postgresql-dev gcc python3-dev musl-dev
RUN apk add libc-dev
RUN apk add --update-cache
RUN apk add --update alpine-sdk && apk add libffi-dev openssl-dev && apk --no-cache --update add build-base
# lint
RUN pip install -U pip
RUN pip install flake8==3.9.2
COPY . .
RUN flake8 --ignore=E501,F401 ./hello_django
# install dependencies
COPY ./requirements.txt .
RUN pip wheel --no-cache-dir --no-deps --wheel-dir /usr/src/app/wheels -r requirements.txt
#########
# FINAL #
#########
# pull official base image
FROM python:3.9.6-alpine
# create directory for the app user
RUN mkdir -p /home/app
# create the app user
RUN addgroup -S app && adduser -S app -G app
# create the appropriate directories
ENV HOME=/home/app
ENV APP_HOME=/home/app/web
RUN mkdir $APP_HOME
RUN mkdir $APP_HOME/staticfiles
RUN mkdir $APP_HOME/mediafiles
WORKDIR $APP_HOME
# install dependencies
RUN apk update && apk add libpq
COPY --from=builder /usr/src/app/wheels /wheels
COPY --from=builder /usr/src/app/requirements.txt .
RUN pip install --no-cache /wheels/*
# copy entrypoint.prod.sh
COPY ./entrypoint.prod.sh .
RUN sed -i 's/\r$//g' $APP_HOME/entrypoint.prod.sh
RUN chmod +x $APP_HOME/entrypoint.prod.sh
# copy project
COPY . $APP_HOME
# chown all the files to the app user
RUN chown -R app:app $APP_HOME
RUN chmod 666 /var/run/docker.sock
# change to the app user
USER app
# run entrypoint.prod.sh
ENTRYPOINT ["/home/app/web/entrypoint.prod.sh"]
The problem is the statement RUN chmod 666 /var/run/docker.sock, which raises the following error:
chmod: cannot access "/var/run/docker.sock": No such file or directory
But why am I getting this error when I have mounted /var/run/docker.sock in the docker-compose.yml file?
You're trying to chmod the docker.sock file when building the image. The volume is only mounted and used when running the container. You'll probably need to change permissions of the socket file on the host if needed.
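A minimal sketch of that approach, assuming the permission change is genuinely needed (note that a world-writable Docker socket effectively hands root on the host to anyone who can reach it):

# remove `RUN chmod 666 /var/run/docker.sock` from Dockerfile.prod, then on the host:
sudo chmod 666 /var/run/docker.sock
docker-compose up -d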
This may be a simple question, but I've just started to learn Docker and I am making my first project with it.
I have a Django project using Celery and Redis. I've made a Dockerfile and a docker-compose.yml:
Dockerfile
FROM python:3.8
RUN apt-get update && apt-get upgrade -y && apt-get autoremove && apt-get autoclean
RUN apt-get install -y \
libffi-dev \
libssl-dev \
libxml2-dev \
libxslt-dev \
libjpeg-dev \
libfreetype6-dev \
zlib1g-dev \
net-tools
ARG PROJECT=djangoproject
ARG PROJECT_DIR=/var/www/${PROJECT}
RUN mkdir -p $PROJECT_DIR
WORKDIR $PROJECT_DIR
COPY requirements.txt .
RUN pip install -r requirements.txt
EXPOSE 8000
STOPSIGNAL SIGINT
CMD ["python", "manage.py", "runserver", "127.0.0.1:8000"]
Docker-compose.yml:
version: "3"
services:
redis:
image: redis:latest
container_name: rd01
ports:
- '6379:6379'
restart: always
expose:
- '6379'
django:
container_name: django_server
build:
context: .
dockerfile: Dockerfile
image: docker_tutorial_django
volumes:
- ./parser_folder:/var/www/djangoproject
ports:
- "8000:8000"
links:
- redis
depends_on:
- celery
celery:
build: .
command: celery -A Parsing worker -B --loglevel=DEBUG
volumes:
- ./parser_folder:/var/www/djangoproject
links:
- redis
When I execute docker-compose up I get an error consumer: Cannot connect to redis://localhost:6379//: Error 99 connecting to localhost:6379. Cannot assign requested address..
I tried changing the ports and the command for Redis in docker-compose.yml, but it still doesn't work. Please help me figure it out.
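The error message itself is a hint: inside the Compose network, localhost in the celery container refers to the celery container itself, while Redis is reachable under its service name redis. A sketch of the likely fix, assuming the project reads its broker URL from an environment variable (if the URL is hard-coded in the Celery app configuration, change it there to redis://redis:6379/0 instead):

celery:
  build: .
  command: celery -A Parsing worker -B --loglevel=DEBUG
  environment:
    - CELERY_BROKER_URL=redis://redis:6379/0  # "redis" = the Compose service name, not localhost
  volumes:
    - ./parser_folder:/var/www/djangoproject
  links:
    - redis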
I want to use docker to publish my Django project.
I have created a docker-compose.yml file, a .dockerignore, and a Dockerfile like this one:
FROM python:3.6-alpine
RUN apk add --no-cache gcc musl-dev linux-headers
RUN apk update && apk add postgresql-dev gcc python3-dev musl-dev
RUN mkdir /code
COPY requirements.txt /code
WORKDIR /code
RUN pip install --upgrade pip
RUN pip install -r requirements.txt
COPY . .
CMD ["python", "manage.py", "runserver", "127.0.0.1:8000"]
When I first run docker-compose I get an error installing a package contained in my requirements.txt file. At this point, I remove the package from the file and run:
docker-compose down
docker-compose build --no-cache
Here is my docker-compose.yml:
version: '3'
networks:
mynetwork:
driver: bridge
services:
db:
image: postgres
restart: always
ports:
- "5432:5432"
networks:
- mynetwork
environment:
POSTGRES_USER: myuser
POSTGRES_PASSWORD: mypass
POSTGRES_DB: mydb
volumes:
- ./data:/var/lib/postgresql/data
web:
build: .
networks:
- mynetwork
volumes:
- .:/DEV
ports:
- "8000:8000"
depends_on:
- db
When the build reaches pip install -r requirements.txt, the package I removed is still being installed and causes the same issue... how can I clear the cache so that the build uses my newly saved requirements.txt file?
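One way to check which requirements.txt actually ends up in the image (a debugging sketch using the web service defined above; /code is the WORKDIR set in the Dockerfile):

docker-compose build --no-cache web
docker-compose run --rm web cat /code/requirements.txt  # should print the edited file

If the removed package still appears there, make sure the requirements.txt being edited sits at the root of the build context and is not affected by the .dockerignore mentioned above.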
When I start docker-compose build I have 60 gigs free. I run out of space before it finishes. Any idea what could possibly be going on?
I'm running the latest Docker for Mac and docker-compose.
Here's my docker-compose file:
version: '3'
services:
db:
image: postgres:9.6-alpine
volumes:
- data:/var/lib/postgresql/data
ports:
- 5432:5432
web:
image: python:3.6-alpine
command: ./waitforit.sh solr:8983 db:5432 -- bash -c "./init.sh"
build: .
env_file: ./.env
volumes:
- .:/sark
- solrcores:/solr
ports:
- 8000:8000
links:
- db
- solr
restart: always
solr:
image: solr:6-alpine
ports:
- 8983:8983
entrypoint:
- docker-entrypoint.sh
- solr-precreate
- sark
volumes:
- solrcores:/opt/solr/server/solr/mycores
volumes:
data:
solrcores:
And my Dockerfile for the "web" image:
FROM python:3
# Some stuff that everyone has been copy-pasting
# since the dawn of time.
ENV PYTHONUNBUFFERED 1
# Install some necessary things.
RUN apt-get update
RUN apt-get install -y swig libssl-dev dpkg-dev netcat
# Copy all our files into the image.
RUN mkdir /sark
WORKDIR /sark
COPY . /sark/
# Install our requirements.
RUN pip install -U pip
RUN pip install -Ur requirements.txt
This image itself when built is ~3 gigs.
I'm pretty flummoxed.
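A reasonable first step for this kind of problem is Docker's own disk-usage report (standard Docker CLI commands, nothing project-specific):

docker system df     # totals for images, containers, local volumes and build cache
docker system df -v  # verbose per-image / per-container / per-volume breakdown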
Good friends, I am developing an application in Django 1.11 with Docker on Windows. I recently updated the project's git repository and also made some changes to the Docker containers.
The problem is that the main page and some other URLs respond fine, but when I try to log in to the admin, the Django container shuts down and I don't even get any error from the browser, the console, or the logs.
Example:
These requests are fine:
GET / 200 OK
POST / 403 Forbidden
GET /api/auth/ 405 Method Not Allowed
But when I request these, the Docker container shuts down without showing any message (proyect_django_1 exited with code 0):
GET /admin no answer
POST /api/auth/ no answer
My docker-compose file:
version: '3'
services:
db:
build: docker/postgres
volumes:
- ./docker/data/postgres:/var/lib/postgresql/data
environment:
- POSTGRES_PASSWORD=postgres
- POSTGRES_USER=postgres
- POSTGRES_DB=project
redis:
image: redis:3.2-alpine
volumes:
- ./docker/data/redis:/data
rabbit:
image: rabbitmq:3-management-alpine
environment:
- RABBITMQ_DEFAULT_USER=admin
- RABBITMQ_DEFAULT_PASS=admin
django:
build:
context: .
args:
- REQUIREMENTS=development.txt
command: python3.6 manage.py runserver 0.0.0.0:8008
volumes:
- ./:/code
working_dir: /code/project
env_file: ./docker/DevelopmentEnv
ports:
- "8008:8008"
links:
- db
- rabbit
- redis
depends_on:
- db
celeryworker:
build:
context: .
args:
- REQUIREMENTS=development.txt
working_dir: /code/project
volumes:
- ./:/code
env_file: ./docker/DevelopmentEnv
links:
- db
- rabbit
command: celery -A config worker -l INFO -Q celery
frontend:
image: node:8.4-alpine
volumes:
- ./:/code
working_dir: /code/frontend
command: ash -c "yarn install --no-bin-links && yarn run build"
socketio:
image: node:8.4-alpine
volumes:
- ./:/code
working_dir: /code/sockets
command: ash -c "yarn install --no-bin-links && yarn start"
ports:
- "3000:3000"
links:
- redis
- django
depends_on:
- redis
My Dockerfile:
FROM python:3.6.2-alpine3.6
ARG REQUIREMENTS
RUN apk update
RUN apk add postgresql-dev postgresql-client
RUN apk add libffi-dev gcc
RUN apk add musl-dev zlib-dev jpeg-dev
RUN apk add --no-cache --virtual .build-deps-testing \
--repository http://dl-cdn.alpinelinux.org/alpine/edge/testing \
gdal-dev
RUN mkdir /code
ADD ./ /code/
WORKDIR /code
RUN pip3.6 install -r requirements/$REQUIREMENTS
WORKDIR /code/project
You could add restart: always to your django service definition. This will start a new django container if the previous one exits for any reason.
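A minimal sketch of that change applied to the compose file above:

django:
  build:
    context: .
    args:
      - REQUIREMENTS=development.txt
  restart: always
  # ... rest of the service definition unchanged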
You should be getting some logs about why the process is exiting. Try running docker inspect <container-name> as well to see if there are any clues. There is probably a bug in your Python code, triggered by certain URLs, that causes the process to exit.
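For example, with the container name taken from the exit message above:

docker logs proyect_django_1                                    # stdout/stderr of the Django process
docker inspect --format '{{.State.ExitCode}}' proyect_django_1  # how the container last exited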