can't access video device from Docker Container - django

Here is my DockerFile:
FROM python:3.10.4
# FIX: "#" was a paste typo — email addresses use "@"
LABEL maintainer="Louis Jefferson Zhang <louis.ljz08@gmail.com>"
# Set environment variables (key=value form; the space-separated ENV syntax is deprecated)
ENV PIP_DISABLE_PIP_VERSION_CHECK=1 \
    PYTHONDONTWRITEBYTECODE=1 \
    PYTHONUNBUFFERED=1
# Build dependencies for OpenCV, dlib and psycopg2; update+install in one layer,
# drop recommended extras, and remove the apt lists so they never reach the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential \
    cmake \
    libdlib-dev \
    libopencv-dev \
    libpq-dev \
    && rm -rf /var/lib/apt/lists/*
# Installation (--no-cache-dir keeps pip's download cache out of the layer)
RUN pip install --no-cache-dir opencv-python dlib psycopg2
# MAIN
WORKDIR /code
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
COPY ./fr_as .
Here is my Docker Compose:
version: "3.9"
services:
  web:
    build: .
    command: python /code/fr_as/manage.py runserver 0.0.0.0:8000
    volumes:
      - .:/code
    ports:
      - "8000:8000"
    depends_on:
      - db
    # Pass the host webcam through to the container. This only works when the
    # Docker daemon runs on a Linux host that actually has /dev/video0 —
    # Docker Desktop (macOS/Windows) runs in a VM with no camera device.
    devices:
      - /dev/video0:/dev/video0
    # NOTE(review): privileged grants ALL host devices and capabilities, which
    # makes the `devices:` mapping redundant — prefer dropping privileged and
    # keeping only the explicit device mapping.
    privileged: true
  db:
    image: postgres:15
    volumes:
      - postgres_data:/var/lib/postgresql/data/
    environment:
      - "POSTGRES_HOST_AUTH_METHOD=trust"
volumes:
  postgres_data:
Here is my requirements.txt:
Django
django-cleanup
django-object-actions
django_user_agents
face-recognition
imutils
pillow
I built the container using this command:
docker-compose up -d --build
And an error appeared when running the code below:
cap = cv2.VideoCapture(0)
_, img = cap.read()
Here is the error message:
OpenCV(4.7.0) /io/opencv/modules/imgproc/src/resize.cpp:4062: error: (-215:Assertion failed) !ssize.empty() in function 'resize'
Looking at the Variable Watch Window, The Values for these variables are:
_ = False
img = None

Related

Volume doesn't work in a docker container and forgets data

I'm trying to build Django + Docker + PostgreSQL
Dockerfile
FROM python:3.10.0-alpine
WORKDIR /usr/src/app
ENV PYTHONDONTWRITEBYTECODE=1 \
    PYTHONUNBUFFERED=1
RUN pip install --no-cache-dir --upgrade pip
# --no-cache keeps the apk package index out of the image and replaces the
# separate `apk update` layer (which left stale cache behind).
RUN apk add --no-cache postgresql-dev gcc python3-dev musl-dev
COPY ./requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
COPY ./entrypoint.sh .
# Strip Windows CR (so the shebang works) and mark executable in one layer.
RUN sed -i 's/\r$//g' /usr/src/app/entrypoint.sh \
    && chmod +x /usr/src/app/entrypoint.sh
COPY . .
ENTRYPOINT ["/usr/src/app/entrypoint.sh"]
docker-compose.yml
version: '3.9'
services:
  web:
    build: .
    command: gunicorn pets.wsgi:application --bind 0.0.0.0:8000
    expose:
      - 8000
    env_file:
      - .env
    depends_on:
      - db
  db:
    image: postgres:12.0-alpine
    volumes:
      # Named volume keeps Postgres data across container restarts.
      - postgres_data:/var/lib/postgresql/data/
    env_file:
      - .env
  nginx:
    build: ./nginx
    ports:
      - "1337:80"
    depends_on:
      - web
volumes:
  postgres_data:
entrypoint.sh
#!/bin/sh
# Wait until Postgres accepts TCP connections before touching the database.
if [ "$DATABASE" = "postgres" ]
then
    echo "Waiting for postgres..."
    while ! nc -z "$POSTGRES_HOST" "$POSTGRES_PORT"; do
      sleep 0.1
    done
    echo "PostgreSQL started"
fi
cd pets
# BUG: `flush` wipes ALL rows from the database on EVERY container start —
# this is exactly why the data "disappears" despite the volume. Disabled.
# python manage.py flush --no-input
python manage.py migrate
# BUG FIX: was `exec "$#"` — $# is the argument COUNT. "$@" hands the
# container's CMD over as PID 1 so it receives signals correctly.
exec "$@"
So, when the container starts i use docker exec -it <container_id> python manage.py createsuperuser to create a superuser and then add some data to database.
But when I use `docker-compose stop` and then start it again, the container forgets the written data and everything has to be repeated.
Shouldn't volume solve this issue?

python collectstatic command is not run in Docker Compose and GitLab

I am trying to run `python manage.py collectstatic` in Docker, but nothing works. My Python project is missing some icons, and this command should solve the issue, but I don't know where to place the command. I have read several questions here but had no luck.
Below is my docker-compose.ci.stag.yml file:
version: "3.7"
services:
  web:
    build:
      context: .
      dockerfile: Dockerfile.staging
      cache_from:
        # FIX: removed the stray "*" so this matches the `image:` tag below —
        # a mismatched name silently disables layer-cache reuse.
        - gingregisrty.azurecr.io/guio-tag:tag
    image: gingregisrty.azurecr.io/guio-tag:tag
    expose:
      - 7000
    env_file: .env
Then my docker-compose.staging.yml file :
version: '3.5'
# sudo docker login -p <password> -u <username>
services:
  api:
    container_name: api
    image: gingregisrty.azurecr.io/guio-tag:tag
    ports:
      - "7000:7000"
    restart: unless-stopped
    env_file:
      - .env
    networks:
      - app-network
  watchtower:
    image: containrrr/watchtower
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - /root/.docker/config.json:/config.json
    command: --interval 30
    environment:
      - WATCHTOWER_CLEANUP=true
    networks:
      - app-network
  nginx-proxy:
    container_name: nginx-proxy
    image: jwilder/nginx-proxy:0.9
    restart: always
    ports:
      - "443:443"
      - "90:90"
    volumes:
      - certs:/etc/nginx/certs
      - html:/usr/share/nginx/html
      - vhost:/etc/nginx/vhost.d
      # read-only socket: the proxy only watches container events
      - /var/run/docker.sock:/tmp/docker.sock:ro
    depends_on:
      - api
    networks:
      - app-network
  nginx-proxy-letsencrypt:
    image: jrcs/letsencrypt-nginx-proxy-companion
    env_file:
      - .env.prod.proxy-companion
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
      - certs:/etc/nginx/certs
      - html:/usr/share/nginx/html
      - vhost:/etc/nginx/vhost.d
      - acme:/etc/acme.sh
    depends_on:
      - nginx-proxy
    networks:
      - app-network
networks:
  app-network:
    driver: bridge
volumes:
  certs:
  html:
  vhost:
  acme:
Then my Dockerfile.staging file:
# ./django-docker/app/Dockerfile
FROM python:3.7.5-buster
# set work directory
WORKDIR /opt/app
# set environment variables (key=value form; space-separated ENV is deprecated)
# Prevents Python from writing pyc files to disc / buffering stdout and stderr.
ENV PYTHONDONTWRITEBYTECODE=1 \
    PYTHONUNBUFFERED=1
# Copy firebase file
# COPY afro-mobile-test-firebase-adminsdk-cspoa.json
# Install system dependencies (sorted, recommends skipped, lists removed in-layer)
RUN apt-get update && apt-get install -y --no-install-recommends \
    git \
    python3-dev \
    python3-pip \
    python3-setuptools \
    python3-venv \
    tzdata \
    && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*
# install environment dependencies
RUN pip3 install --no-cache-dir --upgrade pip
# Copy only the dependency manifest first so this layer stays cached
# when application code changes. (Original ADDed the whole tree, then
# COPYed it a second time — both cache-busting and redundant.)
COPY requirements.txt .
RUN pip3 install --no-cache-dir -r requirements.txt
EXPOSE 7000
# copy project (once)
COPY . /opt/app/
CMD ["bash", "start-app.sh"]
then my start-app.sh file :
#!/bin/bash
# Run migrations
python manage.py migrate
# run tests
# python manage.py test
# run collect statics
# FIX: collectstatic asks for interactive confirmation by default; in a
# non-interactive container/CI run that prompt hangs or aborts the step,
# which is why the static files were never collected. --noinput is required.
python manage.py collectstatic --noinput
echo 'COLLECT STATIC DONE ********'
echo $PORT
# Start server
# python manage.py runserver 0.0.0.0:$PORT
gunicorn server.wsgi:application --bind 0.0.0.0:$PORT
I am using GitLab CI to automate the pipeline, so here is my gitlab.yml build script:
# Build and Deploy to Azure.
build-dev:
  stage: build
  before_script:
    - export IMAGE=$CI_REGISTRY/$CI_PROJECT_NAMESPACE/$CI_PROJECT_NAME
  script:
    - apk add --no-cache bash
    - chmod +x ./setup_env.sh
    - bash ./setup_env.sh
    - docker login $AZ_REGISTRY_IMAGE -u $AZ_USERNAME_REGISTRY -p $AZ_PASSWORD_REGISTRY
    # pull the previous tag (if any) so docker-compose build can reuse its layers
    - docker pull $AZ_REGISTRY_IMAGE/guio-tag:tag || true
    - docker-compose -f docker-compose.ci.stag.yml build
    - docker push $AZ_REGISTRY_IMAGE/guio-tag:tag
  only:
    - develop
    - test-branch
The build runs successfully, but I am sure `python manage.py collectstatic` is not run. How best can I do this?

How to incorporate django-tailwind into Dockerized cookiecutter-django project?

Apologies for what is likely a very simple question, I am pretty new to Docker and am struggling to integrate django-tailwind into my docker project, which was created using cookiecutter-django.
I have tried altering my Dockerfile and local.yml file to follow the example Dockerfiles in the example app on the django-tailwind GitHub repo, but haven't been able to make it work. The instructions beyond the example app are not very detailed.
I have quoted my Dockerfile and local.yml file below; would someone be able to advise on what changes I need to make in order to incorporate django-tailwind, or point me in the direction of information that would help me to do it myself?
Dockerfile:
ARG PYTHON_VERSION=3.9-slim-bullseye
# define an alias for the specific python version used in this file.
FROM python:${PYTHON_VERSION} as python

# Python build stage — compilers and headers live only here and are discarded.
FROM python as python-build-stage
ARG BUILD_ENVIRONMENT=local
# Install apt packages
RUN apt-get update && apt-get install --no-install-recommends -y \
  # dependencies for building Python packages
  build-essential \
  # psycopg2 dependencies
  libpq-dev \
  && rm -rf /var/lib/apt/lists/*
# Requirements are installed here to ensure they will be cached.
COPY ./requirements .
# Create Python Dependency and Sub-Dependency Wheels.
RUN pip wheel --wheel-dir /usr/src/app/wheels \
  -r ${BUILD_ENVIRONMENT}.txt

# Python 'run' stage
FROM python as python-run-stage
ARG BUILD_ENVIRONMENT=local
ARG APP_HOME=/app
# key=value form; the legacy space-separated ENV syntax is deprecated.
ENV PYTHONUNBUFFERED=1 \
    PYTHONDONTWRITEBYTECODE=1 \
    BUILD_ENV=${BUILD_ENVIRONMENT}
WORKDIR ${APP_HOME}
# Install required system dependencies
RUN apt-get update && apt-get install --no-install-recommends -y \
  # psycopg2 dependencies
  libpq-dev \
  # Translations dependencies
  gettext \
  # cleaning up unused files
  && apt-get purge -y --auto-remove -o APT::AutoRemove::RecommendsImportant=false \
  && rm -rf /var/lib/apt/lists/*
# All absolute dir copies ignore workdir instruction. All relative dir copies are wrt to the workdir instruction
# copy python dependency wheels from python-build-stage
COPY --from=python-build-stage /usr/src/app/wheels /wheels/
# use wheels to install python dependencies (no index: offline, wheels only)
RUN pip install --no-cache-dir --no-index --find-links=/wheels/ /wheels/* \
  && rm -rf /wheels/
# For each helper script: strip Windows CR and mark executable in one layer.
COPY ./compose/production/django/entrypoint /entrypoint
RUN sed -i 's/\r$//g' /entrypoint && chmod +x /entrypoint
COPY ./compose/local/django/start /start
RUN sed -i 's/\r$//g' /start && chmod +x /start
COPY ./compose/local/django/celery/worker/start /start-celeryworker
RUN sed -i 's/\r$//g' /start-celeryworker && chmod +x /start-celeryworker
COPY ./compose/local/django/celery/beat/start /start-celerybeat
RUN sed -i 's/\r$//g' /start-celerybeat && chmod +x /start-celerybeat
COPY ./compose/local/django/celery/flower/start /start-flower
RUN sed -i 's/\r$//g' /start-flower && chmod +x /start-flower
# copy application code to WORKDIR
COPY . ${APP_HOME}
ENTRYPOINT ["/entrypoint"]
local.yml
version: '3'
volumes:
  uniqued_local_postgres_data: {}
  uniqued_local_postgres_data_backups: {}
services:
  # &django anchors this service so the celery/flower services below can
  # inherit its build, env_file and volumes via the `<<: *django` merge key.
  django: &django
    build:
      context: .
      dockerfile: ./compose/local/django/Dockerfile
    image: uniqued_local_django
    container_name: uniqued_local_django
    platform: linux/x86_64
    depends_on:
      - postgres
      - redis
    volumes:
      - .:/app:z
    env_file:
      - ./.envs/.local/.django
      - ./.envs/.local/.postgres
    ports:
      - "8000:8000"
    command: /start
  postgres:
    build:
      context: .
      dockerfile: ./compose/production/postgres/Dockerfile
    image: uniqued_production_postgres
    container_name: uniqued_local_postgres
    volumes:
      - uniqued_local_postgres_data:/var/lib/postgresql/data:Z
      - uniqued_local_postgres_data_backups:/backups:z
    env_file:
      - ./.envs/.local/.postgres
  docs:
    image: uniqued_local_docs
    container_name: uniqued_local_docs
    platform: linux/x86_64
    build:
      context: .
      dockerfile: ./compose/local/docs/Dockerfile
    env_file:
      - ./.envs/.local/.django
    volumes:
      - ./docs:/docs:z
      - ./config:/app/config:z
      - ./uniqued:/app/uniqued:z
    ports:
      - "9000:9000"
    command: /start-docs
  redis:
    image: redis:6
    container_name: uniqued_local_redis
  celeryworker:
    <<: *django
    image: uniqued_local_celeryworker
    container_name: uniqued_local_celeryworker
    depends_on:
      - redis
      - postgres
    # override the inherited "8000:8000" so only the django service binds it
    ports: []
    command: /start-celeryworker
  celerybeat:
    <<: *django
    image: uniqued_local_celerybeat
    container_name: uniqued_local_celerybeat
    depends_on:
      - redis
      - postgres
    ports: []
    command: /start-celerybeat
  flower:
    <<: *django
    image: uniqued_local_flower
    container_name: uniqued_local_flower
    ports:
      - "5555:5555"
    command: /start-flower

Unable to open unix socket in redis - Permission denied while firing up docker container

I am trying to fire up a separate Redis container which will work as a broker for Celery. Can someone help me understand why the Docker user is not able to open the UNIX socket? I have even tried making the user root, but it doesn't seem to work. Please find below the Dockerfile, docker-compose file, and redis.conf file.
Dockerfile:
FROM centos/python-36-centos7
USER root
ENV DockerHOME=/home/django
RUN mkdir -p $DockerHOME
# FIX: original said "PYTHONWRITEBYCODE", a misspelling that Python ignores;
# the intended variable is PYTHONDONTWRITEBYTECODE.
ENV PYTHONDONTWRITEBYTECODE=1
ENV PYTHONUNBUFFERED=1
ENV PATH=/home/django/.local/bin:$PATH
# (original copied the identical rpm twice — once is enough)
COPY ./oracle-instantclient18.3-basiclite-18.3.0.0.0-3.x86_64.rpm /home/django
COPY ./oracle.conf /home/django
# clean package metadata in the same layer that created it
RUN yum install -y dnf && yum clean all
RUN dnf install -y libaio libaio-devel && dnf clean all
RUN rpm -i /home/django/oracle-instantclient18.3-basiclite-18.3.0.0.0-3.x86_64.rpm && \
    cp /home/django/oracle.conf /etc/ld.so.conf.d/ && \
    ldconfig && \
    ldconfig -p | grep client64
COPY ./requirements /home/django/requirements
WORKDIR /home/django
RUN pip install --no-cache-dir --upgrade pip
RUN pip install --no-cache-dir -r ./requirements/development.txt
COPY . .
# 777 grants world-write; read/execute for others is all that is needed here
RUN chmod 755 /home/django
EXPOSE 8000
ENTRYPOINT ["/bin/bash", "-e", "docker-entrypoint.sh"]
Docker-compose file:
version: '3.8'
services:
  app:
    build: .
    volumes:
      # NOTE(review): mount target /django does not match the image's
      # WORKDIR /home/django — confirm which path the app actually uses.
      - .:/django
      # shared socket directory so the app can reach redis.sock
      - cache:/var/run/redis
    image: app_name:django
    container_name: app_name
    ports:
      - "8000:8000"
    depends_on:
      - db
      - redis
  db:
    image: postgres:10.0-alpine
    volumes:
      - postgres_data:/var/lib/postgresql/data
    ports:
      - "5432:5432"
    environment:
      - POSTGRES_USER=app_name
      - POSTGRES_PASSWORD=app_password
      - POSTGRES_DB=app_db
    labels:
      description: "Postgres Database"
    container_name: app_name-db-1
  redis:
    image: redis:alpine
    command: redis-server /etc/redis/redis.conf
    restart: unless-stopped
    ports:
      - "6379:6379"
    volumes:
      - ./redis/data:/var/lib/redis
      - ./redis/redis-server.log:/var/log/redis/redis-server.log
      - cache:/var/run/redis/
      - ./redis/redis.conf:/etc/redis/redis.conf
    container_name: redis
    healthcheck:
      test: redis-cli ping
      interval: 1s
      timeout: 3s
      retries: 30
volumes:
  postgres_data:
  cache:
  static-volume:
docker-entrypoint.sh:
# Container startup: migrations, data preload, static files, celery, web server.
# run migration first
python manage.py migrate
# preload reference data (project-specific management commands)
python manage.py preload_sites -uvasas -l
python manage.py preload_endpoints -uvasas -l
python manage.py collectstatic --noinput
#start celery
# NOTE(review): C_FORCE_ROOT runs the celery workers as root — acceptable for
# local development only; use a dedicated user in production.
export C_FORCE_ROOT='true'
celery multi start 1 -A realm -l INFO -c4
# start the server
# NOTE(review): runserver is Django's development server; this also leaves the
# web server — not celery — as the container's foreground process.
python manage.py runserver 0:8000
redis.conf
# Expose Redis on a UNIX socket inside /var/run/redis (shared via the `cache`
# volume so other containers can connect to it).
unixsocket /var/run/redis/redis.sock
# 770: owner and group may read/write the socket; all others are denied —
# the connecting container's user must share the redis group/uid.
unixsocketperm 770
logfile /var/log/redis/redis-server.log
I am new to docker so apologies if I have not done something very obvious or if I have not followed some of the best practices.

Django/Docker: migration not detected and not applied

Stack: Django/Docker/Docker-compose/Postgresql (not in container)
I have made modifications, including models updates, saved and push to my remote Gitlab repository.
Then, I pulled modification from my Gitlab repo on the preprod server and I can see that I have the modified version on the server.
But when I stop and restart the container, it does not detect any changes and does not apply the migrations.
I also checked, the entrypoint.preprod.sh file contains the makemigrations and migrate commands.
I have tried rebuilding it with docker-compose build and then running it, but it still doesn't work.
I tried by connecting directly to my container (docker exec -it web sh) but makemigrations are not detected and migrations are therefore not applied.
I must be missing something but what?
docker-compose-preprod.yml
version: '3.7'
services:
  web:
    # FIX: `restart: always` was declared twice in this mapping (a duplicate
    # key, which strict YAML parsers reject) — kept once.
    restart: always
    container_name: virage_web
    build:
      context: ./app
      dockerfile: Dockerfile.preprod
    command: gunicorn core.wsgi:application --bind 0.0.0.0:8000
    volumes:
      # NOTE(review): app_volume is a named volume that persists OLD code and
      # shadows /usr/src/app from the freshly built image — this is the likely
      # reason rebuilt images show no model changes and no new migrations.
      - app_volume:/usr/src/app
      - static_volume:/usr/src/app/static
      - media_volume:/usr/src/app/media
    expose:
      - 8000
    env_file:
      - ./.env.preprod
    entrypoint: [ "/usr/src/app/entrypoint.preprod.sh" ]
    depends_on:
      - redis
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/"]
      interval: 30s
      timeout: 10s
      retries: 50
  redis:
    container_name: virage_redis
    image: "redis:alpine"
  celery:
    container_name: virage_celery
    build:
      context: ./app
      dockerfile: Dockerfile.preprod
    command: celery -A core worker -l info
    volumes:
      - app_volume:/usr/src/app
    env_file:
      - ./.env.preprod
    depends_on:
      - web
      - redis
  celery-beat:
    container_name: virage_celery-beat
    build:
      context: ./app
      dockerfile: Dockerfile.preprod
    command: celery -A core beat -l info
    volumes:
      - app_volume:/usr/src/app
    env_file:
      - ./.env.preprod
    depends_on:
      - web
      - redis
  nginx:
    container_name: virage_nginx
    build: ./nginx
    restart: always
    volumes:
      - static_volume:/usr/src/app/static
      - media_volume:/usr/src/app/media
    ports:
      - "1350:80"
    depends_on:
      - web
volumes:
  static_volume:
  media_volume:
  app_volume:
Dockerfile.preprod
# Pull the official base image
FROM python:3.8.3-alpine
# Set a work directory
WORKDIR /usr/src/app
# Set environment variables (key=value form)
ENV PYTHONDONTWRITEBYTECODE=1 \
    PYTHONUNBUFFERED=1
# Install all system dependencies in one cached layer; --no-cache keeps the
# apk index out of the image (original used three layers and left the cache).
RUN apk add --no-cache \
    # psycopg2 build deps
    postgresql-dev gcc g++ python3-dev musl-dev \
    # lxml / crypto / client tooling
    libxml2-dev libxslt-dev libffi-dev libgcc openssl-dev curl postgresql-client \
    # Pillow image-format deps + editor
    jpeg-dev zlib-dev freetype-dev lcms2-dev openjpeg-dev tiff-dev tk-dev tcl-dev nano \
    # xgettext for i18n
    gettext
RUN pip3 install --no-cache-dir psycopg2 psycopg2-binary
# Install dependencies
COPY requirements/ requirements/
RUN pip install --no-cache-dir --upgrade pip && pip install --no-cache-dir -r requirements/preprod.txt
# Copy the entrypoint.sh file
COPY entrypoint.preprod.sh .
# Copy the initdata sql file
COPY initdata.preprod.sql .
# Copy the project's files
COPY . .
RUN chmod +x entrypoint.preprod.sh
entrypoint.preprod.sh
#!/bin/sh
# Wait until Postgres accepts TCP connections before running management commands.
if [ "$DATABASE" = "postgres" ]
then
    echo "Waiting for postgres..."
    while ! nc -z "$SQL_HOST" "$SQL_PORT"; do
      sleep 0.1
    done
    echo "PostgreSQL started"
fi
# python manage.py flush --no-input
python manage.py makemigrations
python manage.py migrate
python manage.py collectstatic --no-input
python manage.py makemessages -l fr
python manage.py compilemessages
# BUG FIX: was `exec "$#"` — $# expands to the argument COUNT, not the
# arguments. "$@" hands the compose `command` over as PID 1.
exec "$@"