I am developing on Windows and trying to run the Django application in a Linux container with Gunicorn and Nginx, with the aim of deploying it to a Linux machine in production.
I mostly used the post Connect docker python to SQL server with pyodbc as a guide, but I think I have tried every solution found online for this error.
If I ping the DB server from the container, it responds, so port 1433 is open and everything should be good to go. But for some reason I'm getting the error django.db.utils.ProgrammingError: ('42000', "[42000] [FreeTDS][SQL Server]Login failed for user
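Note that ping only proves ICMP reachability, not that TCP port 1433 accepts connections; the "Login failed" response itself is actually stronger evidence, since it means a TCP connection to the server succeeded. A direct check from inside the container, as a minimal sketch reusing the placeholder address above:
import socket

# TCP-level reachability test; a successful ping alone would not prove this.
with socket.create_connection(("123.45.6.78", 1433), timeout=5):
    print("TCP 1433 reachable")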
Django settings.py
DATABASES = {
    'default': {
        'ENGINE': "sql_server.pyodbc",
        'NAME': 'database1',
        'HOST': '123.45.6.78',
        'PORT': '1433',
        'USER': "user",
        'PASSWORD': "pswd",
        'OPTIONS': {
            "driver": "FreeTDS",
            "host_is_server": True,
            "unicode_results": True,
            "extra_params": "tds_version=7.3",
        },
    }
}
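Since the error is raised by the driver stack rather than by Django itself, it can help to reproduce the login with pyodbc directly, bypassing the ORM. A minimal sketch, assuming the same placeholder credentials as the settings above:
import pyodbc

# Run inside the container (e.g. a plain python3 shell); mirrors settings.py.
conn_str = (
    "DRIVER={FreeTDS};"
    "SERVER=123.45.6.78;"
    "PORT=1433;"
    "DATABASE=database1;"
    "UID=user;"
    "PWD=pswd;"
    "TDS_Version=7.3;"
)
conn = pyodbc.connect(conn_str)
print(conn.execute("SELECT @@VERSION").fetchone()[0])
If this fails with the same 42000 error, the problem lies in the FreeTDS/ODBC configuration or the credentials, not in the Django settings.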
Dockerfile
# start from an official image
FROM python:3
# arbitrary location choice: you can change the directory
RUN mkdir -p /opt/services/djangoapp/src
WORKDIR /opt/services/djangoapp/src
#Install FreeTDS and dependencies for PyODBC
RUN apt-get update \
&& apt-get install unixodbc -y \
&& apt-get install unixodbc-dev -y \
&& apt-get install freetds-dev -y \
&& apt-get install freetds-bin -y \
&& apt-get install tdsodbc -y \
&& apt-get install --reinstall build-essential -y
# populate "ocbcinst.ini"
RUN echo "[FreeTDS]\n\
TDS_Version = '7.3'\n\
Description = FreeTDS unixODBC Driver\n\
Driver = /usr/lib/x86_64-linux-gnu/odbc/libtdsodbc.so\n\
Setup = /usr/lib/x86_64-linux-gnu/odbc/libtdsS.so" >> /etc/odbcinst.ini
# modify "freetds.conf"
RUN echo "[mssql]\n\
host = 172.30.2.18\n\
port = 1433\n\
tds version = 7.3" >> /etc/freetds/freetds.conf
# Debian's default OpenSSL config raises the TLS floor to TLSv1.2/SECLEVEL=2; relax it for servers that only speak TLS 1.0
RUN echo MinProtocol = TLSv1.0 >> /etc/ssl/openssl.cnf
RUN echo CipherString = DEFAULT@SECLEVEL=1 >> /etc/ssl/openssl.cnf
# install our dependencies
# we use --system flag because we don't need an extra virtualenv
COPY Pipfile Pipfile.lock /opt/services/djangoapp/src/
RUN pip install pipenv && pipenv install --system
# copy our project code
COPY . /opt/services/djangoapp/src
# expose the port 8000
EXPOSE 8000
# define the default command to run when starting the container
CMD ["gunicorn", "--chdir", "app", "--bind", ":8000", "config.wsgi:application"]
docker-compose.yml
version: '3'
services:
  djangoapp:
    build: .
    volumes:
      - .:/opt/services/djangoapp/src
      - /static:/static
    networks:  # <-- here
      - nginx_network
  nginx:
    image: nginx:1.13
    ports:
      - 8000:80
    volumes:
      - ./config/nginx/conf.d:/etc/nginx/conf.d
      - /static:/static
    depends_on:
      - djangoapp
    networks:  # <-- here
      - nginx_network
networks:  # <-- and here
  nginx_network:
    driver: bridge
Pipfile
[[source]]
name = "pypi"
url = "https://pypi.org/simple"
verify_ssl = true
[dev-packages]
[packages]
django = "==2.1.0"
pyodbc = "==4.0.28"
django-pyodbc-azure = "*"
django-datatables-view = "*"
gunicorn = "*"
whitenoise = "*"
[requires]
python_version = "3.8"
I finally solved it myself: I had to add port 1433 to the nginx service in the .yml file, like this:
nginx:
  image: nginx:1.13
  ports:
    - 8000:80
    - 1433:1433
  volumes:
    - ./config/nginx/conf.d:/etc/nginx/conf.d
    - /static:/static
  depends_on:
    - djangoapp
  networks:  # <-- here
    - nginx_network
Here is my Dockerfile:
FROM python:3.10.4
LABEL maintainer="Louis Jefferson Zhang <louis.ljz08@gmail.com>"
# Set environment variables
ENV PIP_DISABLE_PIP_VERSION_CHECK 1
ENV PYTHONDONTWRITEBYTECODE 1
ENV PYTHONUNBUFFERED 1
# Build Dependencies for Opencv, Dlib and Psycopg2
RUN apt-get update && apt-get install -y \
build-essential \
cmake \
libopencv-dev \
libdlib-dev \
libpq-dev \
&& rm -rf /var/lib/apt/lists/*
# Installation
RUN pip install opencv-python dlib psycopg2
# MAIN
WORKDIR /code
COPY requirements.txt .
RUN pip install -r requirements.txt
COPY ./fr_as .
Here is my Docker Compose:
version: "3.9"
services:
web:
build: .
command: python /code/fr_as/manage.py runserver 0.0.0.0:8000
volumes:
- .:/code
ports:
- 8000:8000
depends_on:
- db
devices:
- /dev/video0:/dev/video0
privileged: true
db:
image: postgres:15
volumes:
- postgres_data:/var/lib/postgresql/data/
environment:
- "POSTGRES_HOST_AUTH_METHOD=trust"
volumes:
postgres_data:
Here is my requirements.txt:
Django
django-cleanup
django-object-actions
django_user_agents
face-recognition
imutils
pillow
I built the container using this command:
docker-compose up -d --build
And an error appeared when running the code below:
cap = cv2.VideoCapture(0)
_, img = cap.read()
Here is the error message:
OpenCV(4.7.0) /io/opencv/modules/imgproc/src/resize.cpp:4062: error: (-215:Assertion failed) !ssize.empty() in function 'resize'
Looking at the Variable Watch window, the values for these variables are:
_ = False
img = None
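The assertion fires inside a later resize call because the frame is empty: cap.read() returned (False, None), meaning the capture device never produced an image inside the container. The devices: mapping also assumes /dev/video0 exists on the Docker host, which it will not under Docker Desktop on Windows or macOS. A defensive sketch that fails earlier with a clearer message:
import cv2

cap = cv2.VideoCapture(0)  # assumes the container can actually see /dev/video0
if not cap.isOpened():
    raise RuntimeError("camera device could not be opened inside the container")
ok, img = cap.read()
if not ok or img is None:
    raise RuntimeError("camera opened but returned no frame")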
I am trying to run python manage.py collectstatic in Docker, but nothing works. My Python project misses some icons, and this command will solve the issue, but I can't figure out where to place the command. I have read several questions here but no luck.
Below is my docker-compose.ci.stag.yml file:
version: "3.7"
services:
web:
build:
context: .
dockerfile: Dockerfile.staging
cache_from:
- gingregisr*ty.azurecr.io/guio-tag:tag
image: gingregisrty.azurecr.io/guio-tag:tag
expose:
- 7000
env_file: .env
Then my docker-compose.staging.yml file:
version: '3.5'
# sudo docker login -p <password> -u <username>
services:
  api:
    container_name: api
    image: gingregisrty.azurecr.io/guio-tag:tag
    ports:
      - 7000:7000
    restart: unless-stopped
    env_file:
      - .env
    networks:
      - app-network
  watchtower:
    image: containrrr/watchtower
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - /root/.docker/config.json:/config.json
    command: --interval 30
    environment:
      - WATCHTOWER_CLEANUP=true
    networks:
      - app-network
  nginx-proxy:
    container_name: nginx-proxy
    image: jwilder/nginx-proxy:0.9
    restart: always
    ports:
      - 443:443
      - 90:90
    volumes:
      - certs:/etc/nginx/certs
      - html:/usr/share/nginx/html
      - vhost:/etc/nginx/vhost.d
      - /var/run/docker.sock:/tmp/docker.sock:ro
    depends_on:
      - api
    networks:
      - app-network
  nginx-proxy-letsencrypt:
    image: jrcs/letsencrypt-nginx-proxy-companion
    env_file:
      - .env.prod.proxy-companion
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
      - certs:/etc/nginx/certs
      - html:/usr/share/nginx/html
      - vhost:/etc/nginx/vhost.d
      - acme:/etc/acme.sh
    depends_on:
      - nginx-proxy
    networks:
      - app-network
networks:
  app-network:
    driver: bridge
volumes:
  certs:
  html:
  vhost:
  acme:
Then my Dockerfile.staging file:
# ./django-docker/app/Dockerfile
FROM python:3.7.5-buster
# set work directory
WORKDIR /opt/app
# Add current directory code to working directory
ADD . /opt/app/
# set environment variables
# Prevents Python from writing pyc files to disc.
ENV PYTHONDONTWRITEBYTECODE 1
# Prevents Python from buffering stdout and stderr.
ENV PYTHONUNBUFFERED 1
# Copy firebase file
# COPY afro-mobile-test-firebase-adminsdk-cspoa.json
# Install system dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
tzdata \
python3-setuptools \
python3-pip \
python3-dev \
python3-venv \
git \
&& \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
# install environment dependencies
RUN pip3 install --upgrade pip
# Install project dependencies
RUN pip3 install -r requirements.txt
EXPOSE 7000
# copy project
COPY . /opt/app/
CMD ["bash", "start-app.sh"]
Then my start-app.sh file:
#!/bin/bash
# Run migrations
python manage.py migrate
# Run tests
# python manage.py test
# Run collectstatic
python manage.py collectstatic
echo 'COLLECT STATIC DONE ********'
echo $PORT
# Start server
# python manage.py runserver 0.0.0.0:$PORT
gunicorn server.wsgi:application --bind 0.0.0.0:$PORT
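Note that in this setup collectstatic only executes when a container starts from the image (start-app.sh is the CMD); the CI job below only builds and pushes the image, so the script never runs during the build. Run non-interactively, collectstatic also needs --noinput, or it can block on the overwrite prompt. If the statics should instead be baked into the image at build time, the equivalent call can be made through Django's management API; a sketch, assuming DJANGO_SETTINGS_MODULE is available at that point in the build:
import django
from django.core.management import call_command

django.setup()  # requires DJANGO_SETTINGS_MODULE in the environment
call_command("collectstatic", interactive=False, verbosity=1)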
I'm using GitLab CI to automate the pipeline, so here is my gitlab.yml build script:
# Build and Deploy to Azure.
build-dev:
  stage: build
  before_script:
    - export IMAGE=$CI_REGISTRY/$CI_PROJECT_NAMESPACE/$CI_PROJECT_NAME
  script:
    - apk add --no-cache bash
    - chmod +x ./setup_env.sh
    - bash ./setup_env.sh
    - docker login $AZ_REGISTRY_IMAGE -u $AZ_USERNAME_REGISTRY -p $AZ_PASSWORD_REGISTRY
    - docker pull $AZ_REGISTRY_IMAGE/guio-tag:tag || true
    - docker-compose -f docker-compose.ci.stag.yml build
    - docker push $AZ_REGISTRY_IMAGE/guio-tag:tag
  only:
    - develop
    - test-branch
The build runs successfully, but I am sure python manage.py collectstatic is not run. How best can I do this?
Hi, I am dockerizing my Django/Postgres application. I am specifying the user as admin, but I am still getting connected to the Postgres DB as root and getting errors.
Dockerfile
FROM ubuntu:20.04
RUN apt update && apt install python3-pip python3-dev -y
ENV PYTHONUNBUFFERED 1
ENV PYTHONDONTWRITEBYTECODE 1
WORKDIR /code
COPY requirements.txt /code/
RUN pip3 install --upgrade pip
RUN apt-get install -y libffi-dev
RUN pip3 install cffi
RUN pip3 install -r requirements.txt
COPY ./entrypoint.sh .
RUN sed -i 's/\r$//g' /code/entrypoint.sh
RUN chmod +x /code/entrypoint.sh
COPY . /code
RUN python3 manage.py collectstatic --no-input
ENTRYPOINT ["/code/entrypoint.sh"]
entrypoint.sh
#!/bin/sh
if [ "$DATABASE" = "postgres" ]
then
echo "Waiting for postgres..."
while ! nc -z $SQL_HOST $SQL_PORT; do
sleep 0.1
done
echo "PostgreSQL started"
fi
exec "$#"
docker-compose.yml
version: "3.3"
services:
db_new:
image: postgres:12.0-alpine
container_name: db_new
ports:
- 5432:5432
volumes:
- postgres_data:/var/lib/postgresql/data/
environment:
- POSTGRES_USER=admin
- POSTGRES_PASSWORD=admin
- POSTGRES_DB=docker2
redis:
image: "redis:alpine"
web:
restart: always
container_name: web
build:
context: .
dockerfile: Dockerfile
command: bash -c "/usr/local/bin/daphne -b 0.0.0.0 -p 8000 setup.asgi:application"
volumes:
- static_volume:/code/static/
- media_volume:/code/media/
ports:
- "8000:8000"
env_file:
- ./.env
depends_on:
- db_new
- redis
celery:
build: .
command: /usr/local/bin/celery -A setup worker -l info
depends_on:
- db_new
- redis
celery-beat:
build: .
command: /usr/local/bin/celery -A setup beat -l info --scheduler django_celery_beat.schedulers:DatabaseScheduler
depends_on:
- db_new
- redis
nginx:
build: ./nginx
volumes:
- static_volume:/code/static/
- media_volume:/code/media/
ports:
- 80:80
depends_on:
- web
volumes:
postgres_data:
static_volume:
media_volume:
Error:
2021-12-17 10:49:19.602 UTC [27] FATAL: password authentication failed for user "root"
db_new | 2021-12-17 10:49:19.602 UTC [27] DETAIL: Role "root" does not exist.
db_new | Connection matched pg_hba.conf line 95: "host all all all md5"
When I am specifying the user as admin, why is it connecting as the root user? What am I missing here? Thanks for your help.
Could you show us your .env file and the Django settings file?
You should have something like this in your Django settings file:
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'docker2',
        'USER': 'admin',
        'PASSWORD': 'admin',
        'HOST': 'db_new',
        'PORT': '5432',
    }
}
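Since the web service already loads ./.env, one way to keep the compose file and Django from drifting apart is to read the same values from the environment; a sketch, where the variable names are assumptions modeled on the compose file above:
import os

# Falls back to the compose defaults shown above if a variable is unset.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': os.environ.get('POSTGRES_DB', 'docker2'),
        'USER': os.environ.get('POSTGRES_USER', 'admin'),
        'PASSWORD': os.environ.get('POSTGRES_PASSWORD', 'admin'),
        'HOST': os.environ.get('POSTGRES_HOST', 'db_new'),
        'PORT': os.environ.get('POSTGRES_PORT', '5432'),
    }
}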
I have 3 Docker containers: web (Django), nginx, and db (PostgreSQL).
When I run the following command
docker-compose -f docker-compose.prod.yml exec web python manage.py migrate --noinput
The exact error is:
django.db.utils.OperationalError: could not connect to server: Connection refused
Is the server running on host "localhost" (127.0.0.1) and accepting
TCP/IP connections on port 5432?
could not connect to server: Address not available
Is the server running on host "localhost" (::1) and accepting
TCP/IP connections on port 5432?
docker-compose.prod.yml
version: '3.7'
services:
  db:
    image: postgres:12.0-alpine
    volumes:
      - postgres_data:/var/lib/postgresql/data/
    env_file:
      - ./.envs/.db
  web:
    build:
      context: ./tubscout
      dockerfile: Dockerfile.prod
    command: gunicorn hello_django.wsgi:application --bind 0.0.0.0:8000
    volumes:
      - .static_volume:/home/app/web/staticfiles
    expose:
      - 8000
    env_file:
      - ./.envs/.prod
    depends_on:
      - db
  nginx:
    build: ./nginx
    volumes:
      - .static_volume:/home/app/web/staticfiles
    ports:
      - 1337:80
    depends_on:
      - web
volumes:
  postgres_data:
  static_volume:
Dockerfile.prod
###########
# BUILDER #
###########
# pull official base image
FROM python:3.8.3-alpine as builder
# set work directory
WORKDIR /usr/src/app
# set environment variables
ENV PYTHONDONTWRITEBYTECODE 1
ENV PYTHONUNBUFFERED 1
# install psycopg2 dependencies
RUN apk update \
&& apk add postgresql-dev gcc python3-dev musl-dev
# install dependencies
RUN pip install --upgrade pip
COPY ./requirements.txt .
RUN pip wheel --no-cache-dir --no-deps -w /usr/src/app/wheels -r requirements.txt
#########
# FINAL #
#########
# pull official base image
FROM python:3.8.3-alpine
# create directory for the app user
RUN mkdir -p /home/app
# create the app user
RUN addgroup -S app && adduser -S app -G app
# create the appropriate directories
ENV HOME=/home/app
ENV APP_HOME=/home/app/web
RUN mkdir $APP_HOME
RUN mkdir $APP_HOME/staticfiles
WORKDIR $APP_HOME
# install dependencies
RUN apk update && apk add libpq
COPY --from=builder /usr/src/app /wheels
COPY --from=builder /usr/src/app/requirements.txt .
RUN pip install --no-cache /wheels/wheels/*
# copy entrypoint.sh
COPY ./entrypoint.sh $APP_HOME
# copy project
COPY . $APP_HOME
# chown all the files to the app user
RUN chown -R app:app $APP_HOME
# change to the app user
USER app
# run entrypoint.prod.sh
ENTRYPOINT ["/home/app/web/entrypoint.sh"]
settings.py
DATABASES = {
    'default': {
        "ENGINE": os.environ.get("SQL_ENGINE", "django.db.backends.sqlite3"),
        "NAME": os.environ.get("SQL_DATABASE", os.path.join(BASE_DIR, "db.sqlite3")),
        "USER": os.environ.get("SQL_USER", "user"),
        "PASSWORD": os.environ.get("SQL_PASSWORD", "password"),
        "HOST": os.environ.get("SQL_HOST", "localhost"),
        "PORT": os.environ.get("SQL_PORT", "5432"),
    }
}
./.envs/.db
POSTGRES_USER=postgres
POSTGRES_PASSWORD=123456789
POSTGRES_DB=tubscoutdb_prod
./.envs/.prod
DEBUG=0
SECRET_KEY='#yinppohul88coi7*f+1^_*7#o9u#kf-sr*%v(bb7^k5)n_=-h'
DJANGO_ALLOWED_HOSTS=localhost 127.0.0.1 [::1]
SQL_ENGINE=django.db.backends.postgresql
SQL_DATABASE=tubscoutdb_prod
SQL_USER=postgres
SQL_PASSWORD=123456789
SQL_HOST=localhost
SQL_PORT=5432
DATABASE=postgres
Change SQL_HOST to db in your .envs/.prod file. This will let the web container reach the db container and perform the migration.
Docker Compose containers can reach each other by service name.
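To confirm the change from inside the running web container (e.g. via docker-compose -f docker-compose.prod.yml exec web python), a quick sketch that connects with psycopg2 using the same environment variables:
import os
import psycopg2

conn = psycopg2.connect(
    host=os.environ['SQL_HOST'],       # 'db', the compose service name
    port=os.environ['SQL_PORT'],
    dbname=os.environ['SQL_DATABASE'],
    user=os.environ['SQL_USER'],
    password=os.environ['SQL_PASSWORD'],
)
print(conn.get_dsn_parameters())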
Good day. I have a docker-compose.yml that contains the setup for PostgreSQL, and a Dockerfile for creating an image for Django. When I run python manage.py migrate it throws an error:
django.db.utils.OperationalError: could not translate host name "db" to address: Name does not resolve
I'm new to Django, Docker, and PostgreSQL, so I've searched for solutions online, but no luck.
docker-compose.yml
version: "3"
services:
db:
build: ./postgres
volumes:
- db-data:/var/lib/postgresql/data
ports:
- "5432:5432"
env_file:
- .dev.env
networks:
- backend
redis:
image: "redis:5.0-alpine"
volumes:
- redis-data:/data
ports:
- "6379:6379"
networks:
- backend
networks:
backend:
volumes:
db-data:
redis-data:
Dockerfile
FROM python:3.6-alpine
WORKDIR /usr/src/app
COPY requirements.txt ./
RUN mkdir -p /root/.ssh/
ADD id_rsa /root/.ssh/id_rsa
RUN chmod 700 /root/.ssh/id_rsa
RUN echo "http://dl-cdn.alpinelinux.org/alpine/edge/testing">>/etc/apk/repositories\
&& apk add --no-cache --virtual .build-deps gcc git openssh-client \
&& apk add --no-cache musl-dev \
postgresql-dev \
jpeg-dev \
zlib-dev \
gdal-dev \
proj4 \
openssl-dev \
libmagic \
libffi-dev \
python3-dev \
musl-dev \
&& ssh-keyscan github.com >> ~/.ssh/known_hosts \
&& pip install --no-cache-dir -r requirements.txt
# todo SHOULD NOT DO THE SSH KEYSCAN. SHOULD USE PUBLIC REPO INSTEAD
COPY . .
EXPOSE 8000
VOLUME ["/usr/src/app/storage"]
CMD ["python", "manage.py", "runserver", "0.0.0.0:8000"]
settings.py
DATABASES = {
    'default': {
        'ENGINE': 'django.contrib.gis.db.backends.postgis',
        'NAME': os.environ['POSTGRES_DB'],
        'USER': os.environ['POSTGRES_USER'],
        'PASSWORD': os.environ['POSTGRES_PASSWORD'],
        'HOST': os.environ.get('POSTGRES_HOST', 'db'),
        'PORT': os.environ.get('POSTGRES_PORT', '5432'),
    }
}
It should have migrated the database and connected to it.
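"Could not translate host name" is a DNS-level failure: the process running manage.py is not attached to the compose backend network, which is the only place where the db service name resolves. A quick way to see which side of that boundary the process is on, as a sketch:
import socket

try:
    # Resolves only from a container attached to the compose 'backend' network.
    print(socket.gethostbyname('db'))
except socket.gaierror:
    print("'db' does not resolve here; run migrate from a container on the same "
          "compose network, or point POSTGRES_HOST at localhost, since the db "
          "port is published as 5432:5432")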