How can I connect to docker db from local django?
version: '3'
services:
  redis-1:
    container_name: redis1
    build: ./docker/redis
    environment:
      X_REDIS_PORT: 7001
    networks:
      redisnet:
        ipv4_address: 10.0.0.11
    ports:
      - 7001:7001
  redis-2:
    container_name: redis2
    build: ./docker/redis
    environment:
      X_REDIS_PORT: 7002
    networks:
      redisnet:
        ipv4_address: 10.0.0.12
    ports:
      - 7002:7002
  redis-3:
    container_name: redis3
    build: ./docker/redis
    environment:
      X_REDIS_PORT: 7003
    networks:
      redisnet:
        ipv4_address: 10.0.0.13
    ports:
      - 7003:7003
  redis-4:
    container_name: redis4
    build: ./docker/redis
    environment:
      X_REDIS_PORT: 7004
    networks:
      redisnet:
        ipv4_address: 10.0.0.14
    ports:
      - 7004:7004
  redis-5:
    container_name: redis5
    build: ./docker/redis
    environment:
      X_REDIS_PORT: 7005
    networks:
      redisnet:
        ipv4_address: 10.0.0.15
    ports:
      - 7005:7005
  redis-6:
    container_name: redis6
    build: ./docker/redis
    environment:
      X_REDIS_PORT: 7006
    networks:
      redisnet:
        ipv4_address: 10.0.0.16
    ports:
      - 7006:7006
  redis-cluster:
    container_name: redis-cluster
    image: redis:latest
    command: redis-cli -p 7001 --cluster create 10.0.0.11:7001 10.0.0.12:7002 10.0.0.13:7003 10.0.0.14:7004 10.0.0.15:7005 10.0.0.16:7006 --cluster-replicas 1 --cluster-yes
    depends_on:
      - redis-1
      - redis-2
      - redis-3
      - redis-4
      - redis-5
      - redis-6
    networks:
      redisnet:
        ipv4_address: 10.0.0.2
  predixy:
    container_name: predixy
    build: ./docker/predixy
    depends_on:
      - redis-1
      - redis-2
      - redis-3
      - redis-4
      - redis-5
      - redis-6
    ports:
      - 7617:7617
    volumes:
      - ./docker/predixy/conf:/etc/predixy/conf
    networks:
      redisnet:
        ipv4_address: 10.0.0.3
networks:
  redisnet:
    driver: bridge
    ipam:
      driver: default
      config:
        - subnet: 10.0.0.0/16
This is my docker-compose.yml file, and I would like to connect Django to predixy (the cluster proxy), which is exposed on port 7617, as shown below.
# settings.py
CACHES = {
    "default": {
        "BACKEND": "django_redis.cache.RedisCache",
        "LOCATION": "redis://0.0.0.0:7617/",
        "OPTIONS": {
            "CLIENT_CLASS": "django_redis.client.DefaultClient",
        },
    }
}
failed to connect to Redis: Connection refused (os error 111)
However, Django does not seem to be able to connect to predixy.
For reference, here is the predixy configuration file as well.
# docker/predixy/predixy.conf
################################### GENERAL ####################################
## Predixy configuration file example
## Specify a name for this predixy service
## redis command INFO can get this
Name PredixyExample
## Specify listen address, support IPV4, IPV6, Unix socket
## Examples:
# Bind 127.0.0.1:7617
# Bind 0.0.0.0:7617
# Bind /tmp/predixy
## Default is 0.0.0.0:7617
Bind 10.0.0.3:7617
## Worker threads
WorkerThreads 4
## Memory limit, 0 means unlimited
## Examples:
# MaxMemory 100M
# MaxMemory 1G
# MaxMemory 0
## MaxMemory can change online by CONFIG SET MaxMemory xxx
## Default is 0
MaxMemory 0
## Close the connection after a client is idle for N seconds (0 to disable)
## ClientTimeout can change online by CONFIG SET ClientTimeout N
## Default is 0
ClientTimeout 300
## IO buffer size
## Default is 4096
# BufSize 4096
################################### LOG ########################################
## Log file path
## Unspecify will log to stdout
## Default is Unspecified
Log ./predixy.log
## LogRotate support
## 1d rotate log every day
## nh rotate log every n hours 1 <= n <= 24
## nm rotate log every n minutes 1 <= n <= 1440
## nG rotate log every nG bytes
## nM rotate log every nM bytes
## time rotate and size rotate can combine eg 1h 2G, means 1h or 2G rotate a time
## Examples:
# LogRotate 1d 2G
# LogRotate 1d
## Default is disable LogRotate
## In multi-threads, worker thread log need lock,
## AllowMissLog can reduce lock time for improve performance
## AllowMissLog can change online by CONFIG SET AllowMissLog true|false
## Default is true
# AllowMissLog false
## LogLevelSample, output a log every N
## all level sample can change online by CONFIG SET LogXXXSample N
LogVerbSample 0
LogDebugSample 0
LogInfoSample 10000
LogNoticeSample 1
LogWarnSample 1
LogErrorSample 1
################################### AUTHORITY ##################################
Include auth.conf
################################### SERVERS ####################################
Include cluster.conf
# Include sentinel.conf
# Include try.conf
################################### DATACENTER #################################
## LocalDC specify current machine dc
# LocalDC bj
## see dc.conf
# Include dc.conf
################################### COMMAND ####################################
## Custom command define, see command.conf
#Include command.conf
################################### LATENCY ####################################
## Latency monitor define, see latency.conf
Include latency.conf
## redis cluster server pool define
# cluster.conf
ClusterServerPool {
    MasterReadPriority 60
    StaticSlaveReadPriority 50
    DynamicSlaveReadPriority 50
    RefreshInterval 1
    ServerTimeout 1
    ServerFailureLimit 10
    ServerRetryTimeout 1
    Servers {
        + 10.0.0.11:7001
        + 10.0.0.12:7002
        + 10.0.0.13:7003
        + 10.0.0.14:7004
        + 10.0.0.15:7005
        + 10.0.0.16:7006
    }
}
How can I solve this problem? I've been struggling with it.
What I want is to be able to exec into a container, run redis-cli -h 10.0.0.3 -p 7617, and see the data that my local Django app has written.
Replace 0.0.0.0 with the service name predixy, as below:
CACHES = {
    "default": {
        "BACKEND": "django_redis.cache.RedisCache",
        "LOCATION": "redis://predixy:7617/",
        "OPTIONS": {
            "CLIENT_CLASS": "django_redis.client.DefaultClient",
        },
    }
}
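If the same settings.py also has to work when Django runs directly on the host (outside the compose network), one option is to read the location from an environment variable and fall back to the published port on localhost. This is only a sketch under that assumption; the REDIS_URL variable name is not part of the original setup:
# settings.py
import os

CACHES = {
    "default": {
        "BACKEND": "django_redis.cache.RedisCache",
        # Inside the compose network, set REDIS_URL=redis://predixy:7617/;
        # from the host, the published port 7617 is reachable on 127.0.0.1.
        "LOCATION": os.environ.get("REDIS_URL", "redis://127.0.0.1:7617/"),
        "OPTIONS": {
            "CLIENT_CLASS": "django_redis.client.DefaultClient",
        },
    }
}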
I get this error when running docker-compose up:
ERROR: failed to resolve image name: short-name "caddy:2-alpine" did not resolve to an alias and no unqualified-search registries are defined in "/etc/containers/registries.conf"
Here is my docker-compose.yaml file:
version: "3"
#networks:
# web:
# external: true
# bridge:
# driver: bridge
services:
# CaddyServer reverse proxy
caddy:
restart: always
image: caddy:2-alpine
ports:
- "443:443"
command: caddy reverse-proxy --from https://xxxxxx.com --to http://0.0.0.0:8000
#volumes:
# - /local/path/to/Caddyfile:/path/inside/continer/to/Caddyfile
# networks:
# - web
# - bridge
# Django web app
django:
restart: always
build: .
ports:
- "80:8000"
depends_on:
- pgdb
#environment:
# - url=https://api.backend.example.com
#command: "gunicorn config.wsgi:application --bind 0.0.0.0:8000"
#networks:
# - bridge
pgdb:
image: postgres
container_name: pgdb
environment:
- POSTGRES_DB=xxxxx
- POSTGRES_USER=xxxx
- POSTGRES_PASSWORD=xxxx
volumes:
- pg-data:/var/lib/postgresql/data/
volumes:
pg-data:
I was getting this error: short-name "postgres" did not resolve to an alias and no unqualified-search registries are defined in "/etc/containers/registries.conf".
The problem was that my Docker was not installed properly.
https://www.simplilearn.com/tutorials/docker-tutorial/how-to-install-docker-on-ubuntu
I followed this page, reinstalled Docker, and that solved it for me.
I've been trying to debug my kibana-elasticsearch service for a couple of days now.
I want to access the Kibana UI running in a Docker container from a web browser on a separate host, but the service is not reachable.
It's only for exploring and testing, so I don't need any authentication on it for now; I've locked the security group down to trusted IP addresses.
Both the Kibana and Elasticsearch containers are running, and I can access Kibana via localhost:5601.
After trawling through loads of posts and documentation, I believe the issue is with binding the container to an IP address that makes it accessible to external hosts.
my kibana config file:
# Kibana is served by a back end server. This setting specifies the port to use.
#server.port: 5601
# Specifies the address to which the Kibana server will bind. IP addresses and host names are both valid values.
# The default is 'localhost', which usually means remote machines will not be able to connect.
# To allow connections from remote users, set this parameter to a non-loopback address.
server.host: "0.0.0.0"
my docker compose file:
version: '3.6'
services:
  # creates a fluentd service with mountpoints
  fluentd:
    container_name: fluentd
    user: root
    build:
      context: .
    image: fluentd
    ports:
      - "9880:9880"
    volumes:
      # LOCAL_HOST_DIR:CONTAINER_DIR
      - /var/lib/docker/containers:/fluentd/log/containers # Example: Reading docker logs on
      - ./file:/fluentd/log/files/ # Example: Reading logs from a file
      - ./configurations:/fluentd/etc/ # where the default config file is located
      # - ./logs:/output/ # Example: Fluentd will collect logs and store them here for demo
    logging:
      driver: "local"
  # This app sends logs to the Fluentd endpoint via HTTP
  http-myapp:
    container_name: http-myapp
    image: alpine
    volumes:
      - ./http:/app
    command: [/bin/sh, -c, "apk add --no-cache curl && chmod +x /app/http_app.sh && ./app/http_app.sh"]
  # write test files to a local volume
  file-myapp:
    image: alpine
    container_name: log-generator
    # restart: always
    volumes:
      - ./file:/app
    command: [/bin/sh, -c, "chmod +x /app/app.sh && ./app/app.sh"]
  elasticsearch: # port 9200
    image: docker.elastic.co/elasticsearch/elasticsearch:7.13.1
    container_name: elasticsearch
    environment:
      - node.name=elasticsearch
      # - cluster.initial_master_nodes=elasticsearch
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xmx256m -Xms256m"
      - discovery.type=single-node
    volumes:
      - esdata:/usr/share/elasticsearch/data
    ulimits:
      memlock:
        soft: -1
        hard: -1
  kibana:
    image: docker.elastic.co/kibana/kibana:7.13.1
    container_name: kibana
    volumes:
      - ./kibana.yml:/usr/share/kibana/config/kibana.yml
    ports:
      - "5601:5601"
    environment:
      ELASTICSEARCH_URL: http://elasticsearch:9200
      ELASTICSEARCH_HOSTS: http://elasticsearch:9200
      SERVER_NAME: kibana
      # SERVER.HOST: "0.0.0.0"
volumes:
  esdata:
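Since Kibana already answers on localhost:5601 on the Docker host, a quick way to tell a security-group or firewall problem apart from a Docker publishing problem is to test the published TCP port from the remote host. A minimal sketch (the host address below is only a placeholder, not part of the original setup):
import socket

# Replace with the Docker host's address; 5601 is the published Kibana port.
HOST, PORT = "192.0.2.10", 5601

with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
    sock.settimeout(5)
    try:
        sock.connect((HOST, PORT))
        print("TCP connection to the Kibana port succeeded")
    except OSError as exc:
        print(f"TCP connection failed: {exc}")

If this fails while localhost:5601 works on the host itself, the problem is outside the container (security group, firewall, or the host's own binding) rather than in kibana.yml.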
I have a dockerized DRF project with NGINX installed in it. Everything works fine except one thing:
Django Debug Toolbar requires the INTERNAL_IPS setting to be specified in settings.py.
For Docker I use this:
hostname, _, ips = socket.gethostbyname_ex(socket.gethostname())
INTERNAL_IPS = [ip[:-1] + "1" for ip in ips]
It also works fine, but not with NGINX, since NGINX uses its own IP, (probably) defined internally or assigned dynamically by Docker.
I can get this IP from the server logs:
172.19.0.8 - - [09/Oct/2020:17:10:40 +0000] "GET /admin/ HTTP/1.0" 200 6166 "-" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36 OPR/71.0.3770.228"
and add it to settings.py:
hostname, _, ips = socket.gethostbyname_ex(socket.gethostname())
INTERNAL_IPS = [ip[:-1] + "1" for ip in ips]
INTERNAL_IPS.append('172.18.0.8')
but I expect that this IP might be different on different machines, etc., so it is not reliable enough.
So the question is: is it possible to somehow get the NGINX container's IP in settings.py dynamically, or to fix this in docker-compose somehow?
docker-compose:
version: '3.8'
volumes:
  postgres_data:
  redis_data:
  static_volume:
  media_volume:
services:
  web:
    build: .
    #command: python /code/manage.py runserver 0.0.0.0:8000
    command: gunicorn series.wsgi:application --config ./gunicorn.conf.py
    env_file:
      - ./series/.env
    volumes:
      - .:/code
      - static_volume:/home/app/web/staticfiles
      - media_volume:/home/app/web/mediafiles
    # ports:
    #   - 8000:8000
    expose:
      - 8000
    depends_on:
      - db
      - redis
      - celery
      - celery-beat
    links:
      - db
  nginx:
    build: ./nginx
    volumes:
      - static_volume:/home/app/web/staticfiles
      - media_volume:/home/app/web/mediafiles
    ports:
      # - 1337:80
      - 8000:80
    depends_on:
      - web
  db:
    build:
      context: .
      dockerfile: postgres.dockerfile
    restart: always
    env_file:
      - ./series/.env
    environment:
      - POSTGRES_DB=postgres
      - POSTGRES_USER=postgres
    volumes:
      - postgres_data:/var/lib/postgresql/data/
    ports:
      - target: 5432
        published: 5433
        protocol: tcp
        mode: host
  redis:
    image: redis:alpine
    command: >
      redis-server
      --appendonly yes
      --appendfsync no
      --auto-aof-rewrite-percentage 100
      --auto-aof-rewrite-min-size 64mb
    ports:
      - target: 6379
        published: 6380
        protocol: tcp
        mode: host
    volumes:
      - redis_data:/data
    restart: always
    environment:
      - REDIS_REPLICATION_MODE=master
  celery:
    build: .
    command: celery worker -A series --loglevel=INFO --concurrency=4 -E
    restart: always
    environment:
      - C_FORCE_ROOT=1
    volumes:
      - .:/code
    depends_on:
      - db
      - redis
    hostname: celery-main
  celery-beat:
    build: .
    command: celery -A series beat --loglevel=INFO --pidfile=
    restart: always
    volumes:
      - .:/code
    depends_on:
      - db
      - redis
    hostname: celery-beat
  flower:
    # http://localhost:8888/
    image: mher/flower
    environment:
      - CELERY_BROKER_URL=redis://redis:6379/1
      - FLOWER_PORT=8888
    depends_on:
      - celery
      - celery-beat
      - redis
    restart: always
    ports:
      - target: 8888
        published: 8888
        protocol: tcp
        mode: host
dockerfile of NGINX:
FROM nginx:1.19.0-alpine
RUN rm /etc/nginx/conf.d/default.conf
COPY nginx.conf /etc/nginx/conf.d
Thank you
Following an approach similar to the latest cookiecutter-django code which adds an internal IP address for gulp, we can add the nginx container's IP address dynamically there as well (no need to hard-code the IP address):
hostname, _, ips = socket.gethostbyname_ex(socket.gethostname())
INTERNAL_IPS += [".".join(ip.split(".")[:-1] + ["1"]) for ip in ips]
# Since our requests will be routed to Django via the nginx container, include
# the nginx IP address as internal as well
hostname, _, nginx_ips = socket.gethostbyname_ex("nginx")
INTERNAL_IPS += nginx_ips
In the above code, we use the hostname nginx to match the name of the docker service you specified in your docker-compose file. (I had the same issue as you, and this worked for me.)
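If the settings module is ever imported outside the compose network (for example when running management commands directly on the host), the lookup of the nginx hostname will raise socket.gaierror at import time. A hedged variant of the same idea that degrades gracefully in that case:
import socket

hostname, _, ips = socket.gethostbyname_ex(socket.gethostname())
INTERNAL_IPS = [".".join(ip.split(".")[:-1] + ["1"]) for ip in ips]

# The "nginx" hostname only resolves inside the compose network, so guard the
# lookup to keep non-Docker runs from failing when settings are imported.
try:
    _, _, nginx_ips = socket.gethostbyname_ex("nginx")
    INTERNAL_IPS += nginx_ips
except socket.gaierror:
    pass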
I have the following configuration in settings.py:
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2', # Add 'postgresql$
        'NAME': 'database1',          # Or path to database file if $
        # The following settings are not used with sqlite3:
        'USER': 'database1_role',
        'PASSWORD': 'database1_password',
        'HOST': 'database1',          # Empty for localhost throu$
        'PORT': '5432',               # Set to empty string for defaul$
        'ATOMIC_REQUESTS': True
    }
}
docker-compose.yml:
version: '3'
services:
  # web container, with django + gunicorn
  djangoapp:
    build: .
    environment:
      - DJANGO_SETTINGS_MODULE
    volumes:
      - .:/opt/services/djangoapp/src
      - static:/opt/services/djangoapp/static
      - media:/opt/services/djangoapp/media
    networks:
      - database1_network
      - nginx_network
    depends_on:
      - database1
  # reverse proxy container (nginx)
  nginx:
    image: nginx:1.13
    ports:
      - 8000:80
    volumes:
      - ./config/nginx/conf.d:/etc/nginx/conf.d
      - static:/opt/services/djangoapp/static
      - media:/opt/services/djangoapp/media
    networks:
      - nginx_network
    depends_on:
      - djangoapp
  # database containers, one for each db
  database1:
    image: postgres:10
    environment:
      POSTGRES_USER: database1_role
      POSTGRES_PASSWORD: database1_password
      POSTGRES_DB: database1
    ports:
      - "5432"
    volumes:
      - database1_volume:/var/lib/postgresql/data
    networks:
      - database1_network
networks:
  database1_network:
    driver: bridge
  nginx_network:
    driver: bridge
volumes:
  database1_volume:
  static:
  media:
I still receive the following error whenever I try to access my site in a web browser:
OperationalError at / FATAL: password authentication failed for user "database1_role"
Previously I had a message that my database did not even exist / psycopg2 could not make any connection to it.
I solved that by using the same database name in both places (settings.py and POSTGRES_DB), but right now I have no idea what to do next.
In order to run this setup I am using these two commands:
docker-compose build
docker-compose up
The command:
docker-compose ps
clearly says that there are three components running:
Name                                Command                          State    Ports
------------------------------------------------------------------------------------
services_database1_1_f84f6d3c38e0   docker-entrypoint.sh postgres    Exit 0
services_djangoapp_1_da56c77d50ff   gunicorn -c config/gunicor ...   Exit 0
services_nginx_1_c6e0edb717c0       nginx -g daemon off;             Exit 0
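It can help to check whether the credentials themselves are rejected, independently of Django. A minimal sketch using the same values as DATABASES (run it inside the djangoapp container, e.g. via docker-compose run djangoapp python check_db.py, where check_db.py is simply a name chosen for this test):
import psycopg2

# Same parameters as DATABASES["default"] in settings.py.
conn = psycopg2.connect(
    dbname="database1",
    user="database1_role",
    password="database1_password",
    host="database1",
    port=5432,
)
print("Connected:", conn.get_dsn_parameters())
conn.close()

If this fails with the same FATAL: password authentication failed error, the problem is on the Postgres side rather than in the Django settings.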
Currently I use the official nginx Docker image plus my own 'django with uwsgi' build, and everything works fine. I want to add SSL to the project using jwilder/nginx-proxy + jrcs/letsencrypt-nginx-proxy-companion.
The structure of my project is as follows:
myproject/
| -- data/
| -- media/
| -- static/
| -- sources/
|    | -- dockerfiles/
|    |    | -- nginx/
|    |    |    | -- nginx.conf
|    |    |    | -- uwsgi_params
|    |    | -- solr/
|    |    |    | -- default/ (configs)
|    |    |    | -- Dockerfile
|    |    | -- web/
|    |    |    | -- Dockerfile
|    |    |    | -- requirements.txt
|    | -- myproject/
|    |    | -- app_1/
|    |    | -- app_2/
|    |    | -- settings/
|    |    |    | -- myproject_uwsgi.ini
|    | -- docker-compose.yml
The relevant configs are below:
# myproject/sources/docker-compose.yml
version: '2'
services:
  nginx:
    image: nginx:latest
    container_name: myproject_nginx-container
    ports:
      - "80:80"
    depends_on:
      - web
    volumes:
      - ./dockerfiles/nginx:/etc/nginx/conf.d
      - ../static:/static
      - ../media:/media
  web:
    build: ./dockerfiles/web/
    container_name: myproject_django-container
    command: bash -c 'uwsgi --ini ./settings/myproject_uwsgi.ini'
    volumes:
      - ./web:/web
      - ../static:/static
      - ../media:/media
  solr-docker:
    build: ./dockerfiles/solr/
    container_name: myproject_solr-container
    entrypoint:
      - docker-entrypoint.sh
      - solr-precreate
      - default
    ports:
      - "8983:8983"
    volumes:
      - ./dockerfiles/solr/default:/opt/solr/server/solr/mycores/default # configs
      - ../data/solr/default/data:/opt/solr/server/solr/mycores/default/data # indexes
  # other-services...
Next:
# myproject/sources/myproject/settings/myproject_uwsgi.ini
[uwsgi]
master = True
lazy-apps = True
# Number of worker processes for handling requests
# %k = cpu count
processes = %(%k * 2)
# Number of threads for handling requests
threads = %(%k * 2)
# Respawn processes that take more than ... seconds
# harakiri = 20
# Respawn processes after serving ... requests
max-requests = 5000
# Clear environment on exit
vacuum = True
# the base directory (full path)
chdir = /myproject/
# Django's wsgi file (path starting from chdir/)
module = settings.wsgi:application
# location of settings
# env = DJANGO_SETTINGS_MODULE=$(DJANGO_PROJECT_NAME).settings
socket = :8000
and one more:
# myproject/dockerfiles/nginx/nginx.conf
upstream django {
    ip_hash;
    server web:8000;
}

# Redirection from WWW to non-WWW
server {
    listen 80;
    server_name www.myproject.com;
    rewrite ^/(.*) http://myproject.com/$1 permanent;
}

server {
    listen 80 default_server;
    server_name myproject.com;
    charset utf-8;
    keepalive_timeout 5;

    location /media {
        alias /media;
    }

    location /static {
        alias /static;
    }

    location / {
        uwsgi_pass django;
        include uwsgi_params;
    }
}
The uwsgi_params file has a typical configuration, which can be seen HERE.
You can find how to convert HTTP to HTTPS in my answer below.
In order to implement HTTPS, it is necessary to add two images to the existing docker-compose.yml, jwilder/nginx-proxy and jrcs/letsencrypt-nginx-proxy-companion, and to add environment variables to the nginx service (which serves Django via uWSGI) and to solr. Everything else (the configs) can be kept as is.
So here is the final result:
# myproject/sources/docker-compose.yml
version: '2'
services:
  nginx-proxy: # <-- NEW SECTION
    image: jwilder/nginx-proxy
    container_name: myproject_nginx-proxy
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - "/var/run/docker.sock:/tmp/docker.sock:ro"
      - "/etc/nginx/conf.d"
      - "/etc/nginx/vhost.d"
      - "/usr/share/nginx/html"
      - "./volumes/proxy/certs:/etc/nginx/certs"
    labels:
      - "com.github.jrcs.letsencrypt_nginx_proxy_companion.nginx_proxy"
  letsencrypt-companion: # <-- NEW SECTION
    restart: always
    image: jrcs/letsencrypt-nginx-proxy-companion
    container_name: myproject_letsencrypt-companion-container
    volumes_from:
      - nginx-proxy
    volumes:
      - "/var/run/docker.sock:/var/run/docker.sock:ro"
      - "./volumes/proxy/certs:/etc/nginx/certs:rw"
  # ###############################
  # Old settings. ALMOST identical.
  nginx:
    image: nginx:latest
    container_name: myproject_nginx-container
    # ports: <-- REMOVE SECTION
    #   - "80:80"
    volumes:
      - ./dockerfiles/nginx:/etc/nginx/conf.d
      - ../static:/static
      - ../media:/media
    depends_on:
      - nginx-proxy # <-- NEW SECTION
      - web
    environment: # <-- NEW SECTION
      - VIRTUAL_HOST=myproject.com
      - LETSENCRYPT_HOST=myproject.com
      - LETSENCRYPT_EMAIL=info@myproject.com
  web:
    build: ./dockerfiles/web/
    container_name: myproject_django-container
    command: bash -c 'uwsgi --ini ./settings/myproject_uwsgi.ini'
    volumes:
      - ./web:/web
      - ../static:/static
      - ../media:/media
  solr-docker:
    build: ./dockerfiles/solr/
    container_name: myproject_solr-container
    entrypoint:
      - docker-entrypoint.sh
      - solr-precreate
      - default
    # ports: <-- REMOVE SECTION
    #   - "8983:8983"
    volumes:
      - ./dockerfiles/solr/default:/opt/solr/server/solr/mycores/default # configs
      - ../data/solr/default/data:/opt/solr/server/solr/mycores/default/data # indexes
    environment: # <-- NEW SECTION
      - VIRTUAL_HOST=solr.myproject.com
      - VIRTUAL_PORT=8983
      - LETSENCRYPT_HOST=solr.myproject.com
      - LETSENCRYPT_EMAIL=info@myproject.com
  # other-services...
If you want to open Solr's dashboard now, use solr.myproject.com instead of myproject.com:8983.
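Once the stack is rebuilt and running, a quick way to confirm that the companion actually obtained a certificate is to fetch it over TLS from the proxy. A minimal sketch, using the example domain from the compose file:
import ssl

# Returns the PEM-encoded certificate served by nginx-proxy on port 443.
cert = ssl.get_server_certificate(("myproject.com", 443))
print(cert[:200])

If the certificate is not the expected Let's Encrypt one, check the logs of the nginx-proxy and letsencrypt-companion containers first.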