I have the following docker compose file:
version: '2'
services:
  app:
    build: .
    command: >
      bash -cex "
        export LC_ALL=C.UTF-8
        export LANG=C.UTF-8
        /virtualenv/bin/flask run -h 0.0.0.0 -p 5050
      "
    env_file: env
    links:
      - postgres
    ports:
      - 8080:8080
As you can see I'm using the env_file option to load my environment variables from the file env.
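That file just contains plain KEY=value lines, e.g. (illustrative values, not my real ones):
FLASK_DEBUG=1
DATABASE_URL=postgres://user:pass@postgres:5432/mydb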
Now I'm trying to deploy this container to Elastic Beanstalk.
This is my file Dockerrun.aws.json so far:
{
  "AWSEBDockerrunVersion": 2,
  "containerDefinitions": [
    {
      "name": "app",
      "image": "myorg/myimage",
      "essential": true,
      "memory": 256,
      "command": [
        "/bin/bash",
        "export LC_ALL=C.UTF-8",
        "export LANG=C.UTF-8",
        "/virtualenv/bin/flask run -h 0.0.0.0 -p 5050"
      ],
      "portMappings": [
        {
          "hostPort": 8080,
          "containerPort": 8080
        }
      ],
      "links": [
        "postgres"
      ]
    }
  ]
}
The AWS Elastic Beanstalk documentation only mentions the environment option for passing an array of environment variables, but I can't find how to pass a file instead of an array of variables.
Does someone know how to translate this docker-compose file to a Dockerrun.aws.json file properly?
Regards.
Try container-transform.
$ pip install container-transform
$ cat docker-compose.yml | container-transform -v
and it will print the ECS format to STDOUT.
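Note that, as far as I know, the Dockerrun v2 / ECS format has no env_file equivalent, so the file's variables have to end up inlined as an environment array in the container definition. A sketch of the relevant fragment, using the two variables visible in your compose command:
"environment": [
  { "name": "LC_ALL", "value": "C.UTF-8" },
  { "name": "LANG", "value": "C.UTF-8" }
]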
Related
I am getting the below error while deploying to AWS Elastic Beanstalk from Travis CI.
Service:AmazonECS, Code:ClientException, Message:Container list cannot be empty., Class:com.amazonaws.services.ecs.model.ClientException
.travis.yml:
sudo: required
language: generic
services:
  - docker
before_install:
  - docker build -t sathishpskdocker/react-test -f ./client/Dockerfile.dev ./client
script:
  - docker run -e CI=true sathishpskdocker/react-test npm test
after_success:
  - docker build -t sathishpskdocker/multi-client ./client
  - docker build -t sathishpskdocker/multi-nginx ./nginx
  - docker build -t sathishpskdocker/multi-server ./server
  - docker build -t sathishpskdocker/multi-worker ./worker
  # Log in to the docker CLI
  - echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_ID" --password-stdin
  # Take those images and push them to docker hub
  - docker push sathishpskdocker/multi-client
  - docker push sathishpskdocker/multi-nginx
  - docker push sathishpskdocker/multi-server
  - docker push sathishpskdocker/multi-worker
deploy:
  provider: elasticbeanstalk
  region: 'us-west-2'
  app: 'multi-docker'
  env: 'Multidocker-env'
  bucket_name: elasticbeanstalk-us-west-2-194531873493
  bucket_path: docker-multi
  on:
    branch: master
  access_key_id: $AWS_ACCESS_KEY
  secret_access_key: $AWS_SECRET_KEY
Dockerrun.aws.json:
{
  "AWSEBDockerrunVersion": 2,
  "containerDefintions": [
    {
      "name": "client",
      "image": "sathishpskdocker/multi-client",
      "hostname": "client",
      "essential": false,
      "memory": 128
    },
    {
      "name": "server",
      "image": "sathishpskdocker/multi-server",
      "hostname": "api",
      "essential": false,
      "memory": 128
    },
    {
      "name": "worker",
      "image": "sathishpskdocker/multi-worker",
      "hostname": "worker",
      "essential": false,
      "memory": 128
    },
    {
      "name": "nginx",
      "image": "sathishpskdocker/multi-nginx",
      "hostname": "nginx",
      "essential": true,
      "portMappings": [
        {
          "hostPort": 80,
          "containerPort": 80
        }
      ],
      "links": ["client", "server"],
      "memory": 128
    }
  ]
}
Deploying part alone failing with the error:
Service:AmazonECS, Code:ClientException, Message:Container list cannot be empty., Class:com.amazonaws.services.ecs.model.ClientException
Ah, never mind, it's my mistake. There is a typo in the Dockerrun config file, which wrongly reads containerDefintions instead of containerDefinitions.
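A quick way to catch this class of typo is to inspect the parsed key; a minimal sketch with jq (assuming it is installed):
$ jq '.containerDefinitions' Dockerrun.aws.json
null
A misspelled key shows up as null here instead of the container list.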
Thanks to everyone who took a look at my question. Cheers!
While deploying a dotnet app as a Docker container with the Multicontainer option in Elastic Beanstalk, I'm getting an error like:
2021-05-20 01:26:55 ERROR ECS task stopped due to: Task failed to start. (traveltouchapi: CannotPullContainerError: Error response from daemon: pull access denied for traveltouchapi, repository does not exist or may require 'docker login': denied: requested access to the resource is denied
postgres_image: )
2021-05-20 01:26:58 ERROR Failed to start ECS task after retrying 2 times.
2021-05-20 01:27:00 ERROR [Instance: i-0844a50e307bd8b23] Command failed on instance. Return code: 1 Output: .
Environment details for: TravelTouchApi-dev3
Application name: TravelTouchApi
Region: ap-south-1
Deployed Version: app-c1ba-210520_065320
Environment ID: e-i9t6f6vszk
Platform: arn:aws:elasticbeanstalk:ap-south-1::platform/Multi-container Docker running on 64bit Amazon Linux/2.26.0
Tier: WebServer-Standard-1.0
CNAME: TravelTouchApi-dev3.ap-south-1.elasticbeanstalk.com
Updated: 2021-05-20 01:23:27.384000+00:00
My Dockerfile is
FROM mcr.microsoft.com/dotnet/core/aspnet:3.1 AS base
WORKDIR /app
EXPOSE 80
EXPOSE 443
FROM mcr.microsoft.com/dotnet/core/sdk:3.1 AS build
# Install Node.js
RUN curl -fsSL https://deb.nodesource.com/setup_14.x | bash - \
    && apt-get install -y nodejs \
    && rm -rf /var/lib/apt/lists/*
WORKDIR /src/TravelTouchApi
COPY ["TravelTouchApi.csproj", "./"]
RUN dotnet restore "TravelTouchApi.csproj"
COPY . .
WORKDIR "/src/TravelTouchApi"
RUN dotnet build "TravelTouchApi.csproj" -c Release -o /app/build
FROM build AS publish
RUN dotnet publish "TravelTouchApi.csproj" -c Release -o /app/publish
FROM base AS final
WORKDIR /app
COPY --from=publish /app/publish .
ENTRYPOINT ["dotnet", "TravelTouchApi.dll"]
My docker-compose.yml is
version: '3.4'
networks:
  traveltouchapi-dev:
    driver: bridge
services:
  traveltouchapi:
    image: traveltouchapi:latest
    depends_on:
      - "postgres_image"
    build:
      context: .
      dockerfile: Dockerfile
    ports:
      - "80:80"
    environment:
      DB_CONNECTION_STRING: "host=postgres_image;port=5432;database=blogdb;username=bloguser;password=bloguser"
    networks:
      - traveltouchapi-dev
  postgres_image:
    image: postgres:latest
    ports:
      - "5432"
    restart: always
    volumes:
      - db_volume:/var/lib/postgresql/data
    environment:
      POSTGRES_USER: "bloguser"
      POSTGRES_PASSWORD: "bloguser"
      POSTGRES_DB: "blogdb"
    networks:
      - traveltouchapi-dev
volumes:
  db_volume:
My Dockerrun.aws.json
{
  "AWSEBDockerrunVersion": 2,
  "containerDefinitions": [
    {
      "environment": [
        {
          "name": "POSTGRES_USER",
          "value": "bloguser"
        },
        {
          "name": "POSTGRES_PASSWORD",
          "value": "bloguser"
        },
        {
          "name": "POSTGRES_DB",
          "value": "blogdb"
        }
      ],
      "essential": true,
      "image": "postgres:latest",
      "memory": 200,
      "mountPoints": [
        {
          "containerPath": "/var/lib/postgresql/data",
          "sourceVolume": "Db_Volume"
        }
      ],
      "name": "postgres_image",
      "portMappings": [
        {
          "containerPort": 5432
        }
      ]
    },
    {
      "environment": [
        {
          "name": "DB_CONNECTION_STRING",
          "value": "host=postgres_image;port=5432;database=blogdb;username=bloguser;password=bloguser"
        }
      ],
      "essential": true,
      "image": "traveltouchapi:latest",
      "name": "traveltouchapi",
      "portMappings": [
        {
          "containerPort": 80,
          "hostPort": 80
        }
      ],
      "memory": 200
    }
  ],
  "family": "",
  "volumes": [
    {
      "host": {
        "sourcePath": "db_volume"
      },
      "name": "Db_Volume"
    }
  ]
}
I think you are missing the login step before deploying the application.
Can you try using this command before deploying?
aws ecr get-login-password --region $AWS_DEFAULT_REGION | docker login --username AWS --password-stdin $AWS_DEFAULT_ACCID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com
Also, the image name in Dockerrun.aws.json must contain the full repo/tag name, e.g. 'natheesh/traveltouchapi:latest'.
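For example, the relevant fragment would look something like this (a sketch; swap in wherever your image was actually pushed):
"containerDefinitions": [
  {
    "name": "traveltouchapi",
    "image": "natheesh/traveltouchapi:latest",
    ...
  }
]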
I've been trying to deploy my Docker setup, consisting of Django, PostgreSQL and Nginx. It works fine when I do sudo docker-compose up. However, when I deploy it on AWS EB, it gives me
could not translate host name "db" to address: Name or service not known
What I've done is push my image to Docker Hub (built with sudo docker build -t myname/dockername -f Dockerfile .) and then simply run eb deploy.
File Structure
myproject
  myproject
    settings.py
    urls.py
    ...
  Dockerfile
  Dockerrun.aws.json
  manage.py
  requirements.txt
  ...
Dockerfile
FROM python:3
ENV PYTHONUNBUFFERED 1
RUN mkdir /code
WORKDIR /code
COPY requirements.txt /code/
RUN pip install -r requirements.txt
COPY . /code/
EXPOSE 8000
CMD ["sh", "on-container-start.sh"]
Dockerrun.aws.json
{
  "AWSEBDockerrunVersion": "1",
  "Image": {
    "Name": "myname/dockername:latest",
    "Update": "true"
  },
  "Ports": [
    {
      "ContainerPort": "8000"
    }
  ]
}
docker-compose.yml
version: '3'
services:
  db:
    image: postgres
    hostname: db
    networks:
      - some_network
  web:
    restart: always
    build: .
    volumes:
      - .:/code
    hostname: web
    expose:
      - "8000"
    depends_on:
      - db
    links:
      - db:db
    networks:
      - some_network
  nginx:
    image: nginx
    hostname: nginx
    ports:
      - "8000:8000"
    volumes:
      - ./config/nginx:/etc/nginx/conf.d
    depends_on:
      - web
    networks:
      - some_network
networks:
  some_network:
One thing I've realized is that when I use docker-compose up on my machine, I get 3 different containers running. However, on EB I see only one container running.
I think it's because I'm fetching the image I built from Docker Hub, and that somehow caused these 3 containers to become one, which is messing up hostname resolution? I'm still not quite sure. Help will be greatly appreciated. Thanks!
Dockerrun.aws.json should correlate with docker-compose.yml
The reason the hostname "db" could not be translated to an address is that the docker-compose.yml and Dockerrun.aws.json files describe different architectures:
There are 3 containers in docker-compose.yml
There is only 1 container in Dockerrun.aws.json
Therefore, the application tries to resolve the db hostname and cannot find it, because db is not declared in Dockerrun.aws.json.
Fix Dockerrun.aws.json
So, update your Dockerrun.aws.json. You can do it either manually or using the convenient tool micahhausler/container-transform:
a) either update it manually
You can use samples, such as:
k2works/aws-eb-docker-multi-container-sample
b) or update it using micahhausler/container-transform
You can try micahhausler/container-transform:
Transforms docker-compose, ECS, and Marathon configurations
Here is what it outputs for your case:
$ container-transform docker-compose.yml > Dockerrun.aws.json
Dockerrun.aws.json
{
  "containerDefinitions": [
    {
      "essential": true,
      "image": "postgres",
      "name": "db"
    },
    {
      "essential": true,
      "image": "nginx",
      "mountPoints": [
        {
          "containerPath": "/etc/nginx/conf.d",
          "sourceVolume": "_ConfigNginx"
        }
      ],
      "name": "nginx",
      "portMappings": [
        {
          "containerPort": 8000,
          "hostPort": 8000
        }
      ]
    },
    {
      "essential": true,
      "links": [
        "db:db"
      ],
      "mountPoints": [
        {
          "containerPath": "/code",
          "sourceVolume": "_"
        }
      ],
      "name": "web"
    }
  ],
  "family": "",
  "volumes": [
    {
      "host": {
        "sourcePath": "."
      },
      "name": "_"
    },
    {
      "host": {
        "sourcePath": "./config/nginx"
      },
      "name": "_ConfigNginx"
    }
  ]
}
Note: of course, you should fix missing settings, such as memory for the db and nginx containers.
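For example, the db definition with a limit added (a sketch; 128 is an arbitrary starting value, size it for your workload):
{
  "essential": true,
  "image": "postgres",
  "name": "db",
  "memory": 128
}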
You can omit networks altogether
According to Networking in Compose | Docker Documentation:
For example, suppose your app is in a directory called myapp, and your docker-compose.yml looks like this:
docker-compose.yml
version: "3"
services:
web:
build: .
ports:
- "8000:8000"
db:
image: postgres
ports:
- "8001:5432"
When you run docker-compose up, the following happens:
A network called myapp_default is created.
A container is created using web’s configuration. It joins the network myapp_default under the name web.
A container is created using db’s configuration. It joins the network myapp_default under the name db.
So, since all your containers are linked to the same some_network, you can omit it.
docker-compose.yml
version: '3'
services:
  db:
    image: postgres
    hostname: db
  web:
    restart: always
    build: .
    volumes:
      - .:/code
    hostname: web
    expose:
      - "8000"
    depends_on:
      - db
    links:
      - db:db
  nginx:
    image: nginx
    hostname: nginx
    ports:
      - "8000:8000"
    volumes:
      - ./config/nginx:/etc/nginx/conf.d
    depends_on:
      - web
And $ container-transform docker-compose.yml > Dockerrun.aws.json will produce:
Dockerrun.aws.json
{
  "containerDefinitions": [
    {
      "essential": true,
      "image": "postgres",
      "name": "db"
    },
    {
      "essential": true,
      "image": "nginx",
      "mountPoints": [
        {
          "containerPath": "/etc/nginx/conf.d",
          "sourceVolume": "_ConfigNginx"
        }
      ],
      "name": "nginx",
      "portMappings": [
        {
          "containerPort": 8000,
          "hostPort": 8000
        }
      ]
    },
    {
      "essential": true,
      "links": [
        "db:db"
      ],
      "mountPoints": [
        {
          "containerPath": "/code",
          "sourceVolume": "_"
        }
      ],
      "name": "web"
    }
  ],
  "family": "",
  "volumes": [
    {
      "host": {
        "sourcePath": "."
      },
      "name": "_"
    },
    {
      "host": {
        "sourcePath": "./config/nginx"
      },
      "name": "_ConfigNginx"
    }
  ]
}
I am trying to deploy a Docker container to Elastic Beanstalk in AWS. I'm repeatedly getting errors while doing so, and each time the error is related to the ENTRYPOINT that I specified in the dockerrun.aws.json. What am I doing wrong here?
The webapp uses Django, python3 and keras.
This is my Dockerfile content:
# reference: https://hub.docker.com/_/ubuntu/
FROM ubuntu:18.04
RUN apt-get update && apt-get install \
-y --no-install-recommends python3 python3-virtualenv
# Adds metadata to the image as a key value pair, example: LABEL version="1.0"
LABEL maintainer="Amir Ashraff <amir.ashraff@gmail.com>"
## Set environment variables
ENV VIRTUAL_ENV=/opt/venv
RUN python3 -m virtualenv --python=/usr/bin/python3 $VIRTUAL_ENV
ENV PATH="$VIRTUAL_ENV/bin:$PATH"
# Install dependencies:
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
# Open Ports for Web App
EXPOSE 8000
WORKDIR /manage.py
COPY . /manage.py
RUN chmod +x /manage.py
ENTRYPOINT [ "python3" ]
CMD [ "python3", "manage.py runserver 0.0.0.0:8000" ]
And this is the dockerrun.aws.json content:
{
  "AWSEBDockerrunVersion": "1",
  "Ports": [
    {
      "ContainerPort": ""
    }
  ],
  "Volumes": [
    {
      "HostDirectory": "/~/aptos",
      "ContainerDirectory": "/aptos/diabetes_retinopathy_recognition"
    }
  ],
  "Logging": "/aptos/diabetes_recognition",
  "Entrypoint": "/opt/venv/bin/python3",
  "Command": "python3 manage.py runserver 0.0.0.0:8000"
}
And this is the error from AWS logs:
Docker container quit unexpectedly on Tue Aug 20 13:03:47 UTC 2019:
/opt/venv/bin/python3: can't open file 'python3': [Errno 2] No such file
or directory.
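This error lines up with how Docker and ECS combine the two fields: Command is appended as arguments to Entrypoint, so the container effectively runs, as a sketch:
/opt/venv/bin/python3 python3 "manage.py runserver 0.0.0.0:8000"
i.e. the virtualenv's python3 is asked to open a file literally named python3, which doesn't exist.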
I have a multi-container app which I want to deploy on ElasticBeanstalk. Below are my files.
Dockerfile
FROM python:2.7
WORKDIR /app
ADD . /app
RUN apt-get update && \
    apt-get upgrade -y && \
    apt-get install -y \
        apt-utils \
        git \
        python \
        python-dev \
        libpcre3 \
        libpcre3-dev \
        python-setuptools \
        python-pip \
        nginx \
        supervisor \
        default-libmysqlclient-dev \
        python-psycopg2 \
        libpq-dev \
        sqlite3 && \
    pip install -U pip setuptools && \
    rm -rf /var/lib/apt/lists/*
RUN pip install -r requirements.txt
EXPOSE 8000
RUN chmod +x entry_point.sh
docker-compose.yml
version: "2"
services:
db:
restart: always
container_name: docker_test-db
image: postgres:9.6
expose:
- "5432"
mem_limit: 10m
environment:
- POSTGRES_NAME=postgres
- POSTGRES_USER=postgres
- POSTGRES_PASSWORD=postgres
- POSTGRES_DB=docker_test
redis:
restart: always
image: redis:3.0
expose:
- "6379"
mem_limit: 10m
web:
# replace username/repo:tag with your name and image details
restart: always
build: .
image: docker_test
container_name: docker_test-container
ports:
- "8000:8000"
environment:
- DATABASE=db
- POSTGRES_NAME=postgres
- POSTGRES_USER=postgres
- POSTGRES_PASSWORD=postgres
- POSTGRES_DB=docker_test
mem_limit: 500m
depends_on:
- db
- redis
entrypoint: ./entry_point.sh
command: gunicorn docker_test.wsgi:application -w 2 -b :8000 --timeout 120 --graceful-timeout 120 --worker-class gevent
celery:
image: docker_test
container_name: docker_test-celery
command: celery -A docker_test worker -l info
links:
- db
- redis
mem_limit: 10m
depends_on:
- web
cbeat:
image: docker_test
container_name: docker_test-cbeat
command: celery beat --loglevel=info
links:
- db
- redis
mem_limit: 10m
depends_on:
- web
It works fine when I run it on my local system. But when I upload it to Elastic Beanstalk, it gives me the following error.
ECS task stopped due to: Essential container in task exited. (celery:
db: cbeat: web: CannotPullContainerError: API error (404): pull access
denied for docker_test, repository does not exist or may require
'docker login' redis: )
I transformed docker-compose.yml to Dockerrun.aws.json using container-transform. For the above file, my Dockerrun.aws.json is the following.
{
  "AWSEBDockerrunVersion": 2,
  "containerDefinitions": [
    {
      "command": [
        "celery",
        "beat",
        "--loglevel=info"
      ],
      "essential": true,
      "image": "docker_test",
      "links": [
        "db",
        "redis"
      ],
      "memory": 10,
      "name": "cbeat"
    },
    {
      "command": [
        "celery",
        "-A",
        "docker_test",
        "worker",
        "-l",
        "info"
      ],
      "essential": true,
      "image": "docker_test",
      "links": [
        "db",
        "redis"
      ],
      "memory": 10,
      "name": "celery"
    },
    {
      "environment": [
        {
          "name": "POSTGRES_NAME",
          "value": "postgres"
        },
        {
          "name": "POSTGRES_USER",
          "value": "postgres"
        },
        {
          "name": "POSTGRES_PASSWORD",
          "value": "postgres"
        },
        {
          "name": "POSTGRES_DB",
          "value": "docker_test"
        }
      ],
      "essential": true,
      "image": "postgres:9.6",
      "memory": 10,
      "name": "db"
    },
    {
      "essential": true,
      "image": "redis:3.0",
      "memory": 10,
      "name": "redis"
    },
    {
      "command": [
        "gunicorn",
        "docker_test.wsgi:application",
        "-w",
        "2",
        "-b",
        ":8000",
        "--timeout",
        "120",
        "--graceful-timeout",
        "120",
        "--worker-class",
        "gevent"
      ],
      "entryPoint": [
        "./entry_point.sh"
      ],
      "environment": [
        {
          "name": "DATABASE",
          "value": "db"
        },
        {
          "name": "POSTGRES_NAME",
          "value": "postgres"
        },
        {
          "name": "POSTGRES_USER",
          "value": "postgres"
        },
        {
          "name": "POSTGRES_PASSWORD",
          "value": "postgres"
        },
        {
          "name": "POSTGRES_DB",
          "value": "docker_test"
        }
      ],
      "essential": true,
      "image": "docker_test",
      "memory": 500,
      "name": "web",
      "portMappings": [
        {
          "containerPort": 8000,
          "hostPort": 8000
        }
      ]
    }
  ],
  "family": "",
  "volumes": []
}
How can I resolve this problem?
Please push the image "docker_test" to either Docker Hub or ECR for Beanstalk to pull the image from. Currently it exists only on your local machine, and the ECS agent doesn't know about it.
Tag & push the docker_test image to a registry like Docker Hub or ECR, as sketched below.
Update the image repo URL in Dockerrun.aws.json.
Allow Beanstalk to pull the image.
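A minimal sketch of the tag & push (the Docker Hub username and the ECR account/region are placeholders):
# Docker Hub
docker tag docker_test <your-dockerhub-user>/docker_test:latest
docker push <your-dockerhub-user>/docker_test:latest
# or ECR (create the repository there first)
docker tag docker_test 123456789012.dkr.ecr.us-east-1.amazonaws.com/docker_test:latest
docker push 123456789012.dkr.ecr.us-east-1.amazonaws.com/docker_test:latest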
I'm not that familiar with EB, but I am pretty familiar with ECR and ECS.
I usually get that error when I try to pull an image from an empty repo on ECR, in other words the ECR repo was created but you haven't pushed any docker images to the repo yet.
This can also happen when you try to pull an image from ECR and it can't find the image version named in the tag. I suggest that you change your docker-compose.yml file to use the latest version of the images. This means that everywhere you mention the image docker_test you will need to suffix it with ":latest"
Something like this:
image: docker_test:latest
I will post my whole docker-compose.yml I made for you at the end of the reply.
I would suggest that you have a look at this doc: https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/create_deploy_docker.container.console.html. See the section "Using Images from an Amazon ECR Repository"; it explains how you can resolve the docker login issue.
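With an ECR image, the image field in Dockerrun.aws.json carries the full repository URI, along these lines (account ID and region are placeholders):
"image": "123456789012.dkr.ecr.us-east-1.amazonaws.com/docker_test:latest"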
I hope that helps. Please reply if you have any questions regarding this.
version: "2"
services:
db:
restart: always
container_name: docker_test-db
image: postgres:9.6
expose:
- "5432"
mem_limit: 10m
environment:
- POSTGRES_NAME=postgres
- POSTGRES_USER=postgres
- POSTGRES_PASSWORD=postgres
- POSTGRES_DB=docker_test
redis:
restart: always
image: redis:3.0
expose:
- "6379"
mem_limit: 10m
web:
# replace username/repo:tag with your name and image details
restart: always
build: .
image: docker_test:latest
container_name: docker_test-container
ports:
- "8000:8000"
environment:
- DATABASE=db
- POSTGRES_NAME=postgres
- POSTGRES_USER=postgres
- POSTGRES_PASSWORD=postgres
- POSTGRES_DB=docker_test
mem_limit: 500m
depends_on:
- db
- redis
entrypoint: ./entry_point.sh
command: gunicorn docker_test.wsgi:application -w 2 -b :8000 --timeout 120 --graceful-timeout 120 --worker-class gevent
celery:
image: docker_test
container_name: docker_test-celery
command: celery -A docker_test worker -l info
links:
- db
- redis
mem_limit: 10m
depends_on:
- web
cbeat:
image: docker_test:latest
container_name: docker_test-cbeat
command: celery beat --loglevel=info
links:
- db
- redis
mem_limit: 10m
depends_on:
- web