I'm trying to deploy a simple Node.js app to an AWS EC2 server with Docker. The server does not respond, but the app runs correctly, as expected, on my local machine when started with
docker-compose up -d --build
index.js
require("dotenv").config();
const express = require("express");
const app = express();
app.use("/api", (req, res) => {
res.status(200).send("Test Deployment");
});
const port = process.env.PORT;
app.listen(port, () => {
console.log("server runs on port =", port);
});
Dockerfile
FROM node:18.13-alpine
WORKDIR /app
COPY package.json .
RUN npm install --only=production;
COPY . .
EXPOSE 3000
CMD ["npm", "run", "start"]
docker-compose.yml
version: "3"
services:
backend:
container_name: backend
ports:
- "3000:3000"
build: .
environment:
PORT: "3000"
command: npm run start
backend container logs on server
test-docker$ docker logs backend -f
> test-node@1.0.0 start
> node .
server runs on port = 3000
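Since the container logs show the app listening on port 3000, one way to narrow this down (a rough sketch; the public IP and security-group ID below are placeholders) is to check reachability from the EC2 host first and only then from outside. If the app answers locally but not remotely, the instance's security group or a host firewall is a more likely culprit than Docker:
# On the EC2 instance: confirm the port mapping and hit the app locally
docker ps --format '{{.Names}}\t{{.Ports}}'
curl -v http://localhost:3000/api
# From your own machine: hit the instance's public address
curl -v http://<ec2-public-ip>:3000/api
# If only the remote curl fails, check that the security group attached to
# the instance allows inbound TCP 3000
aws ec2 describe-security-groups --group-ids sg-0123456789abcdef0 \
  --query 'SecurityGroups[].IpPermissions'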
Related
docker-compose.yml
version: "3.4"
services:
helen.api:
image: ${DOCKER_REGISTRY-}helenapi
build:
context: .
dockerfile: HelenWebAPI/Dockerfile
docker-compose.override.yml
version: '3.4'
services:
  helen.api:
    environment:
      - ASPNETCORE_ENVIRONMENT=Development
      - ASPNETCORE_URLS=https://+:443;http://+:80
    ports:
      - "5001:443"
      - "5000:80"
    volumes:
      - ~/.aspnet/https:/root/.aspnet/https:ro
      - ~/.microsoft/usersecrets:/root/.microsoft/usersecrets:ro
Dockerfile
#See https://aka.ms/containerfastmode to understand how Visual Studio uses this Dockerfile to build your images for faster debugging.
FROM mcr.microsoft.com/dotnet/aspnet:6.0 AS base
WORKDIR /app
EXPOSE 80
EXPOSE 443
FROM mcr.microsoft.com/dotnet/sdk:6.0 AS build
WORKDIR /src
COPY ["HelenWebAPI/Helen.API.csproj", "HelenWebAPI/"]
COPY ["Helen.Globals/Helen.Globals.csproj", "Helen.Globals/"]
RUN dotnet restore "HelenWebAPI/Helen.API.csproj"
COPY . .
WORKDIR "/src/HelenWebAPI"
RUN dotnet build "Helen.API.csproj" -c Release -o /app/build
FROM build AS publish
RUN dotnet publish "Helen.API.csproj" -c Release -o /app/publish
FROM base AS final
WORKDIR /app
COPY --from=publish /app/publish .
ENTRYPOINT ["dotnet", "Helen.API.dll"]
When I try to deploy, I get the following error in eb-stdouterr.log:
Unhandled exception. System.InvalidOperationException: Unable to configure HTTPS endpoint. No server certificate was specified, and the default developer certificate could not be found or is out of date.
I'm on .NET 6 and it's a Web API. I'm trying to host it on AWS Elastic Beanstalk.
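The exception means Kestrel was asked to bind an HTTPS endpoint (the override file sets ASPNETCORE_URLS to include https://+:443) but no certificate is available inside the container; the ~/.aspnet/https developer-certificate mount that works locally won't exist on the Elastic Beanstalk host. One common pattern, sketched below with placeholder values, is to let the container serve plain HTTP and terminate TLS in front of it (for example at a load balancer); serving HTTPS from the container itself would instead require supplying a certificate explicitly:
# HTTP only inside the container; TLS handled in front of it
docker run --rm -p 5000:80 \
  -e ASPNETCORE_ENVIRONMENT=Production \
  -e ASPNETCORE_URLS=http://+:80 \
  helenapi
# If the container must serve HTTPS itself, point Kestrel at a real
# certificate (paths and password are placeholders):
#   -e ASPNETCORE_Kestrel__Certificates__Default__Path=/https/cert.pfx \
#   -e ASPNETCORE_Kestrel__Certificates__Default__Password=<password> \
#   -v /path/to/certs:/https:ro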
I'm using the Node aws-sdk for a project, and I'd like to check the health of the API container (to make sure the Dockerfile builds properly before each deployment).
I need to check the health of the container like this:
steps:
  - label: 'Check Docker Health'
    command:
      - cd v2/api
      - echo AWS_ACCESS_KEY_ID=$(aws --profile bit-stg configure get aws_access_key_id) >> .env &&
        echo AWS_SECRET_ACCESS_KEY=$(aws --profile bit-stg configure get aws_secret_access_key) >> .env &&
        echo AWS_PROFILE="bit-stg" >> .env &&
        echo AWS_SDK_LOAD_CONFIG=0 >> .env &&
        echo CASSANDRA_ENV=local >> .env
      - docker build --tag api .
      - docker run --env-file .env api
      - curl "localhost:3000/swagger"
The problem I have is that the Docker container does not have access to the .aws credentials.
I've tried running the image like this: docker run --env-file .env api, with AWS_SDK_LOAD_CONFIG=0 to prevent the API from loading the AWS config, but it still doesn't work.
The only way I can make it work is by using the docker-compose.yml file, but I cannot use that in the Buildkite pipeline.
If it helps, the Dockerfile I'm using:
FROM node:16
WORKDIR /api
COPY package.json package-lock.json /api/
RUN yarn global add swagger@0.7.5 pm2@4.2.3 && yarn install
COPY . /api
EXPOSE 3000 9229
# You can enable/disable the debug options
ENV NODE_ENV=stg
ENV NODE_LOCAL=false
CMD [ "pm2-runtime", "start", "process-dev.yml" ]
HEALTHCHECK CMD curl --fail http://localhost:3000/ || exit 1
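Since the Dockerfile above already declares a HEALTHCHECK, the pipeline could also read the status Docker records instead of curling from the agent; as written, docker run stays in the foreground and no port is published, so the curl in the step would not reach the container anyway. A small sketch (the container name is arbitrary):
docker run -d --name api-check --env-file .env api
# "starting" until the HEALTHCHECK has run, then "healthy" or "unhealthy"
docker inspect --format '{{.State.Health.Status}}' api-check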
And the docker-compose.yml file I'm using (you can see here that I'm mounting my ~/.aws directory into the container):
version: '2.0'
services:
  api:
    container_name: API
    build:
      dockerfile: Dockerfile.dev
    volumes:
      - '.:/api'
      - '~/.aws/:/root/.aws:ro'
    ports:
      - "3000:3000"
      - "9229:9229"
    environment:
      AWS_ACCESS_KEY_ID: $AWS_ACCESS_KEY_ID
      AWS_SECRET_ACCESS_KEY: $AWS_SECRET_ACCESS_KEY
      AWS_PROFILE: $AWS_PROFILE
      AWS_SDK_LOAD_CONFIG: $AWS_SDK_LOAD_CONFIG
      AXIOS: ${AXIOS}
      CASSANDRA_ENV: $CASSANDRA_ENV
      SEQUELIZE: ${SEQUELIZE}
      DEBUG: swagger-tools*,sequelize*
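The parts of this compose file that actually deliver credentials can be reproduced with plain docker run in the Buildkite step: the read-only ~/.aws mount plus the variables from .env. A rough equivalent, assuming the agent user has a ~/.aws directory and the api tag from the build step:
docker build --tag api .
# Mirror the compose file: pass the .env vars, mount ~/.aws read-only,
# run detached and publish the port so the agent's curl can reach it
docker run -d --name api --env-file .env \
  -v ~/.aws:/root/.aws:ro \
  -p 3000:3000 \
  api
curl --fail http://localhost:3000/swagger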
Thanks for your help!
I'm working on a dockerized Django project that I've deployed to an Amazon EC2 instance. Currently the EC2 instance only serves HTTP, and I want to serve HTTPS, so I created a CloudFront distribution that points to my EC2 instance, but unfortunately I'm getting the following error.
error:
CloudFront attempted to establish a connection with the origin, but either the attempt failed or the origin closed the connection. We can't connect to the server for this app or website at this time. There might be too much traffic or a configuration error. Try again later, or contact the app or website owner.
If you provide content to customers through CloudFront, you can find steps to troubleshoot and help prevent this error by reviewing the CloudFront documentation.
Generated by cloudfront (CloudFront)
Request ID: Pa0WApol6lU6Ja5uBuqKVPVTJFBpkcnJQgtXMYzQP6SPBhV4CtMOVw==
docker-compose.yml
version: "3.8"
services:
db:
container_name: db
image: "postgres"
restart: always
volumes:
- ./scripts/init.sql:/docker-entrypoint-initdb.d/init.sql
- postgres-data:/var/lib/postgresql/data/
env_file:
- prod.env
app:
container_name: app
build:
context: .
restart: always
volumes:
- static-data:/vol/web
depends_on:
- db
env_file:
- prod.env
proxy:
container_name: proxy
build:
context: ./proxy
restart: always
depends_on:
- app
ports:
- 80:8000
volumes:
- static-data:/vol/static
volumes:
postgres-data:
static-data:
Dockerfile
FROM python:3
ENV PYTHONDONTWRITEBYTECODE=1
ENV PYTHONUNBUFFERED=1
WORKDIR /app
EXPOSE 8000
COPY ./core/ /app/
COPY ./scripts /scripts
# installing nano and cron service
RUN apt-get update
RUN apt-get install -y cron
RUN apt-get install -y nano
RUN pip install --upgrade pip
COPY requirements.txt /app/
# install dependencies and manage assets
RUN pip install -r requirements.txt && \
mkdir -p /vol/web/static && \
mkdir -p /vol/web/media
# files for cron logs
RUN mkdir /cron
RUN touch /cron/django_cron.log
# start cron service
RUN service cron start
RUN service cron restart
RUN chmod +x /scripts/run.sh
CMD ["/scripts/run.sh"]
I have a Django app (a URL shortener for k8s) here: https://github.com/MrAmbiG/shorty/tree/k8s. The docker-compose version works with the same Docker image, but docker run doesn't (I cannot access the app from the host, and there are no errors). Both docker and docker-compose are from docker.io and both use the same image, so why the difference?
I apologize for not posting all the contents of the files and instead linking the GitHub repo itself.
version: '3.7'
services:
  django:
    image: gajuambi/shorty
    ports:
      - 80:8001
    env_file:
      - ../.env
The following doesn't work:
docker run --name shorty -it --env-file .env gajuambi/shorty -p 8001:8001
The Docker image itself runs with no errors, but when I enter the address in the browser on the host (my Windows laptop), I get nothing.
I tried the following URLs in the browser on the host where Docker is running (my laptop):
http://localhost:8001/
http://127.0.0.1:8001/
I tried binding Django to the following addresses:
0.0.0.0
0
127.0.0.1
but no go.
ports:
  - 80:8001
I think your application is running on port 80: you tried binding the app to 0.0.0.0, so the default port will be 80, but you are forwarding port 8001 when running the docker command.
Please try:
docker run --name shorty -it --env-file .env -p 8001:80 gajuambi/shorty
Also, try opening the IP of the host machine (your computer) or the Docker bridge IP:
http://{host IP}:8001
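It may also be worth noting that in the original command that doesn't work, -p 8001:8001 appears after the image name, so Docker passes it to the container as arguments rather than publishing the port; all docker run options have to come before the image. A corrected shape (the container-side port should match whatever the entrypoint actually binds to):
# Options must precede the image name; anything after it becomes container args
docker run --name shorty -it --env-file .env -p 8001:80 gajuambi/shorty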
I updated the entrypoint command to
daphne shorty.asgi:application -b 0 -p 8000
Currently docker rm shorty -f && docker build -t gajuambi/shorty -f .\Deployment\Dockerfile . && docker run --name shorty -it --env-file .env -p 80:8000 gajuambi/shorty is working fine.
I have updated the github repo for reference.
https://github.com/MrAmbiG/shorty.git
I have Django and React running as separate services from the same docker-compose.yml, inside a DigitalOcean Droplet running Ubuntu. When I navigate to http://my_ip_address:3000, which is the React app, it works just fine, but when I navigate to http://my_ip_address:8000, which is the Django app, I get a 400 Bad Request error from the server.
project/back-end/Dockerfile
FROM python:3.7
ENV PYTHONDONTWRITEBYTECODE 1
ENV PYTHONUNBUFFERED 1
WORKDIR /nerdrich
COPY Pipfile Pipfile.lock /nerdrich/
RUN pip install pipenv && pipenv install --system
COPY . /nerdrich/
EXPOSE 8000
project/front-end/Dockerfile
# official node.js runtime for Docker
FROM node:12
# Create and set the directory for this container
WORKDIR /app/
# Install Application dependencies
COPY package.json yarn.lock /app/
RUN yarn install --no-optional
# Copy over the rest of the project
COPY . /app/
# Set the default port for the container
EXPOSE 3000
CMD yarn start
project/docker-compose.yml
version: "3"
services:
web:
build: ./back-end
command: python /nerdrich/manage.py runserver
volumes:
- ./back-end:/nerdrich
ports:
- "8000:8000"
stdin_open: true
tty: true
client:
build: ./front-end
volumes:
- ./front-end:/app
- /app/node_modules
ports:
- '3000:3000'
stdin_open: true
environment:
- NODE_ENV=development
depends_on:
- "web"
command:
yarn start
project/back-end/nerdrich/.env
ALLOWED_HOSTS=['165.227.82.162']
I can provide any additional information if needed.
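Since a 400 response means the request did reach Django and was rejected (most often by the ALLOWED_HOSTS check), a first debugging step could be to compare requests with different Host headers from the Droplet itself and watch the web service's logs; the service name below is taken from the compose file above:
# On the Droplet: does the response change with an explicit Host header?
curl -i http://localhost:8000/
curl -i -H "Host: 165.227.82.162" http://localhost:8000/
# Django logs an "Invalid HTTP_HOST header" warning when ALLOWED_HOSTS
# rejects a request
docker-compose logs web | grep -i "Invalid HTTP_HOST"
If that warning shows up, the ALLOWED_HOSTS value coming from the .env file is worth a closer look, since environment variables arrive as plain strings and whether they become a proper list depends on how settings.py parses them.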