I have an .Rmd file, along with a .sql file that the .Rmd reads, that I'm trying to deploy in ShinyProxy. I can run it from within RStudio on my Mac.
The application loads and I can see it in ShinyProxy, but when I click on it, it launches, says "please wait", and then fails with java.lang.StackOverflowError. I tried increasing the stack size with JAVA_OPTS in the Dockerfile.
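One thing worth noting before the logs: the StackOverflowError appears in shinyproxy.log, so the overflowing stack belongs to the ShinyProxy JVM, and a JAVA_OPTS value baked into the app image only affects JVMs started inside that container. If the overflow really is in ShinyProxy, the stack-size flag has to be passed to the JVM that launches shinyproxy.jar. A minimal sketch, where the jar path and -Xss value are assumptions, not from the original setup:

# Hedged sketch: pass the stack-size flag to the ShinyProxy JVM itself.
# The jar location and size below are illustrative only.
java -Xss8M -jar /opt/shinyproxy/shinyproxy.jar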
I do see this in shinyproxy.log:
java.lang.reflect.InvocationTargetException: null
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[na:1.8.0_332]
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) ~[na:1.8.0_332]
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[na:1.8.0_332]
at java.lang.reflect.Method.invoke(Method.java:498) ~[na:1.8.0_332]
...
Caused by: javax.naming.NoInitialContextException: Need to specify class name in environment or system property, or as an applet parameter, or in an application resource file: java.naming.factory.initial
at javax.naming.spi.NamingManager.getInitialContext(NamingManager.java:673) ~[na:1.8.0_332]
at javax.naming.InitialContext.getDefaultInitCtx(InitialContext.java:313) ~[na:1.8.0_332]
at javax.naming.InitialContext.getURLOrDefaultInitCtx(InitialContext.java:350) ~[na:1.8.0_332]
at javax.naming.InitialContext.lookup(InitialContext.java:417) ~[na:1.8.0_332]
... 140 common frames omitted
Dockerfile:
FROM openanalytics/r-base
MAINTAINER John Reber "John.Reber@jefferson.edu"

ENV JAVA_HOME /usr/lib/jvm/java-11-openjdk-amd64
RUN export JAVA_HOME
ENV JAVA_OPTS "-Xms4G -Xmx8G -Xss2G"
RUN export JAVA_OPTS

# Install Java for rJava
RUN apt-get update && \
    apt-get install -y default-jdk && \
    apt-get install -y default-jre && \
    apt-get install -y ca-certificates-java && \
    rm -rf /var/lib/apt/lists/*

RUN ["java", "-version"]

CMD javareconf

RUN apt-get update && apt-get install -y \
    libcurl4-openssl-dev \
    # libcurl4-gnutls-dev \
    libssl-dev \
    libxml2-dev && \
    rm -rf /var/lib/apt/lists/*

RUN apt-get update && apt-get install -y \
    libharfbuzz0b && \
    rm -rf /var/lib/apt/lists/*

RUN apt-get update && apt-get install -y \
    sudo \
    pandoc \
    pandoc-citeproc \
    libcairo2-dev \
    libxt-dev \
    libssh2-1-dev && \
    rm -rf /var/lib/apt/lists/*

WORKDIR /opt/oracle
RUN apt-get update && apt-get install -y libaio1 wget unzip \
    && wget https://download.oracle.com/otn_software/linux/instantclient/instantclient-basiclite-linuxx64.zip \
    && unzip instantclient-basiclite-linuxx64.zip \
    && rm -f instantclient-basiclite-linuxx64.zip \
    && cd /opt/oracle/instantclient* \
    && rm -f *jdbc* *occi* *mysql* *README *jar uidrvci genezi adrci \
    && echo /opt/oracle/instantclient* > /etc/ld.so.conf.d/oracle-instantclient.conf \
    && ldconfig

WORKDIR /
RUN apt-get update && apt-get install -y \
    libmysql++-dev \
    unixodbc-dev \
    libpq-dev && \
    rm -rf /var/lib/apt/lists/*

#RUN apt-get update && apt-get install -y \
#    libxml2 \
#    libssl1.1 && \
#    rm -rf /var/lib/apt/lists/*

CMD javareconf
RUN ["java", "-version"]

# install needed R packages
#RUN R -e "install.packages(c('flexdashboard', 'knitr', 'plotly', 'httpuv', 'shiny', 'rJava', 'RJDBC', 'dplyr', 'readr', 'DT', 'lubridate', 'rmarkdown'), dependencies = TRUE, repo='http://cran.r-project.org')"
RUN R -e "install.packages(c('shiny'), dependencies = TRUE, repo='https://cloud.r-project.org')"
RUN R -e "install.packages(c('flexdashboard', 'dplyr', 'rJava', 'RJDBC', 'readr', 'DT', 'lubridate', 'rmarkdown'), dependencies = TRUE, repo='https://cloud.r-project.org')"
# 'sysfonts','gifski', 'Cairo', 'tidyverse',

# make directory and copy Rmarkdown flexdashboard file into it
RUN mkdir -p /prmc
COPY prmc/PRMC.Rmd /prmc/PRMC.Rmd
#COPY prmc/PRMC_Local.Rmd /prmc/PRMC_Local.Rmd
COPY prmc/prmc.sql /prmc/prmc.sql
#COPY prmc/PRMC_ACCRUAL.csv /prmc/PRMC_ACCRUAL.csv
COPY prmc/ojdbc11.jar /prmc/ojdbc11.jar

# Copy Rprofile.site to the image
COPY Rprofile.site /usr/local/lib/R/etc/

# make all app files readable (solves an issue when developing on Windows but building on Ubuntu)
RUN chmod -R 755 /prmc

# expose port on Docker container
EXPOSE 3838

# run flexdashboard on the exposed port in the Docker container
CMD ["R", "-e", "rmarkdown::run('/prmc/PRMC.Rmd', shiny_args = list(port = 3838, host = '0.0.0.0'))"]
application.yml:
proxy:
  # title: Open Analytics Shiny Proxy
  title: SKCC Open Analytics ShinyProxy
  # logo-url: https://www.openanalytics.eu/shinyproxy/logo.png
  logo-url: https://ewebapp01pa.jefferson.edu/includes/images/logo-2014.jpg
  landing-page: /
  heartbeat-rate: 10000
  heartbeat-timeout: 60000
  port: 8081
  # authentication: keycloak
  authentication: simple
  admin-groups: admin
  useForwardHeaders: true
  # Example: 'simple' authentication configuration
  users:
    - name: jack
      password: XXXXXXXX
      groups: scientists, admin
    - name: jeff
      password: XXXXXXXXX
      groups: mathematicians
  # keycloak authentication
  keycloak:
    auth-server-url: https://kc.kcc.tju.edu/auth
    realm: shinyproxy
    public-client: true
    resource: shinyproxy
    credentials-secret: s2NwbneBKh10wG0fHjZjevGnLlNTt44h
    use-resource-role-mappings: false
  # Docker configuration
  docker:
    url: http://localhost:2375
    port-range-start: 20000
  specs:
    - id: 01_hello
      display-name: Hello Application
      description: Application which demonstrates the basics of a Shiny app
      container-cmd: ["R", "-e", "shinyproxy::run_01_hello()"]
      container-image: openanalytics/shinyproxy-demo
      access-groups: [scientists, mathematicians, analyze, admin]
    # - id: 06_tabsets
    #   display-name: 06_tabsets
    #   description: Application 06_tabsets demonstration
    #   container-cmd: ["R", "-e", "shinyproxy::run_06_tabsets()"]
    #   container-image: openanalytics/shinyproxy-demo
    #   access-groups: []
    # - id: euler
    #   display-name: Euler's number
    #   container-cmd: [ "R", "-e", "shiny::runApp('/root/euler')" ]
    #   container-image: openanalytics/shinyproxy-template
    #   access-groups: scientists
    - id: prmc
      display-name: PRMC Dashboard
      description: (Protocol Review Monitoring Committee Dashboard)
      docker-cmd: ["R", "-e rmarkdown::run('/prmc/PRMC.Rmd')"]
      container-image: prmc_dashboard3
      access-groups: [scientists, mathematicians, analyze, admin]

logging:
  file:
    name: shinyproxy.log
  level:
    root: DEBUG
I have a docker-compose service that runs Django using gunicorn from an entrypoint shell script.
When I press CTRL-C after the docker-compose stack has started, the web and nginx services do not exit gracefully and are not removed. How do I configure the Docker environment so that the services are removed when CTRL-C is pressed?
I have tried using stop_signal: SIGINT, but the result is the same. Any ideas?
docker-compose log after CTRL-C issued
^CGracefully stopping... (press Ctrl+C again to force)
Killing nginx ... done
Killing web ... done
docker containers after CTRL-C is issued
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
4b2f7db95c90 nginx:alpine "/docker-entrypoint.…" 5 minutes ago Exited (137) 5 minutes ago nginx
cdf3084a8382 myimage "./docker-entrypoint…" 5 minutes ago Exited (137) 5 minutes ago web
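Exit status 137 is 128 + 9, i.e. the containers were ultimately killed with SIGKILL (matching the "Killing ... done" lines above) rather than exiting on the initial stop signal within the grace period. You can confirm what a stopped container recorded, for example:

# 137 = 128 + 9 (SIGKILL); also shows whether the kernel OOM-killed the process
docker inspect -f '{{.State.ExitCode}} {{.State.OOMKilled}}' web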
Dockerfile
#
# Use poetry to build a wheel and install dependencies into a virtual environment.
# This stores the dependencies during the compile docker stage.
# In the run stage, copy the virtual environment to the final image. This reduces the
# image size.
#
# Install poetry using pip, to allow version pinning. Use --ignore-installed to avoid
# dependency conflicts with poetry.
#
# ---------------------------------------------------------------------------------------
##
# base: Configure python environment and set workdir
##
FROM python:3.8-slim as base

ENV PYTHONDONTWRITEBYTECODE=1 \
    PYTHONFAULTHANDLER=1 \
    PYTHONHASHSEED=random \
    PYTHONUNBUFFERED=1

WORKDIR /app

# configure user pyuser:
RUN useradd --user-group --create-home --no-log-init --shell /bin/bash pyuser && \
    chown pyuser /app

# ---------------------------------------------------------------------------------------
##
# compile: Install dependencies from poetry exported requirements
#          Use poetry to build the wheel for the python package.
#          Install the wheel using pip.
##
FROM base as compile

ARG DEPLOY_ENV=development \
    POETRY_VERSION=1.1.7

# pip:
ENV PIP_DEFAULT_TIMEOUT=100 \
    PIP_DISABLE_PIP_VERSION_CHECK=1 \
    PIP_NO_CACHE_DIR=1

# system dependencies:
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
        build-essential gcc && \
    apt-get purge -y --auto-remove -o APT::AutoRemove::RecommendsImportant=false && \
    apt-get clean -y && \
    rm -rf /var/lib/apt/lists/*

# install poetry, ignoring installed dependencies
RUN pip install --ignore-installed "poetry==$POETRY_VERSION"

# virtual environment:
RUN python -m venv /opt/venv
ENV VIRTUAL_ENV=/opt/venv
ENV PATH="$VIRTUAL_ENV/bin:$PATH"

# install dependencies:
COPY pyproject.toml poetry.lock ./
RUN /opt/venv/bin/pip install --upgrade pip \
    && poetry install $(if [ "$DEPLOY_ENV" = 'production' ]; then echo '--no-dev'; fi) \
        --no-ansi \
        --no-interaction

# copy source:
COPY . .

# build and install wheel:
RUN poetry build && /opt/venv/bin/pip install dist/*.whl

# -------------------------------------------------------------------------------------------
##
# run: Copy virtualenv from compile stage, to reduce final image size
#      Run the docker-entrypoint.sh script as pyuser
#
# This performs the following actions when the container starts:
#   - Make and run database migrations
#   - Collect static files
#   - Create the superuser
#   - Run the wsgi app using gunicorn
#
# port: 5000
#
# build args:
#
#   GIT_HASH                    Git hash the docker image is derived from
#
# environment:
#
#   DJANGO_DEBUG                True if django debugging is enabled
#   DJANGO_SECRET_KEY           The secret key used for the django server, defaults to secret
#   DJANGO_SUPERUSER_EMAIL      Django superuser email, default=myname@example.com
#   DJANGO_SUPERUSER_PASSWORD   Django superuser passwd, default=Pa55w0rd
#   DJANGO_SUPERUSER_USERNAME   Django superuser username, default=admin
##
FROM base as run

ARG GIT_HASH

ENV DJANGO_DEBUG=${DJANGO_DEBUG:-False}
ENV DJANGO_SECRET_KEY=${DJANGO_SECRET_KEY:-secret}
ENV DJANGO_SETTINGS_MODULE=default_project.main.settings
ENV DJANGO_SUPERUSER_EMAIL=${DJANGO_SUPERUSER_EMAIL:-"myname@example.com"}
ENV DJANGO_SUPERUSER_PASSWORD=${DJANGO_SUPERUSER_PASSWORD:-"Pa55w0rd"}
ENV DJANGO_SUPERUSER_USERNAME=${DJANGO_SUPERUSER_USERNAME:-"admin"}
ENV GIT_HASH=${GIT_HASH:-dev}

# install virtualenv from the compile stage
COPY --chown=pyuser:pyuser --from=compile /opt/venv /opt/venv

# set PATH and VIRTUAL_ENV to activate the virtualenv
ENV VIRTUAL_ENV="/opt/venv"
ENV PATH="$VIRTUAL_ENV/bin:$PATH"

COPY --chown=pyuser:pyuser ./docker/docker-entrypoint.sh ./

USER pyuser
RUN mkdir /opt/venv/lib/python3.8/site-packages/default_project/staticfiles

EXPOSE 5000
ENTRYPOINT ["./docker-entrypoint.sh"]
Entrypoint
#!/bin/sh
set -e

echo "Making migrations..."
django-admin makemigrations

echo "Running migrations..."
django-admin migrate

echo "Making staticfiles..."
mkdir -p /opt/venv/lib/python3.8/site-packages/default_project/staticfiles

echo "Collecting static files..."
django-admin collectstatic --noinput

# requires gnu text tools
# echo "Compiling translation messages..."
# django-admin compilemessages
# echo "Making translation messages..."
# django-admin makemessages

if [ "$DJANGO_SUPERUSER_USERNAME" ]
then
    echo "Creating django superuser"
    django-admin createsuperuser \
        --noinput \
        --username "$DJANGO_SUPERUSER_USERNAME" \
        --email "$DJANGO_SUPERUSER_EMAIL"
fi

exec gunicorn \
    --bind 0.0.0.0:5000 \
    --forwarded-allow-ips='*' \
    --worker-tmp-dir /dev/shm \
    --workers=4 \
    --threads=1 \
    --worker-class=gthread \
    default_project.main.wsgi:application

exec "$@"
docker-compose
version: '3.8'

services:
  web:
    container_name: web
    image: myimage
    init: true
    build:
      context: .
      dockerfile: docker/Dockerfile
    environment:
      - DJANGO_DEBUG=${DJANGO_DEBUG}
      - DJANGO_SECRET_KEY=${DJANGO_SECRET_KEY}
      - DJANGO_SUPERUSER_EMAIL=${DJANGO_SUPERUSER_EMAIL}
      - DJANGO_SUPERUSER_PASSWORD=${DJANGO_SUPERUSER_PASSWORD}
      - DJANGO_SUPERUSER_USERNAME=${DJANGO_SUPERUSER_USERNAME}
    # stop_signal: SIGINT
    volumes:
      - static-files:/opt/venv/lib/python3.8/site-packages/{{ cookiecutter.project_name }}/staticfiles:rw
    ports:
      - 127.0.0.1:${DJANGO_PORT}:5000

  nginx:
    container_name: nginx
    image: nginx:alpine
    volumes:
      - ./docker/nginx:/etc/nginx/conf.d
      - static-files:/static
    depends_on:
      - web
    ports:
      - 127.0.0.1:8000:80

volumes:
  static-files:
You can use docker-compose down. From the reference docs:
Stops containers and removes containers, networks, volumes, and images created by up.
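For example:

# stop the stack and remove the containers and default network
docker-compose down
# add -v to also remove named volumes such as static-files
docker-compose down -v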
I'm deploying Django via gunicorn onto a Kubernetes node from a Docker image.
With a Dockerfile using CMD python manage.py runserver 0.0.0.0:8000, i.e. the standard Django dev server, the backend services requests fine.
With a Dockerfile using CMD gunicorn ..., i.e. a proper staging/production server, requests are serviced extremely slowly or not at all.
Here's the Dockerfile:
FROM python:3.9-buster
LABEL maintainer="hq@deepspaceprogram.com"

RUN apt-get update && \
    apt-get upgrade -y && \
    apt-get install -y gcc && \
    apt-get install -y git && \
    apt-get install -y libcurl4 && \
    apt-get install -y libpq-dev && \
    apt-get install -y libssl-dev && \
    apt-get install -y python3-dev && \
    apt-get install -y librtmp-dev && \
    apt-get install -y libcurl4-gnutls-dev && \
    apt-get install -y libcurl4-openssl-dev && \
    apt-get install -y postgresql-9.3 && \
    apt-get install -y python-psycopg2

ENV PROJECT_ROOT /app
WORKDIR /app

# install python packages with poetry
COPY pyproject.toml .
RUN pip3 install poetry && \
    poetry config virtualenvs.create false && \
    poetry install --no-dev

COPY accounts accounts
COPY analytics analytics
COPY commerce commerce
COPY documents documents
COPY leafsheets leafsheets
COPY leafsheets_django leafsheets_django
COPY marketing marketing
COPY static static
COPY manage.py .

# This should be an empty file if building for staging/production.
# Else (image for local dev) it should contain the complete .env.
COPY .env-for-docker-image .env

# CMD python manage.py runserver 0.0.0.0:8000
CMD gunicorn \
    --bind :8000 \
    --workers 3 \
    --worker-class gthread \
    --worker-tmp-dir /dev/shm \
    --timeout 120 \
    --log-level debug \
    --log-file - \
    leafsheets_django.wsgi
The logs show lots of "Connection Closing" messages.
In my settings.py, CORS is set up correctly:
# Cors (ref: https://pypi.org/project/django-cors-headers/)
if DEBUG:
    # CORS_ORIGIN_ALLOW_ALL = True  # TODO: Remove after Django update
    CORS_ALLOW_ALL_ORIGINS = True
else:
    # CORS_ORIGIN_WHITELIST = ( FRONTEND_URL, )  # TODO: Remove after Django update
    CORS_ALLOWED_ORIGINS = ( FRONTEND_URL, )

CORS_ALLOW_CREDENTIALS = True

ALLOWED_HOSTS = ["*"]
What's happening? How to proceed?
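Not part of the original post, but one way to narrow this down is to time a request from inside the pod, bypassing the Service/Ingress, to see whether gunicorn itself is slow. A sketch, assuming curl is available in the image and using a placeholder pod name:

# placeholder pod name and path; prints total response time in seconds
kubectl exec -it mypod -- curl -s -o /dev/null -w '%{time_total}\n' http://127.0.0.1:8000/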
I'm using Docker with the python:3.7.6-slim image to dockerize a Django application.
I'm using the django-import-export plugin to import data in the admin panel; it stores the uploaded file in a temporary directory and reads it back while importing.
But on import, it gives this error:
FileNotFoundError: [Errno 2] No such file or directory: '/tmp/tmppk01nf3d'
The same setup works when not run in Docker.
Dockerfile
FROM python:3.7.6-slim

ARG APP_USER=appuser
RUN groupadd -r ${APP_USER} && useradd --no-log-init -r -g ${APP_USER} ${APP_USER}

RUN set -ex \
    && RUN_DEPS=" \
        libpcre3 \
        mime-support \
        default-libmysqlclient-dev \
        inkscape \
        libcurl4-nss-dev libssl-dev \
    " \
    && seq 1 8 | xargs -I{} mkdir -p /usr/share/man/man{} \
    && apt-get update && apt-get install -y --no-install-recommends $RUN_DEPS \
    && pip install pipenv \
    && rm -rf /var/lib/apt/lists/* \
    && mkdir -p /home/${APP_USER}/.config/inkscape \
    && chown -R ${APP_USER} /home/${APP_USER}/.config/inkscape \
    # Create directories
    && mkdir /app/ \
    && mkdir /app/config/ \
    && mkdir /app/scripts/ \
    && mkdir -p /static_cdn/static_root/ \
    && chown -R ${APP_USER} /static_cdn/

WORKDIR /app/
COPY Pipfile Pipfile.lock /app/

RUN set -ex \
    && BUILD_DEPS=" \
        build-essential \
        libpcre3-dev \
        libpq-dev \
    " \
    && apt-get update && apt-get install -y --no-install-recommends $BUILD_DEPS \
    && pipenv install --deploy --system \
    && apt-get purge -y --auto-remove -o APT::AutoRemove::RecommendsImportant=false $BUILD_DEPS \
    && rm -rf /var/lib/apt/lists/*

COPY ./src /app/
COPY scripts/ /app/scripts/
COPY configs/ /app/configs/

EXPOSE 8000

ENV UWSGI_WSGI_FILE=qcg/wsgi.py
ENV UWSGI_HTTP=:8000 UWSGI_MASTER=1 UWSGI_HTTP_AUTO_CHUNKED=1 UWSGI_HTTP_KEEPALIVE=1 UWSGI_LAZY_APPS=1 UWSGI_WSGI_ENV_BEHAVIOR=holy
ENV UWSGI_WORKERS=2 UWSGI_THREADS=4
ENV UWSGI_STATIC_MAP="/static/=/static_cdn/static_root/" UWSGI_STATIC_EXPIRES_URI="/static/.*\.[a-f0-9]{12,}\.(css|js|png|jpg|jpeg|gif|ico|woff|ttf|otf|svg|scss|map|txt) 315360000"

USER ${APP_USER}:${APP_USER}
ENTRYPOINT ["/app/scripts/docker/entrypoint.sh"]
and I run it with the command:
docker run my-image uwsgi --show-config
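One hedged sanity check is whether /tmp is writable for the unprivileged appuser inside the container. Note also that django-import-export writes the uploaded file during the preview request and reads it back during the confirm request, so if more than one container replica serves the admin, the two requests can land on different containers and the temp file won't be there:

# bypass the image's entrypoint and probe /tmp as the container user
docker run --rm --entrypoint sh my-image -c 'touch /tmp/probe && ls -l /tmp/probe'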
I have a CircleCI config.yml that I use to deploy code to AWS. The first version worked fine, but now it doesn't. How can I fix it? I added a deploy section with shell commands:
version: 2
jobs:
  build:
    working_directory: ~/myProject
    parallelism: 1
    shell: /bin/bash --login
    environment:
      CIRCLE_ARTIFACTS: /tmp/circleci-artifacts
      CIRCLE_TEST_REPORTS: /tmp/circleci-test-results
    docker:
      - image: circleci/build-image:ubuntu-14.04-XXL-upstart-1189-5614f37
        command: /sbin/init
    steps:
      - checkout
      - run: mkdir -p $CIRCLE_ARTIFACTS $CIRCLE_TEST_REPORTS
      - run:
          working_directory: ~/myProject
          command: nvm install 8.9.1 && nvm alias default 8.9.1
      - restore_cache:
          keys:
            - v1-dep-{{ .Branch }}-
            - v1-dep-master-
            - v1-dep-
      - run: sudo sudo add-apt-repository "deb http://archive.ubuntu.com/ubuntu $(lsb_release -sc) main universe restricted multiverse"
      - run: sudo apt update
      - run: sudo apt-get install python2.7-dev
      - run: sudo easy_install --upgrade six
      - run: sudo pip install --upgrade urllib3==1.21.1
      - run: sudo pip install --upgrade pip
      - run: sudo pip install --upgrade blessed
      - run: sudo pip install awsebcli==3.12.3 --ignore-installed six pyyaml
      - run: rm -rf /home/ubuntu/.aws
      - run: if [ -z "${NODE_ENV:-}" ]; then export NODE_ENV=test; fi
      - run: export PATH="~/myProject/node_modules/.bin:$PATH"
      - run: npm install
      - save_cache:
          key: v1-dep-{{ .Branch }}-{{ epoch }}
          paths:
            - vendor/bundle
            - ~/virtualenvs
            - ~/.m2
            - ~/.ivy2
            - ~/.bundle
            - ~/.go_workspace
            - ~/.gradle
            - ~/.cache/bower
            - ./node_modules
      - run: npm test
      - store_test_results:
          path: /tmp/circleci-test-results
      - store_artifacts:
          path: /tmp/circleci-artifacts
      - store_artifacts:
          path: /tmp/circleci-test-results
    deploy:
      name: deploy to AWS
      production:
        branch: production
        commands:
          - bash ./deploy_prod.sh
          - eb deploy stmi-production
      staging:
        branch: master
        commands:
          - bash ./deploy_staging.sh
          - eb deploy stmi-dev
This works for me:
machine: true
steps:
  - checkout
  - run:
      name: create workspace
      command: mkdir -p /tmp/workspace
  - run:
      name: Install awsebcli package
      command: |
        sudo apt-get -y -qq update
        sudo apt-get install python-pip python-dev build-essential
        sudo pip install --upgrade awsebcli
        eb --version
  - run:
      name: installing dependencies
      command: |
        npm install
  - run:
      name: deploy
      command: |
        bash deploy.sh
  - run:
      name: Removing aws config
      command: |
        rm -rf /home/circleci/.aws
  - run: ls /tmp/workspace
  - persist_to_workspace:
      root: /tmp/workspace
      paths:
        - status.txt
And this is my deploy.sh:
mkdir /home/circleci/.aws
touch /home/circleci/.aws/config
chmod 600 /home/circleci/.aws/config
echo "[profile user]" > /home/circleci/.aws/config
echo "aws_access_key_id=$AWS_ACCESS_KEY_ID" >> /home/circleci/.aws/config
echo "aws_secret_access_key=$AWS_SECRET_ACCESS_KEY" >> /home/circleci/.aws/config
eb deploy $BEANSTALK_ENVIRONMENT --profile user --region $BEANSTALK_PRODUCTION_AWS_REGION && \
    echo 'Deployment Succeed' >> /tmp/workspace/beanstalk-deploy-status.txt