I am running a Django (4.1) app in Docker. As part of our test suite, I would like to make use of a development SMTP server which is also running in a Docker container (see docker-compose.yml below). I am using a Selenium driver to run the tests against a Django LiveServer instance. I am also using 1secmail as a temporary mailbox for the tests.
The SMTP server works nicely with Django (i.e. Django is able to send emails through it), but only when Django is not running from a LiveServer. Whenever I try to programmatically test a scenario where SMTP is involved (with a Selenium driver), the email never gets sent (see the failing test below).
The question is simple: how do I make Django and the SMTP server talk to each other in the tests environment?
My docker-compose.yml
version: '3.8'
services:
myapp:
build: .
command: python manage.py runserver 0.0.0.0:8000
volumes:
- ./:/usr/src/app/
ports:
- 8009:8000
env_file:
- ./.env.dev
links:
- selenium-chrome
- dev-smtp-server
myapp-db:
image: postgres:14-alpine
volumes:
- postgres_data:/var/lib/postgresql/data/
environment:
- POSTGRES_USER=blabla
- POSTGRES_PASSWORD=blabla
- POSTGRES_DB=blabla
selenium-chrome:
image: selenium/standalone-chrome
ports:
- 4444:4444 # actual Selenium
- 5900:5900 # VNC server
dev-smtp-server:
image: bytemark/smtp
restart: always
volumes:
postgres_data:
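For context, inside the compose network the Django container reaches the SMTP container through its service name rather than localhost or a published port, so the email settings boil down to something like this (a minimal sketch, assuming the bytemark/smtp image listens on the default port 25 with no authentication):
# settings.py (sketch)
EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
EMAIL_HOST = "dev-smtp-server"  # the compose service name
EMAIL_PORT = 25
EMAIL_USE_TLS = False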
My test fixtures
...
from pytest_django.live_server_helper import LiveServer
@pytest.fixture(scope="session")
def test_server() -> LiveServer:
host = socket.gethostbyname(socket.gethostname())
if host not in settings.ALLOWED_HOSTS:
settings.ALLOWED_HOSTS.append(host)
server = LiveServer(host)
yield server
server.stop()
@pytest.fixture
def driver():
options = webdriver.ChromeOptions()
driver = webdriver.Remote(
command_executor="http://selenium-chrome:4444/wd/hub", # connect to the Selenium container defined in docker-compose
options=options,
)
yield driver
driver.quit()
@pytest.fixture
def forgot_my_password_page_driver(driver: Remote, test_server: LiveServer):
forgotMyPasswordURL = reverse("password_reset")
driver.get(f"{test_server.url}{forgotMyPasswordURL}")
return driver
@pytest.fixture
def email_address_for_which_forgotten_password_form_has_just_been_filed(
forgot_my_password_page_driver, db
):
emailInput = forgot_my_password_page_driver.find_element(By.NAME, "email")
emailAddress = OneSecMailAPI.get_random_email_address()
emailInput.send_keys(emailAddress)
sendButton = forgot_my_password_page_driver.find_element(
By.CSS_SELECTOR, "input[type='submit']"
)
sendButton.click()
return emailAddress
The failing test
def test_forgot_my_password_form_sends_an_email_to_actually_reset_your_password(
email_address_for_which_forgotten_password_form_has_just_been_filed,
):
emailAddress = email_address_for_which_forgotten_password_form_has_just_been_filed
emailDomain = emailAddress.split("@")[1]
emailReceived = False
loopCounter = 0
while not emailReceived and loopCounter < 5:
messages = OneSecMailAPI.get_messages(login=emailAddress, domain=emailDomain)
if len(messages) == 0:
time.sleep(1)
loopCounter = loopCounter + 1
else:
emailReceived = True
assert emailReceived
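One likely culprit is not Docker networking at all: when the test environment is set up (pytest-django calls Django's setup_test_environment()), Django silently replaces EMAIL_BACKEND with the in-memory locmem backend, so nothing reaches the SMTP container during tests even if the settings above are correct. A minimal sketch of a fixture that switches the real backend back on for these Selenium tests, using pytest-django's settings fixture; the host and port values are assumptions matching the compose file:
import pytest

@pytest.fixture
def use_real_smtp_backend(settings):
    # Django's test setup swaps in django.core.mail.backends.locmem.EmailBackend;
    # restore the SMTP backend so the LiveServer actually talks to the container.
    settings.EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
    settings.EMAIL_HOST = "dev-smtp-server"  # compose service name (assumption)
    settings.EMAIL_PORT = 25
The form-submitting fixture (or the test itself) would then have to request use_real_smtp_backend so the override is active before the submit button is clicked.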
Related
I have this project with Scrapy, Scrapyd and Django. My crawler uses the Django models to add the items to the database through the pipelines.
What I did was use a single container to start Scrapyd and the Django server, but this gives me the problem that Scrapyd can't find the spiders even though they exist.
docker-compose.yaml
version: "3"
services:
api:
build:
context: .
ports:
- "8000:8000"
volumes:
- ./app:/app
command: >
sh -c "cd pokemon_crawler && scrapyd &
python manage.py makemigrations &&
python manage.py migrate &&
python manage.py runserver 0.0.0.0:8000"
environment:
- DB_HOST=db
- DB_NAME=pokedex
- DB_USER=postgres
- DB_PASS=supersecretpassword
depends_on:
- db
db:
image: "postgres:10-alpine"
environment:
- POSTGRES_USER=postgres
- POSTGRES_PASSWORD=supersecretpassword
- POSTGRES_DB=pokedex
My crawler view to run the crawler
from rest_framework.views import APIView
from rest_framework import authentication, permissions
from rest_framework.response import Response
from scrapyd_api import ScrapydAPI
class CrawlerView(APIView):
scrapyd = ScrapydAPI("http://localhost:6800")
authentication_classes = [authentication.TokenAuthentication]
permission_classes = [permissions.IsAdminUser]
def post(self, request, format=None):
pokemons = request.POST.getlist("pokemons", None)
if not pokemons or not isinstance(pokemons, list):
return Response({"error": "Missing args"})
pokemons = ["+"] if "all" in pokemons else pokemons
settings = {
"USER_AGENT": "Mozilla/5.0 (compatible; Googlebot/2.1; "
"+http://www.google.com/bot.html)",
}
# Here we schedule a new crawling task from scrapyd.
# This returns an ID that belongs (and will keep belonging) to this task
task = self.scrapyd.schedule(
"pokemon_crawler", "pokemon", settings=settings, pokemons=pokemons
)
return Response({"task_id": task, "status": "started"})
def get(self, request, format=None):
task_id = request.GET.get("task_id", None)
if not task_id:
return Response({"error": "Missing args"})
status = self.scrapyd.job_status("pokemon_crawler", task_id)
return Response({"status": status})
Spider:
class PokemonSpider(CrawlSpider):
name = "pokemon"
base_url = "https://pokemondb.net"
allowed_domains = ["pokemondb.net", "pokemon.gameinfo.io"]
start_urls = ["https://pokemondb.net/pokedex/all"]
def __init__(self, *args, **kwargs):
PokemonSpider.rules = (
Rule(
LinkExtractor(
allow=[
f"/pokedex/{pokemon}"
for pokemon in kwargs.get("pokemons")
],
deny=("/pokedex/all",),
),
callback="parse",
),
)
def parse(self, response):
pass # i have implementation but it's irrelevant
As you can see, I start Scrapyd first, push it to the background, and then run the Django server. I actually didn't try to run it with docker-compose up; what I did was run some Django tests I made with this command: cd pokemon_crawler && scrapyd & python manage.py test [the test to run].
But when I run this I receive the error scrapyd_api.exceptions.ScrapydResponseError: spider 'pokemon' not found
How can I fix this? Or is there a better way to do this Docker setup? I know I could create another container on another network just for Scrapyd, but I need to have access to the pokemon model in the Scrapy pipeline to save the scraped items to the database. Can I do this with a separate container?
I followed this guide to set everything up
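A separate Scrapyd container can still use the Django models: the usual pattern is to bootstrap Django from the Scrapy side so the pipeline can import the ORM. A rough sketch, with the module paths and the model name as placeholders:
# pokemon_crawler/settings.py (Scrapy settings module) -- sketch, paths are assumptions
import os
import sys
import django

# Make the Django project importable and initialise the ORM before any
# pipeline touches the models.
sys.path.append("/app")  # Django project root inside the container (placeholder)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "app.settings")  # placeholder
django.setup()

# pipelines.py -- after django.setup() the models import normally
from pokedex.models import Pokemon  # hypothetical app/model names

class DjangoWriterPipeline:
    def process_item(self, item, spider):
        Pokemon.objects.update_or_create(name=item["name"], defaults=dict(item))
        return item
With Scrapyd in its own compose service, the view would reach it by service name, e.g. ScrapydAPI("http://scrapyd:6800") instead of localhost. The "spider 'pokemon' not found" error itself usually means the Scrapy project was never deployed to Scrapyd (scrapyd-deploy from scrapyd-client), which is independent of how many containers are used.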
I'm trying to turn https://haystack.deepset.ai/docs/latest/tutorial5md into a Dockerized Django app. The problem is that the code works when I run it locally, but when I make a Dockerized version of it I get a connection refused error. My guess is that the two Docker containers can't find their way to each other.
This is my docker-compose.yaml file
version: '3.7'
services:
es:
image: elasticsearch:7.8.1
environment:
- xpack.security.enabled=false
- "ES_JAVA_OPTS=-Xms512m -Xmx512m -Dlog4j2.disable.jmx=true"
- discovery.type=single-node
- VIRTUAL_HOST=localhost
ports:
- "9200:9200"
networks:
- test-network
container_name: es
healthcheck:
test: ["CMD", "curl", "-s", "-f", "http://localhost:9200"]
retries: 6
web:
build: .
command: bash -c "sleep 1m && python manage.py migrate && python manage.py makemigrations && python manage.py runserver 0.0.0.0:8000"
volumes:
- .:/app
networks:
- test-network
ports:
- "8000:8000"
depends_on:
- es
healthcheck:
test: ["CMD", "curl", "-s", "-f", "http://localhost:9200"]
retries: 6
networks:
test-network:
driver: bridge
and this is my apps.py
from django.apps import AppConfig
import logging
# from haystack.reader.transformers import TransformersReader
from haystack.reader.farm import FARMReader
from haystack.preprocessor.utils import convert_files_to_dicts, fetch_archive_from_http
from haystack.preprocessor.cleaning import clean_wiki_text
from django.core.cache import cache
import pickle
from haystack.document_store.elasticsearch import ElasticsearchDocumentStore
from haystack.retriever.sparse import ElasticsearchRetriever
from haystack.document_store.elasticsearch import ElasticsearchDocumentStore
class SquadmodelConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'squadModel'
def ready(self):
document_store = ElasticsearchDocumentStore(host="elasticsearch", username="", password="", index="document")
doc_dir = "data/article_txt_got"
s3_url = "https://s3.eu-central-1.amazonaws.com/deepset.ai-farm-qa/datasets/documents/wiki_gameofthrones_txt.zip"
fetch_archive_from_http(url=s3_url, output_dir=doc_dir)
dicts = convert_files_to_dicts(dir_path=doc_dir, clean_func=clean_wiki_text, split_paragraphs=True)
document_store.write_documents(dicts)
reader = FARMReader(model_name_or_path="deepset/roberta-base-squad2", use_gpu=True)
document_store = ElasticsearchDocumentStore(host="localhost", username="", password="", index="document")
retriever = ElasticsearchRetriever(document_store=document_store)
self.reader = reader
self.retriever = retriever
my views.py
from django.apps import apps as allApps
from rest_framework.decorators import api_view
from rest_framework.response import Response
from haystack.pipeline import ExtractiveQAPipeline
theApp = allApps.get_app_config('squadModel')
reader = theApp.reader
retreiver = theApp.retriever
@api_view(['POST'])
def respondQuestion(request):
question = request.data["question"]
pipe = ExtractiveQAPipeline(reader, retreiver)
prediction = pipe.run(query=question, top_k_retriever=10, top_k_reader=5)
content = {"prediction": prediction}
return Response(content)
Again, this Django API works perfectly locally with an Elasticsearch Docker image, but with this config I can't manage to make it work.
Any help?
As suggested by @leandrojmp, I just needed to replace "localhost" with "es" in apps.py.
document_store = ElasticsearchDocumentStore(host="es", username="", password="", index="document")
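The underlying reason is that containers on the same compose network resolve each other by service name, while localhost inside the web container points at the web container itself. A quick sanity check that can be run from inside the web container (a sketch using only the standard library; the file name is made up):
# check_es.py -- e.g. docker-compose exec web python check_es.py
import json
from urllib.request import urlopen

# "es" is the Elasticsearch service name from docker-compose.yaml
with urlopen("http://es:9200") as resp:
    info = json.load(resp)
print(info["version"]["number"])  # prints 7.8.1 if the container is reachable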
In Django, I want to perform a Celery task (let's say add 2 numbers) when a user uploads a new file in /media. What I've done is to use signals so when the associated Upload object is saved the celery task will be fired.
Here's my code and Docker configuration:
signals.py
from django.db.models.signals import post_save
from django.dispatch import receiver
from core.models import Upload
from core.tasks import add_me
def upload_save(sender, instance, signal, *args, **kwargs):
print("IN UPLOAD SIGNAL") # <----- LOGS PRINT UP TO HERE, IN CONTAINERS
add_me.delay(10)
post_save.connect(upload_save, sender=Upload) # My post save signal
tasks.py
from celery import shared_task
@shared_task(ignore_result=True, max_retries=3)
def add_me(upload_id):
print('In celery') # <----- This is not printed when in Docker!
return upload_id + 20
views.py
class UploadView(mixins.CreateModelMixin, generics.GenericAPIView):
serializer_class = UploadSerializer
def post(self, request, *args, **kwargs):
serializer = UploadSerializer(data=request.data)
print("SECOND AFTER")
print(request.data)  # <------ I can see my file name here
if serializer.is_valid():
print("THIRD AFTER") <------ This is printer OK in all cases
serializer.save()
print("FOURTH AFTER") <----- But this is not printed when in Docker!
return response.Response(
{"Message": "Your file was uploaded"},
status=status.HTTP_201_CREATED,
)
return response.Response(
{"Message": "Failure", "Errors": serializer.errors},
status=status.HTTP_403_FORBIDDEN,
)
docker-compose.yml
version: "3.8"
services:
db:
# build: ./database_docker/
image: postgres
ports:
- "5432:5432"
environment:
POSTGRES_DB: test_db
POSTGRES_USER: test_user
POSTGRES_PASSWORD: test_pass
# volumes:
# - media:/code/media
web:
build: ./docker/
command: bash -c "python manage.py migrate --noinput && python manage.py runserver 0.0.0.0:8000"
volumes:
- .:/code
- media:/code/media
ports:
- "8000:8000"
depends_on:
- db
rabbitmq:
image: rabbitmq:3.6.10
volumes:
- media:/code/media
worker:
build: ./docker/
command: celery -A example_worker worker --loglevel=debug -n worker1.%h
volumes:
- .:/code
- media:/code/media
depends_on:
- db
- rabbitmq
volumes:
media:
Dockerfile
FROM python:latest
ENV PYTHONUNBUFFERED=1
WORKDIR /code
COPY requirements.txt /code/
RUN pip3 install -r requirements.txt
COPY . /code/
WORKDIR /code
Everything works OK when not in Docker.
The problem is that when I deploy the above in Docker and try to upload a file, the request never finishes, even though the file is uploaded to the media folder (I confirmed this by accessing its contents in both the web and worker containers).
More specifically it seems that the Celery task is not executed (finished?) and the code after the serializer.save() is never reached.
When I remove the signal (thus no Celery task is fired) everything is OK. Can someone please help me?
I just figured it out. Turns out that I need to add the following in the __init__.py of my application.
from .celery import app as celery_app
__all__ = ("celery_app",)
Don't know why everything is running smoothly without this piece of code when I'm not using containers...
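For reference, that import assumes the standard companion module from the Celery/Django integration docs, roughly like this (the settings module path is an assumption; the project name comes from the worker command):
# example_worker/celery.py -- sketch
import os
from celery import Celery

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "example_worker.settings")

app = Celery("example_worker")
# Pull CELERY_* settings (broker URL, etc.) from Django settings.
app.config_from_object("django.conf:settings", namespace="CELERY")
# Discover tasks.py modules in installed apps, e.g. core.tasks.add_me.
app.autodiscover_tasks()
As for why it only broke in Docker: without the import in __init__.py, add_me.delay() in the web process falls back to Celery's default app, whose broker is amqp://guest@localhost:5672. On a host machine a locally running RabbitMQ happens to answer there, so it works by accident; inside the web container nothing listens on localhost, the publish keeps trying to reach a broker that isn't there, and the request never finishes. That is the most plausible explanation, at least.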
I'm trying to set up a CI environment where I can test my Django application with Selenium, with both running in Docker.
My test is setup with the following:
from time import sleep
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from selenium.webdriver.remote.webdriver import WebDriver
class MySeleniumTests(StaticLiveServerTestCase):
port = 8000
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.selenium = WebDriver("http://selenium:4444", desired_capabilities={'browserName': 'chrome'})
cls.selenium.implicitly_wait(10)
@classmethod
def tearDownClass(cls):
cls.selenium.quit()
super().tearDownClass()
def test_login(self):
self.selenium.get('%s:%s%s' % ('http://web', self.port, '/'))
greeting = self.selenium.find_element_by_id("greeting")
self.assertEqual(greeting.text, 'hello world')
I then try to run this on gitlab with this CI setup in my .gitlab-ci.yml:
image:
name: docker/compose:1.26.2
entrypoint: ['/bin/sh', '-c']
services:
- docker:dind
variables:
DOCKER_HOST: tcp://docker:2375
DOCKER_DRIVER: overlay2
stages:
- test
before_script:
- docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
build:
stage: test
script:
- docker build --tag django .
- docker network create selenium-net
- docker run -d --network selenium-net --name selenium selenium/standalone-chrome:4.0.0-alpha-6-20200730
- docker run --network selenium-net --name web --expose 8000 django dindselenium/manage.py test myapp
On my local machine the WebDriver setup succeeds, but then Selenium fails to connect to the web app. In the CI environment I can't even connect to Selenium from the web app.
I've set up an example repo here: https://gitlab.com/oskarpersson/dind-selenium/, and an example of a failing job: https://gitlab.com/oskarpersson/dind-selenium/-/jobs/705523165
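One detail worth checking on the Django side: LiveServerTestCase (and therefore StaticLiveServerTestCase) binds its server to localhost by default, so even though the test points the browser at http://web:8000, nothing in the web container listens on an interface the Selenium container can reach. Binding the live server to all interfaces is the usual workaround; a sketch:
class MySeleniumTests(StaticLiveServerTestCase):
    # Bind to all interfaces so the Selenium container can reach the live
    # server via this container's name ("web") on the shared Docker network.
    host = "0.0.0.0"
    port = 8000
This only covers the Django side; the CI job still needs the test container to be attached to the same network as Selenium under the name web, which the docker run commands above already do.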
I'm new to Docker. I've developed an app using Flask and it's working fine, but when I tried deploying the app on Docker, the console says that it's running on http://127.0.0.1:5000/, yet when I try to reach this address in the browser it shows me "this website is not accessible".
here's my Dockerfile:
FROM python:3.6
RUN mkdir /web
WORKDIR /web
ADD . /web/
RUN pip install -r requirements.txt
ENV FLASK_ENV="docker"
EXPOSE 5000
CMD ["python", "mongo.py"]
my docker-compose.yaml file:
version: '3'
services:
# Define the Flask web application
flaskapp:
# Build the Dockerfile that is in the web directory
build: ./web
# Always restart the container regardless of the exit status; try and restart the container indefinitely
restart: always
# Expose port 8000 to other containers (not to the host of the machine)
expose:
- "8000"
# Mount the web directory within the container at /home/flask/app/web
# volumes:
# - ./web:/homvole/flask/app/web
# Don't create this container until the redis and mongo containers (below) have been created
depends_on:
- redis
- mongo
# Link the redis and mongo containers together so that they can talk to one another
links:
- redis
- mongo
# Pass environment variables to the flask container (this debug level lets you see more useful information)
environment:
FLASK_DEBUG: 1
# Deploy with 3 replicas in the case of failure of one of the containers (only in Docker Swarm)
deploy:
mode: replicated
replicas: 3
# Define the redis Docker container
redis:
# use the redis:alpine image: https://hub.docker.com/_/redis/
image: redis:alpine
restart: always
deploy:
mode: replicated
replicas: 3
# Define the redis NGINX forward proxy container
# Define the mongo database
mongo:
image: mongo
restart: always
environment:
MONGO_INITDB_ROOT_USERNAME: root
MONGO_INITDB_ROOT_PASSWORD: root
mongo-express:
image: mongo-express
restart: always
ports:
- 8081:8081
environment:
ME_CONFIG_MONGODB_ADMINUSERNAME: root
ME_CONFIG_MONGODB_ADMINPASSWORD: root
# Expose port 8081 to other Docker containers
expose:
- "8081"
my mongo.py file:
from flask import Flask, jsonify, request
#from flask.ext.pymongo import PyMongo
from flask_pymongo import PyMongo
app = Flask(__name__)
app.config['MONGO_DBNAME'] = 'databasename'
#app.config['MONGO_URI'] = 'mongodb://username:password@hostname:port/databasename'
app.config['MONGO_URI'] = 'mongodb://root:root@localhost:27017/databasename'
mongo = PyMongo(app)
@app.route('/framework/', methods=['GET'])
def get_all_frameworks():
framework = mongo.db.framework
output = []
for q in framework.find():
output.append({'name' : q['name'], 'language' : q['language']})
return jsonify({'result' : output})
@app.route('/framework/find/<name>', methods=['GET'])
def get_one_framework(name):
framework = mongo.db.framework
q = framework.find_one({'name' : name})
if q:
output = {'name' : q['name'], 'language' : q['language']}
else:
output = 'No results found'
return jsonify({'result' : output})
@app.route('/framework/', methods=['POST'])
def add_framework():
framework = mongo.db.framework
name = request.json['name']
language = request.json['language']
framework_id = framework.insert({'name' : name, 'language' : language})
new_framework = framework.find_one({'_id' : framework_id})
output = {'name' : new_framework['name'], 'language' : new_framework['language']}
return jsonify({'result' : output})
@app.route('/framework/update/', methods=['POST'])
def update_framework():
framework = mongo.db.framework
name = request.json['name']
language = request.json['language']
myquery = { "name": name }
newvalues = { "$set": {"language":language } }
q=framework.update_one(myquery, newvalues)
if q:
output = {'name' : q['name'], 'language' : q['language']}
else:
output = 'No results found'
return jsonify({'result' : output})
@app.route('/framework/delete/<name>', methods=['GET'])
def delete_one_framework(name):
framework = mongo.db.framework
#name = request.json['name']
myquery = { "name": name }
q=framework.delete_one(myquery)
if q:
output = 'element deleted successfully '
else:
output = 'No results found'
return jsonify({'result' : output})
if __name__ == '__main__':
app.run(debug=False)
app.run(threaded=True)
app.run(host='0.0.0.0')
To deploy my app I've used the commands:
docker build -t moapp4 .
docker run -p 5000:5000 moapp4
this shows:
Serving Flask app "mongo" (lazy loading)
Environment: docker
Debug mode: off
Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
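Two separate things are likely going on here. First, only the first app.run() call in mongo.py ever executes, and Flask's default host is 127.0.0.1, which is only reachable from inside the container; docker run -p 5000:5000 forwards traffic to the container's network interface, so the server has to bind to 0.0.0.0. Second, once the app runs alongside the compose services, MongoDB is not on localhost but at the mongo service name, and the root user created via MONGO_INITDB_ROOT_* lives in the admin database. A sketch of the adjusted parts of mongo.py, with the credentials taken from the compose file:
app.config['MONGO_URI'] = 'mongodb://root:root@mongo:27017/databasename?authSource=admin'

if __name__ == '__main__':
    # A single run() call; host='0.0.0.0' makes the server reachable
    # through the published port (docker run -p 5000:5000 moapp4).
    app.run(host='0.0.0.0', port=5000, threaded=True)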