How to run Django selenium test in github actions - django

I have a Django selenium test that runs fine on local machine using Firefox Webdrive.
When I try to run it on github actions I get following error:
Traceback (most recent call last):
File "/home/runner/work/Pangea/Pangea/core/tests/test_selenium.py", line 12, in setUpClass
cls.selenium = WebDriver()
File "/opt/hostedtoolcache/Python/3.8.12/x64/lib/python3.8/site-packages/selenium/webdriver/firefox/webdriver.py", line 181, in __init__
RemoteWebDriver.__init__(
File "/opt/hostedtoolcache/Python/3.8.12/x64/lib/python3.8/site-packages/selenium/webdriver/remote/webdriver.py", line 269, in __init__
self.start_session(capabilities, browser_profile)
File "/opt/hostedtoolcache/Python/3.8.12/x64/lib/python3.8/site-packages/selenium/webdriver/remote/webdriver.py", line 360, in start_session
response = self.execute(Command.NEW_SESSION, parameters)
File "/opt/hostedtoolcache/Python/3.8.12/x64/lib/python3.8/site-packages/selenium/webdriver/remote/webdriver.py", line 425, in execute
self.error_handler.check_response(response)
File "/opt/hostedtoolcache/Python/3.8.12/x64/lib/python3.8/site-packages/selenium/webdriver/remote/errorhandler.py", line 247, in check_response
raise exception_class(message, screen, stacktrace)
selenium.common.exceptions.WebDriverException: Message: Process unexpectedly closed with status 1
The test that I am trying to run is very basic as shown below
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from rest_framework.reverse import reverse
from selenium.webdriver.common.by import By
from selenium.webdriver.firefox.webdriver import WebDriver
class TestLoginWithSelenium(StaticLiveServerTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.selenium = WebDriver()
cls.selenium.maximize_window()
cls.username = "xyz"
cls.password = "mnbvcxza"
cls.server_url = 'http://127.0.0.1:8000'
@classmethod
def tearDownClass(cls):
cls.selenium.quit()
super().tearDownClass()
def test_login(self):
"""
Test Login for a user
"""
self.assertTrue(1==1)
I am running following commands on Github Actions:
pip install selenium
sudo apt install firefox-geckodriver
which geckodriver
geckodriver -V
sudo mv /usr/bin/geckodriver /usr/local/bin/geckodriver
which geckodriver
Mozilla Firefox 99.0
geckodriver 0.30.0

Reference on headless firefox option
Following is the modified code to run headless firefox on github actions
test.py
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from rest_framework.reverse import reverse
from selenium import webdriver
from selenium.webdriver import FirefoxOptions
from selenium.webdriver.common.by import By
class TestLoginWithSelenium(StaticLiveServerTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
opts = FirefoxOptions()
opts.add_argument("--headless")
cls.selenium = webdriver.Firefox(options=opts)
# this will run with head and you can actually see the
# browser open up in tests
# cls.selenium = webdriver.Firefox()
cls.username = "xyz"
cls.password = "mnbvcxza"
@classmethod
def tearDownClass(cls):
cls.selenium.quit()
super().tearDownClass()
def test_login(self):
"""
Test Login for a user
"""
self.client.logout()
self.selenium.get(f"{self.live_server_url}{reverse('login')}")
username_input = self.selenium.find_element(By.ID, "id_auth-username")
username_input.send_keys(self.username)
password_input = self.selenium.find_element(By.ID, "id_auth-password")
password_input.send_keys(self.password)
self.selenium.find_element(By.ID, 'idLogin').click()
github actions
pip install selenium
sudo apt install firefox-geckodriver

Related

Flask AssertionError: View function mapping is overwriting an existing endpoint function: home

I'm trying to code a social network with flask on python anywhere
, everything was working fine before and without touching the imports I started to receive this error when I run routes.py
Traceback (most recent call last):
File "/home/OurHub/mysite/routes.py", line 13, in <module>
def home():
File "/usr/local/lib/python3.9/site-packages/flask/scaffold.py", line 433, in decorator
self.add_url_rule(rule, endpoint, f, **options)
File "/usr/local/lib/python3.9/site-packages/flask/scaffold.py", line 54, in wrapper_func
return f(self, *args, **kwargs)
File "/usr/local/lib/python3.9/site-packages/flask/app.py", line 1084, in add_url_rule
raise AssertionError(
AssertionError: View function mapping is overwriting an existing endpoint function: home
I tried to put everything in a single file and I don't have two functions that have the same name
here is the start of my routes.py code
import os
import secrets
from PIL import Image
from flask import render_template, url_for, flash, redirect, request, abort
from __init__ import app, db, bcrypt
from forms import FormCreerCompte, FormConnecter, ModifierCompte, FormPoste
from modelsdb import Profil, Poste
from flask_login import login_user, current_user, logout_user, login_required
@app.route("/")
@app.route("/home")
def home():
page = request.args.get('page',1, type=int)
posts = Poste.query.paginate(page=page, per_page=5)
return render_template('page1.html', posts=posts)
and the code from the __init__ file:
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_bcrypt import Bcrypt
from flask_login import LoginManager
app = Flask(__name__)
app.config['SECRET_KEY'] = '6dfde280ba245'
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+mysqlconnector://OurHub:ninjQ@OurHub.mysql.pythonanywhere-services.com/OurHub$default'
db = SQLAlchemy(app)
bcrypt = Bcrypt(app)
login_manager = LoginManager(app)
login_manager.login_view= 'connecter'
login_manager.login_message_category = 'primary'
import routes
After a lot of research, I tried deleting a piece of code that I had commented out in my HTML file, and it worked! Maybe it's because the HTML comment syntax "<!-- -->" doesn't actually disable template code inserts "{% %}". The error still appears when I run routes directly, but the application works fine — I was looking for the error in the wrong place after all.

How do I test that my Celery worker actually works in Django

(code at bottom)
Context: I'm working on a Django project where I need to provide the user feedback on a task that takes 15-45 seconds. In comes Celery to the rescue! I can see that Celery is performing as expected when I celery -A my_project worker -l info & python manage.py runserver.
Problem: I can't figure out how to run a celery worker in my tests. When I run python manage.py test, I get the following error:
Traceback (most recent call last):
File "/Users/pbrockman/coding/t1v/lib/python3.8/site-packages/django/test/utils.py", line 387, in inner
return func(*args, **kwargs)
File "/Users/pbrockman/coding/tcommerce/tcommerce/tests.py", line 58, in test_shared_celery_task
self.assertEqual(result.get(), 6)
File "/Users/pbrockman/coding/t1v/lib/python3.8/site-packages/celery/result.py", line 224, in get
return self.backend.wait_for_pending(
File "/Users/pbrockman/coding/t1v/lib/python3.8/site-packages/celery/backends/base.py", line 756, in wait_for_pending
meta = self.wait_for(
File "/Users/pbrockman/coding/t1v/lib/python3.8/site-packages/celery/backends/base.py", line 1087, in _is_disabled
raise NotImplementedError(E_NO_BACKEND.strip())
NotImplementedError: No result backend is configured.
Please see the documentation for more information.
Attempted solution:
I tried various combinations of @override_settings with CELERY_TASK_ALWAYS_EAGER=True, CELERY_TASK_EAGER_PROPAGATES=True, and BROKER_BACKEND='memory'.
I tried both the @app.task decorator and the @shared_task decorator.
How do I see if celery is having the expected behavior in my tests?
Code
Celery Settings: my_project/celery.py
import os
from dotenv import load_dotenv
load_dotenv()
from celery import Celery
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'my_project.settings')
app = Celery(f'my_project-{os.environ.get("ENVIRONMENT")}',
broker=os.environ.get('REDISCLOUD_URL'),
include=['my_project.tasks'])
from django.conf import settings
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
if __name__ == '__main__':
app.start()
Testing: my_project/tests.py
from django.test import TestCase
from tcommerce.celery import app
from tcommerce.tasks import shared_add
from tcommerce.tasks import app_add
class CeleryTests(TestCase):
def test_shared_celery_task(self):
'@shared_task'
result = shared_add.delay(2, 4)
self.assertEqual(result.get(), 6)
def test_app_celery_task(self):
'@app.task'
result = app_add.delay(2, 4)
self.assertEqual(result.get(), 6)
Defining tasks: my_project/tasks.py
from .celery import app
from celery import shared_task
@shared_task
def shared_add(x, y):
return x + y
@app.task
def app_add(x, y):
return x + y

Executing Django management commands that spins off multiple processes and threads in windows and linux

I am relatively new to multi-threading and multi-processing. I just encountered another learn-block when i just realized that windows and linux handles multi-processing very differently. I do not know th technicalities, but I do know that it is different.
I am using Django to execute my application: python manage.py random_script. Within random_script, I am importing multiprocessing and spinning off different processes. I get the following error:
File "<string>", line 1, in <module>
File "C:\FAST\Python\3.6.4\lib\multiprocessing\spawn.py", line 99, in spawn_main
new_handle = reduction.steal_handle(parent_pid, pipe_handle)
File "C:\FAST\Python\3.6.4\lib\multiprocessing\reduction.py", line 82, in steal_handle
_winapi.PROCESS_DUP_HANDLE, False, source_pid)
OSError: [WinError 87] The parameter is incorrect
I tried adding this at the top because my development server is windows but my production server is linux:
if 'win' in sys.platform:
print('Window')
multiprocessing.set_start_method('spawn')
else:
print('Linux')
multiprocessing.set_start_method('fork')
But with no success. When I continued to look through Google, it was suggested to put the process-spawning portion under the if __name__ == '__main__': line. That would be fine if I were executing my script normally (i.e. python random_script.py), but I am not. I have run out of ideas and no longer know how to proceed.
++ EDITED ++
manage.py
#!/usr/bin/env python
import os
import sys
import argparse
DEFAULT_SETTINGS_MODULE = "api.test_settings"
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", DEFAULT_SETTINGS_MODULE)
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
random_script.py:
class Command(BaseCommand):
def __init__(self):
super().__init__()
def handle(self, *args, **kwargs):
<...>
self.main()
def main(self):
<...>
Above is my manage.py and my random_script.py.
Thanks for the guidance
Every app has a main module which initializes / starts it.
For Django manually run management commands this is manage.py and you can set desired method in there:
# manage.py
...
if __name__ == "__main__":
import multiprocessing
if 'win' in sys.platform:
multiprocessing.set_start_method('spawn')
else:
multiprocessing.set_start_method('fork')
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project.settings")
...
And sample of custom management command:
# random_script.py
def calculation(x):
import time
time.sleep(1)
return x
class Command(BaseCommand):
def handle(self, *args, **options):
calc_args = [1, 2, 3, 4, 5]
with multiprocessing.Pool(processes=3) as pool:
results = pool.map(calculation, calc_args)
self.stdout.write(
self.style.SUCCESS('Success: %s' % results)
)

reliably kill phantomjs launched in setUpClass if derived class' setUpClass fails

I wrote a SeleniumTestCase class that launches PhantomJS in its setUpClass and kills it in its tearDownClass. However, if a derived class' setUpClass raises an error, the PhantomJS process is left hanging because SeleniumTestCase.tearDownClass doesn't get called.
from django.test import LiveServerTestCase
import sys, signal, os
from selenium import webdriver
errorShots = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', "errorShots")
class SeleniumTestCase(LiveServerTestCase):
@classmethod
def setUpClass(cls):
"""
Launches PhantomJS
"""
super(SeleniumTestCase, cls).setUpClass()
cls.browser = webdriver.PhantomJS()
@classmethod
def tearDownClass(cls):
"""
Saves a screenshot if the test failed, and kills PhantomJS
"""
print 'Tearing down...'
if cls.browser:
if sys.exc_info()[0]:
try:
os.mkdir(errorShots)
except:
pass
errorShotPath = os.path.join(
errorShots,
"ERROR_phantomjs_%s_%s.png" % (cls._testMethodName, datetime.datetime.now().isoformat())
)
cls.browser.save_screenshot(errorShotPath)
print 'Saved screenshot to', errorShotPath
cls.browser.service.process.send_signal(signal.SIGTERM)
cls.browser.quit()
class SetUpClassTest(SeleniumTestCase):
@classmethod
def setUpClass(cls):
print 'Setting Up'
super(SetUpClassTest, cls).setUpClass()
raise Error('gotcha!')
def test1(self):
pass
Output (note that "Tearing Down" doesn't get printed)
$ ./manage.py test
Creating test database for alias 'default'...
Setting Up
E
======================================================================
ERROR: setUpClass (trucks.tests.SetUpClassTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/Users/andy/leased-on/trucks/tests.py", line 1416, in setUpClass
raise Error('gotcha!')
NameError: global name 'Error' is not defined
----------------------------------------------------------------------
Ran 0 tests in 1.034s
FAILED (errors=1)
Destroying test database for alias 'default'...
How can I kill PhantomJS after a suite's setUpClass fails?
I know I could switch to using setUp and addCleanup, but I want to avoid relaunching PhantomJS (and logging back into my app with it) before every single test.
I decided to use setUpModule and tearDownModule to launch and kill PhantomJS. I put the screenshot-saving code in an addCleanup hook.
from django.test import LiveServerTestCase
from selenium import webdriver
import sys
import signal
import os
import unittest
errorShots = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', "errorShots")
browser = None
def setUpModule():
"""
Launches PhantomJS
"""
global browser
sys.stdout.write('Starting PhantomJS...')
sys.stdout.flush()
browser = webdriver.PhantomJS()
print 'done'
def tearDownModule():
"""
kills PhantomJS
"""
if browser:
sys.stdout.write('Killing PhantomJS...')
sys.stdout.flush()
browser.service.process.send_signal(signal.SIGTERM)
browser.quit()
print 'done'
class SeleniumTestCase(LiveServerTestCase):
def setUp(self):
self.addCleanup(self.cleanup)
def cleanup(self):
"""
Saves a screenshot if the test failed
"""
if sys.exc_info()[0]:
try:
os.mkdir(errorShots)
except:
pass
errorShotPath = os.path.join(
errorShots,
"ERROR_phantomjs_%s_%s.png" % (self._testMethodName, datetime.datetime.now().isoformat())
)
browser.save_screenshot(errorShotPath)
print '\nSaved screenshot to', errorShotPath

Raising Error: NotRegistered when I use Flask with Celery

Description
Hi, I'm learning Celery, and I read a blog.>>
Celery and the Flask Application Factory Pattern - miguelgrinberg.com
So I wrote a small program to run Flask with Celery
Code
app.__init__.py
from flask import Flask
from celery import Celery
celery = Celery(__name__, broker='amqp://127.0.0.1:5672/')
def create_app():
app = Flask(__name__)
@celery.task
def add(x, y):
print x+y
@app.route('/')
def index():
add.delay(1, 3)
return 'Hello World!'
return app
manage.py
from app import create_app
app = create_app()
if __name__ == '__main__':
app.run()
celery_worker_1.py
from app import celery, create_app
f_app = create_app()
f_app.app_context().push()
celery_worker_2.py
from app import celery, create_app
@celery.task
def foo():
print 'Balabala...'
f_app = create_app()
f_app.app_context().push()
Problem
When I run the Flask server and Celery using:
celery -A celery_worker_1 worker -l info
the Celery raised NotRegistered Error:
Traceback (most recent call last): File "D:\Python27\lib\site-packages\billiard\pool.py", line 363, in workloop
result = (True, prepare_result(fun(*args, **kwargs))) File "D:\Python27\lib\site-packages\celery\app\trace.py", line 349, in
_fast_trace_task
return _tasks[task].__trace__(uuid, args, kwargs, request)[0] File "D:\Python27\lib\site-packages\celery\app\registry.py", line 26, in __missing__
raise self.NotRegistered(key) NotRegistered: 'app.add'
But instead of using celery_worker_2:
celery -A celery_worker_2 worker -l info
the task run correctly:
[2015-11-28 15:45:56,299: INFO/MainProcess] Received task: app.add[cbe5e1d6-c5df-4141-9db1-e6313517c202]
[2015-11-28 15:45:56,302: WARNING/Worker-1] 4
[2015-11-28 15:45:56,371: INFO/MainProcess] Task app.add[cbe5e1d6-c5df-4141-9db1-e6313517c202] succeeded in 0.0699999332428s: None
Why can't the Celery run correctly with the code of celery_worker_1?
PS: I'm not good at English — if you can't understand something, please point it out and I'll describe it again. Thanks!