How to count executed queries during selenium test? - django

I have tried the code below, but it didn't work for me.
class SeleniumTest(LiveServerTestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.driver = PhantomJS()

    @override_settings(DEBUG=True)
    def test_queries(self):
        with self.assertNumQueries(10):
            self.driver.get(self.live_server_url + "/page-with-10-queries")
Output:
query['sql'] for query in self.captured_queries
AssertionError: 0 != 10 : 0 queries executed, 10 expected
Captured queries were:

To test the queries generated by a view, I use the following code:
def test_queries(self):
    with self.assertNumQueries(3):
        found = self.client.get(reverse('<url name>'))
        print(found)  # This is the line that initiates the Lazy query
The following is an untested suggestion, but if you insist on using Selenium you may try:
@override_settings(DEBUG=True)
def test_queries(self):
    with self.assertNumQueries(10):
        found = self.driver.get(self.live_server_url + "/page-with-10-queries")
        print(found)  # Just an idea

Related

django test giving abnormal results

I am getting weird results when testing 2 different views. They are both list views. The first one should only show active orders (active=True), the second one only historic orders (completed=True).
I am also using PostgreSQL as the database. The issue is that both views work perfectly fine in the browser; however, when testing, pytest raises an error saying that an order that shouldn't be listed, is listed.
The views are as follows:
class OrderHistoryView(LoginRequiredMixin, ListView):
    template_name = 'orders/order_history.html'

    def get_queryset(self):
        user = self.request.user
        qs = Order.objects.filter(Q(buyer=user, completed=True) | Q(seller=user, completed=True))
        return qs

class OrderListView(LoginRequiredMixin, ListView):
    template_name = 'orders/order_list_view.html'

    def get_queryset(self):
        user = self.request.user
        qs = Order.objects.filter(Q(buyer=user, active=True) | Q(seller=user, active=True))
        return qs
The tests are:
class OrderHistoryViewTests(TestCase):
    @classmethod
    def setUp(self):
        self.req = RequestFactory()
        self.user = mixer.blend('user.CustomUser', email='test@test.com', password='1234')
        self.user2 = mixer.blend('user.CustomUser')
        self.advert = mixer.blend('advert.Advert', author=self.user)
        self.offer = mixer.blend(
            'offer.Offer', advert=self.advert, author=self.user2, receiver=self.user, accepted=True)
        self.order = mixer.blend(
            'orders.Order', advert=self.advert, seller=self.user2, buyer=self.user, offer=self.offer,
            pending=True, completed=True, active=True, disputed=False, cancelled=False)
        self.order2 = mixer.blend('orders.Order', completed=False, buyer=self.user)

    def test_only_shows_completed(self):
        request = self.req.get('/')
        request.user = self.user
        resp = OrderHistoryView.as_view()(request)
        self.assertContains(resp, self.order)
        self.assertNotContains(resp, self.order2)
The second test is exactly the same; it just tests that it's only showing active orders.
The error message is:
FAILED orders/tests/test_views.py::OrderHistoryViewTests::test_only_shows_completed - AssertionError: 3 != 0 : Response should not contain '10'
FAILED orders/tests/test_views.py::OrderListViewTests::test_order_listing_showing_only_active - AssertionError: 2 != 0 : Response should not contain '15'
EDIT:
I have done some further investigation on this and found that even the first "self.assertContains(resp, self.order)" statement is failing. When I run test_views on its own, the "self.assertContains(resp, self.order)" test passes, however when I run all the tests together it fails. I have noticed that after commenting out 5 different tests in the test_forms directory, the tests all pass. The issue is that there is nothing wrong with those 5 form tests: there are no mock statements in them and the tear down seems to be working.
I then put a print statement to see what queryset is actually showing up:
Printing Order.objects.all() shows the order in the queryset, and print(self.order.completed) also comes up True. However the failing test says: "AssertionError: False is not true : Couldn't find '25' in response"
Even though order 25 is clearly there and completed. The view code also looks fine, and it is a very simple view. What could be causing this?
I have also noticed that if I run py.test I only need to comment out 3 of the form tests for everything to pass, whereas with manage.py test I have to comment out 5 form tests for it to pass.
I just used print(resp.context_data) to see what is actually in the response...
{'paginator': None, 'page_obj': None, 'is_paginated': False, 'object_list': <QuerySet [...]>, 'order_list': <QuerySet [...]>, 'view': <OrderHistoryView object>}
From here we can see the order "25" is actually in the response; I printed the username as well and it matches.

Django Tests: increase testsRun counter

In some Django tests I have a loop to test many things.
In the end result it shows up as:
Ran 1 test in 3.456s
I would like to increment that counter for each loop iteration; how can I do that?
The test is using subTest(), but that does not update the counter (which I believe is the attribute testsRun).
My test looks something like this:
class MyTestCase(TestCase):
    def test_auth_pages(self):
        pages = ['homepage', 'dashboard', 'profile']
        for page in pages:
            with self.subTest():
                # ....testsRun += 1
                self.c.login(username='test', password='test')
                response = self.c.get(reverse_lazy(page))
                self.assertEqual(200, response.status_code, msg=page)
                self.c.logout()
                response = self.c.get(reverse_lazy(page))
                self.assertEqual(302, response.status_code, msg=page)
If you don't mind changing the testing framework, consider pytest with the pytest-django package. You can easily parametrize a test using @pytest.mark.parametrize:
import pytest
from django.urls import reverse_lazy

@pytest.mark.parametrize("page_name", ['homepage', 'dashboard', 'profile'])
def test_some_page(page_name, client):
    client.login(username='test', password='test')
    response = client.get(reverse_lazy(page_name))
    assert response.status_code == 200
    client.logout()
    response = client.get(reverse_lazy(page_name))
    assert response.status_code == 302
If not, you could create a test function factory that would accept the page name and return a test function for that page:
class MyTestCase(TestCase):
    def _create_page_test(page_name):
        def test_function(self):
            self.c.login(username='test', password='test')
            response = self.c.get(reverse_lazy(page_name))
            self.assertEqual(200, response.status_code, msg=page_name)
            self.c.logout()
            response = self.c.get(reverse_lazy(page_name))
            self.assertEqual(302, response.status_code, msg=page_name)
        return test_function

    test_homepage = _create_page_test("homepage")
    test_dashboard = _create_page_test("dashboard")
    test_profile = _create_page_test("profile")
The added benefit of such changes is that each page has a separate test, independent from the other. That makes debugging easier.
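If the page list grows, one possible variation (not from the answer above; the class name and page list below are only illustrative) is to keep the factory at module level and attach the generated methods in a loop after the class definition, so each page still shows up as a separate test:

from django.test import TestCase
from django.urls import reverse_lazy

def _create_page_test(page_name):
    # Same idea as the factory above; self.c is assumed to be a test client set up in setUp().
    def test_function(self):
        self.c.login(username='test', password='test')
        response = self.c.get(reverse_lazy(page_name))
        self.assertEqual(200, response.status_code, msg=page_name)
    return test_function

class MyPagesTestCase(TestCase):
    pass

# Attach one generated test method per page; the runner counts each one separately.
for _page in ['homepage', 'dashboard', 'profile']:
    setattr(MyPagesTestCase, 'test_%s' % _page, _create_page_test(_page))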
You can achieve this with a different test runner. Check out test generators from the django-nose package:
def test_evens():
    for i in range(0, 5):
        yield check_even, i, i * 3  # this generates 5 different tests

def check_even(n, nn):
    assert n % 2 == 0 or nn % 2 == 0

How to store results from many TestCases (to one file) in django (unittest)?

I have several test cases which all have a similar tearDown:
def tearDown(self):
    execution_time = time.time() - self.startTime  # self.startTime is set in setUp
    result_list = [self._testMethodName]
    result = [str(x) for x in sys.exc_info()]
    if result[0] == 'None':
        result_list.append('PASS')
    elif 'Assertion' in result[0]:
        result_list.append('FAIL')
    else:
        result_list.append('ERROR')
    result_list.append(result)
    result_list.append(str(execution_time))
    TEST_RESULTS.append(result_list)
I'm using the tearDown function to store results from each test (in the test case) to a global TEST_RESULTS object (so each TestCase file has a TEST_RESULTS global defined).
Then in the tearDownClass function I'm doing this to store the results to CSV:
@classmethod
def tearDownClass(cls):
    with open('tests/results/test_case_1_output.csv', 'w') as resultFile:
        wr = csv.writer(resultFile)
        wr.writerows(TEST_RESULTS)
To me this is a terrible implementation. Globals defined everywhere and tearDown/tearDownClass implemented over and over again in each test case rather than defined once.
Furthermore, I would like to create a test result file which collects results from all test cases.
My hunch is this requires defining the file handle at the runner level (or somewhere before the TestCases are being called). This would allow me to reinitialize the csv file at a higher level (rather than arbitrarily in one TestCase).
Does anyone have a suggestion on how this can be accomplished? I did not see a way to do this in the docs, and overriding Django's TestCase seemed hazardous.
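For the runner-level hunch above, a minimal untested sketch would be to subclass DiscoverRunner so the result file is initialised exactly once per run (the path, header columns and class name here are assumptions, not from the original post):

import csv

from django.test.runner import DiscoverRunner

class CsvReportRunner(DiscoverRunner):
    # Hypothetical runner: truncate the shared CSV before any TestCase runs,
    # so individual test cases only ever append to it.
    results_path = 'tests/results/all_results.csv'

    def run_tests(self, test_labels, **kwargs):
        with open(self.results_path, 'w', newline='') as result_file:
            csv.writer(result_file).writerow(['test', 'status', 'exc_info', 'time'])
        return super().run_tests(test_labels, **kwargs)

Pointing the TEST_RUNNER setting at this class would make manage.py test use it for every run.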
I will post my solution (thanks very much to @xyres) since I think it might help some others.
Below is an example of a TestCase which calls setUp, tearDown and setUpClass from the base classes (either TestManager or TestCase). The trick was to call setUpClass from the base class 'TestCase' and create another initialization function, 'initialize', called on the 'TestManager' base class.
import csv
import sys
import time

class MyTestCase(TestManager, TestCase):
    def setUp(self):
        self.param1, self.param2 = super(MyTestCase, self).setUp()

    def tearDown(self):
        test_name = self._testMethodName
        super(MyTestCase, self).get_and_write_results_to_csv(test_name)

    @classmethod
    def setUpClass(cls):
        super(MyTestCase, cls).setUpClass()
        super(MyTestCase, cls).initialize('my test case name')

class TestManager():
    @classmethod
    def initialize(cls, test_case_name):
        with open('path/to/my/testresults.csv', 'w') as resultFile:
            wr = csv.writer(resultFile)
            wr.writerow(["Results for " + test_case_name + " are below:"])

    def setUp(self):
        """
        Do some setup stuff that's the same for each TestCase.
        I'm not showing the actions here, but assume you want this
        function to return 2 params that are the same for every
        TestCase setup.
        """
        return param1, param2

    def get_and_write_results_to_csv(self, test_name):
        execution_time = time.time() - self.startTime
        result_list = [test_name]
        result = [str(x) for x in sys.exc_info()]
        if result[0] == 'None':
            result_list.append('PASS')
        elif 'Assertion' in result[0]:
            result_list.append('FAIL')
        else:
            result_list.append('ERROR')
        result_list.append(result)
        result_list.append(str(execution_time))
        with open('path/to/my/testresults.csv', 'a') as resultFile:
            wr = csv.writer(resultFile)
            wr.writerow(result_list)

HTML report for django tests

I have a Django project containing an API (created with Django REST framework, if that matters). I have added some tests for the API, but in order to have an overall view of the tests, whether passing, failing or missing, I need to create an HTML report.
When the tests finish, an HTML table report should be generated which shows the endpoints and HTTP responses covered during the tests, the results of those tests, plus the combinations which are missing tests.
Unfortunately I cannot figure out how I should do that. I know that coverage can give me a detailed HTML report, but that's not what I need; I need something like this:
| Endpoint description | 200  | 400  | 403     | 404 |
| GET /endpoint1       | PASS | PASS | PASS    | N/A |
| POST /endpoint1      | PASS | FAIL | MISSING | N/A |
Does anybody have any idea about that? Maybe some libs that could help out, or what strategy should I use?
Thank you in advance
Late to the party, but this is my solution for outputting an HTML test report for Django tests (based on the fact that HtmlTestRunner cannot be used directly with Django's DiscoverRunner).
The following classes, if placed in tests/html_test_reporter.py, can be used as a DiscoverRunner which is patched to use HTMLTestRunner.
from django.test.runner import DiscoverRunner
from HtmlTestRunner import HTMLTestRunner

class MyHTMLTestRunner(HTMLTestRunner):
    def __init__(self, **kwargs):
        # Pass any required options to HTMLTestRunner
        super().__init__(combine_reports=True, report_name='all_tests', add_timestamp=False, **kwargs)

class HtmlTestReporter(DiscoverRunner):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Patch over the test_runner in the super class.
        html_test_runner = MyHTMLTestRunner
        self.test_runner = html_test_runner
Then this is run with:
python manage.py test -v 2 --testrunner tests.html_test_reporter.HtmlTestReporter
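If you'd rather not pass the flag on every invocation, the same class can also be set project-wide through the standard TEST_RUNNER setting (assuming the module path used above):

# settings.py
TEST_RUNNER = 'tests.html_test_reporter.HtmlTestReporter'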
By default Django projects use django.test.runner.DiscoverRunner to discover tests and Python's unittest machinery to run them. HTMLTestRunner can be used with plain unittest to output an HTML test report, but to get it from manage.py test it has to be wired in through DiscoverRunner as above.
Hope this helps.
As Django uses Python's standard unittest library, you'll have to tweak some of its parts.
First, you'll need some way to specify which tests actually test which endpoint. A custom decorator is handy for that:
from functools import wraps

def endpoint(path, code):
    """
    Mark some test as one which tests a specific endpoint.
    """
    def inner(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs)
        wrapper._endpoint_path = path
        wrapper._endpoint_code = code
        return wrapper
    return inner

class MyTestCase(TestCase):
    @endpoint(path='/path/one', code=200)
    def test_my_path_is_ok(self):
        response = self.client.get('/path/one?foo=bar')
        self.assertEqual(response.status_code, 200)

    @endpoint(path='/path/one', code=404)
    def test_my_path_expected_errors(self):
        response = self.client.get('/path/one?foo=qux')
        self.assertEqual(response.status_code, 404)

    def test_some_other_stuff(self):
        # this one will not be included in our results grid.
        pass
You could use a "magical" approach (e.g. special methods' names to guess the endpoint they are testing) instead, but explicit is better than implicit, right?
Then, you need a way to collect the results of your tests - specifically, of those which test the endpoints. Here we make a (very rough draft) subclass of unittest.TestResult to handle it:
class EndpointsTestResult(TestResult):
    def __init__(self):
        super(EndpointsTestResult, self).__init__()
        self.endpoint_results = {}

    def addError(self, test, err):
        super(EndpointsTestResult, self).addError(test, err)
        if hasattr(test, '_endpoint_path'):
            branch = self.endpoint_results.setdefault(getattr(test, '_endpoint_path'), {})
            branch[getattr(test, '_endpoint_code')] = 'MISSING'

    def addFailure(self, test, err):
        # similar to addError()

    def addSuccess(self, test):
        # similar to addError()
Finally it's time to actually output our results. Let's make a subclass of unittest.TextTestRunner and specify it in our custom runner:
class EndpointsTestRunner(TextTestRunner):
    def _makeResult(self):
        self._result = EndpointsTestResult()
        return self._result

    def run(self, test):
        super(EndpointsTestRunner, self).run(test)
        # After running a test, print out the table
        generate_a_nifty_table(self._result.endpoint_results)

class EndpointsDjangoRunner(django.test.runner.DiscoverRunner):
    test_runner = EndpointsTestRunner
Now we have our custom EndpointsDjangoRunner, and we should specify it in the settings.py:
TEST_RUNNER = 'path.to.the.EndpointsDjangoRunner'
That's it. Please let me know if you spot any awkward errors in the code.
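generate_a_nifty_table is left undefined above; as a rough, hypothetical illustration (the column set and formatting are assumptions), it could simply print one row per endpoint:

def generate_a_nifty_table(endpoint_results, codes=(200, 400, 403, 404)):
    # endpoint_results maps path -> {status_code: 'PASS' / 'FAIL' / 'MISSING'}
    header = ['Endpoint'] + [str(code) for code in codes]
    print(' | '.join('%-20s' % col for col in header))
    for path, results in sorted(endpoint_results.items()):
        row = [path] + [results.get(code, 'MISSING') for code in codes]
        print(' | '.join('%-20s' % cell for cell in row))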

Django: is there a way to count SQL queries from an unit test?

I am trying to find out the number of queries executed by a utility function. I have written a unit test for this function and the function is working well. What I would like to do is track the number of SQL queries executed by the function so that I can see if there is any improvement after some refactoring.
def do_something_in_the_database():
    # Does something in the database
    # return result

class DoSomethingTests(django.test.TestCase):
    def test_function_returns_correct_values(self):
        self.assertEqual(n, <number of SQL queries executed>)
EDIT: I found out that there is a pending Django feature request for this. However the ticket is still open. In the meantime is there another way to go about this?
Since Django 1.3 there is an assertNumQueries available exactly for this purpose.
One way to use it (as of Django 3.2) is as a context manager:
# measure queries of some_func and some_func2
with self.assertNumQueries(2):
    result = some_func()
    result2 = some_func2()
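assertNumQueries can also be called directly with a callable and its arguments instead of being used as a context manager (some_func is the same placeholder as above):

# equivalent: assert that calling some_func() executes exactly 2 queries
self.assertNumQueries(2, some_func)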
Vinay's response is correct, with one minor addition.
Django's unit test framework actually sets DEBUG to False when it runs, so no matter what you have in settings.py, you will not have anything populated in connection.queries in your unit test unless you re-enable debug mode. The Django docs explain the rationale for this as:
Regardless of the value of the DEBUG setting in your configuration file, all Django tests run with DEBUG=False. This is to ensure that the observed output of your code matches what will be seen in a production setting.
If you're certain that enabling debug will not affect your tests (such as if you're specifically testing DB hits, as it sounds like you are), the solution is to temporarily re-enable debug in your unit test, then set it back afterward:
def test_myself(self):
    from django.conf import settings
    from django.db import connection

    settings.DEBUG = True
    connection.queries = []

    # Test code as normal
    self.assert_(connection.queries)

    settings.DEBUG = False
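An alternative to flipping the flag by hand (not part of the original answer, but using Django's documented override_settings decorator) restores DEBUG automatically even if the test fails:

from django.test import override_settings
from django.db import connection, reset_queries

@override_settings(DEBUG=True)
def test_myself(self):
    reset_queries()
    # Test code as normal; assumes it actually hits the database
    self.assertTrue(connection.queries)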
If you are using pytest, pytest-django has a django_assert_num_queries fixture for this purpose:
def test_queries(django_assert_num_queries):
    with django_assert_num_queries(3):
        Item.objects.create(name='foo')
        Item.objects.create(name='bar')
        Item.objects.create(name='baz')
If you don't want to use TestCase (with assertNumQueries) or change settings to DEBUG=True, you can use the context manager CaptureQueriesContext (the same one assertNumQueries uses).
from django.db import ConnectionHandler
from django.test.utils import CaptureQueriesContext

DB_NAME = "default"  # name of the db configured in settings that you want to use - "default" is standard
connection = ConnectionHandler()[DB_NAME]

with CaptureQueriesContext(connection) as context:
    ...  # do your thing

num_queries = context.final_queries - context.initial_queries
assert num_queries == expected_num_queries
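Instead of constructing a new ConnectionHandler, you should also be able to pass the already-configured connection from django.db (this is essentially what the assertion helpers elsewhere in this thread do):

from django.db import connection  # the "default" alias
from django.test.utils import CaptureQueriesContext

with CaptureQueriesContext(connection) as context:
    ...  # do your thing

assert len(context.captured_queries) == expected_num_queries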
In modern Django (>=1.8) it's well documented (it's also documented for 1.7) that you have the method reset_queries instead of assigning connection.queries = [], which indeed raises an error. Something like this works on Django >= 1.8:
class QueriesTests(django.test.TestCase):
    def test_queries(self):
        from django.conf import settings
        from django.db import connection, reset_queries
        try:
            settings.DEBUG = True
            # [... your ORM code ...]
            self.assertEqual(len(connection.queries), num_of_expected_queries)
        finally:
            settings.DEBUG = False
            reset_queries()
You may also consider resetting queries in setUp/tearDown to ensure queries are reset for each test instead of doing it in the finally clause, but this way is more explicit (although more verbose). You can also call reset_queries inside the try clause as many times as you need to evaluate query counts starting from 0.
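A rough sketch of that setUp/tearDown variant (an illustration of the idea, not code from the answer; num_of_expected_queries is the same placeholder as above):

from django.conf import settings
from django.db import connection, reset_queries

class QueriesTests(django.test.TestCase):
    def setUp(self):
        settings.DEBUG = True
        reset_queries()

    def tearDown(self):
        settings.DEBUG = False
        reset_queries()

    def test_queries(self):
        # [... your ORM code ...]
        self.assertEqual(len(connection.queries), num_of_expected_queries)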
Here is a working prototype of a context manager, withAssertNumQueriesLessThan:
import json
from contextlib import contextmanager

from django.db import connections
from django.test.utils import CaptureQueriesContext

@contextmanager
def withAssertNumQueriesLessThan(self, value, using='default', verbose=False):
    with CaptureQueriesContext(connections[using]) as context:
        yield  # your test will be run here
    if verbose:
        msg = "\r\n%s" % json.dumps(context.captured_queries, indent=4)
    else:
        msg = None
    self.assertLess(len(context.captured_queries), value, msg=msg)
It can simply be used in your unit tests, for example for checking the number of queries per Django REST API call:
with self.withAssertNumQueriesLessThan(10):
    response = self.client.get('contacts/')
    self.assertEqual(response.status_code, 200)
You can also provide the exact DB alias via using, and pass verbose if you want to pretty-print the list of actual queries to stdout.
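For example (the 'default' alias is just the standard one from settings; adjust as needed):

# pretty-print the captured queries for the default database alias
with self.withAssertNumQueriesLessThan(10, using='default', verbose=True):
    response = self.client.get('contacts/')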
If you have DEBUG set to True in your settings.py (presumably so in your test environment) then you can count queries executed in your test as follows:
from django.db import connection

class DoSomethingTests(django.test.TestCase):
    def test_something_or_other(self):
        num_queries_old = len(connection.queries)
        do_something_in_the_database()
        num_queries_new = len(connection.queries)
        self.assertEqual(n, num_queries_new - num_queries_old)
If you want to use a decorator for that there is a nice gist:
import functools
import sys
import re

from django.conf import settings
from django.db import connection

def shrink_select(sql):
    return re.sub(r"^SELECT(.+)FROM", "SELECT .. FROM", sql)

def shrink_update(sql):
    return re.sub(r"SET(.+)WHERE", "SET .. WHERE", sql)

def shrink_insert(sql):
    return re.sub(r"\((.+)\)", "(..)", sql)

def shrink_sql(sql):
    return shrink_update(shrink_insert(shrink_select(sql)))

def _err_msg(num, expected_num, verbose, func=None):
    func_name = "%s:" % func.__name__ if func else ""
    msg = "%s Expected number of queries is %d, actual number is %d.\n" % (func_name, expected_num, num,)
    if verbose > 0:
        queries = [query['sql'] for query in connection.queries[-num:]]
        if verbose == 1:
            queries = [shrink_sql(sql) for sql in queries]
        msg += "== Queries == \n" + "\n".join(queries)
    return msg

def assertNumQueries(expected_num, verbose=1):
    class DecoratorOrContextManager(object):
        def __call__(self, func):  # decorator
            @functools.wraps(func)
            def inner(*args, **kwargs):
                handled = False
                try:
                    self.__enter__()
                    return func(*args, **kwargs)
                except:
                    self.__exit__(*sys.exc_info())
                    handled = True
                    raise
                finally:
                    if not handled:
                        self.__exit__(None, None, None)
            return inner

        def __enter__(self):
            self.old_debug = settings.DEBUG
            self.old_query_count = len(connection.queries)
            settings.DEBUG = True

        def __exit__(self, type, value, traceback):
            if not type:
                num = len(connection.queries) - self.old_query_count
                assert expected_num == num, _err_msg(num, expected_num, verbose)
            settings.DEBUG = self.old_debug

    return DecoratorOrContextManager()
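The gist itself doesn't show usage; since the returned object is both a decorator and a context manager, it can presumably be used in either form (note this module-level assertNumQueries is distinct from Django's built-in self.assertNumQueries; the expected counts here are only illustrative):

class DoSomethingTests(django.test.TestCase):
    @assertNumQueries(3)
    def test_as_decorator(self):
        do_something_in_the_database()

    def test_as_context_manager(self):
        with assertNumQueries(3, verbose=2):
            do_something_in_the_database()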