Why don't my Django unittests know that MessageMiddleware is installed?

I'm working on a Django project and am writing unittests for it. However, in a test, when I try to log a user in, I get this error:
MessageFailure: You cannot add messages without installing django.contrib.messages.middleware.MessageMiddleware
Logging in on the actual site works fine -- and a login message is displayed using the MessageMiddleware.
In my tests, if I do this:
from django.conf import settings
print settings.MIDDLEWARE_CLASSES
Then it outputs this:
('django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware')
Which appears to show the MessageMiddleware is installed when tests are run.
Is there an obvious step I'm missing?
UPDATE
After suggestions below, it does look like it's a settings thing.
I currently have settings/__init__.py like this:
try:
    from settings.development import *
except ImportError:
    pass
and settings/defaults.py containing most of the standard settings (including MIDDLEWARE_CLASSES). Then settings/development.py overrides some of those defaults like this:
from defaults import *
DEBUG = True
# etc
It looks like my dev site itself works fine, using the development settings. But although the tests seem to load the settings OK (both defaults and development), settings.DEBUG is set to False. I don't know why, or whether that's the cause of the problem.

In Django 1.4 this is expected behavior: creating the request with RequestFactory can trigger this error, because factory requests don't pass through any middleware. To resolve this issue, create your request with RequestFactory and then attach the session and message storage manually:
from django.contrib.messages.storage.fallback import FallbackStorage

# fake a session, then attach a message storage backend by hand,
# since RequestFactory requests skip the middleware entirely
setattr(request, 'session', 'session')
messages = FallbackStorage(request)
setattr(request, '_messages', messages)
Works for me!
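For context, here is a minimal sketch of how this fits into a full test. The view my_view and the URL are hypothetical placeholders:

from django.contrib.messages.storage.fallback import FallbackStorage
from django.test import RequestFactory, TestCase

from myapp.views import my_view  # hypothetical view that calls messages.add_message


class MyViewTests(TestCase):
    def test_view_adds_message(self):
        request = RequestFactory().get('/fake-path/')
        # fake the session and message storage, since no middleware runs here
        setattr(request, 'session', 'session')
        setattr(request, '_messages', FallbackStorage(request))
        response = my_view(request)
        self.assertEqual(response.status_code, 200)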

A quite elegant way to solve this is to mock the messages module using mock.
Say you have a class-based view named FooView in an app named myapp:
from django.contrib import messages
from django.views.generic import TemplateView


class FooView(TemplateView):
    def post(self, request, *args, **kwargs):
        ...
        messages.add_message(request, messages.SUCCESS, '\o/ Profit \o/')
        ...
You can now test it with:
def test_successful_post(self):
    # patch the module reference the view actually uses (myapp.views.messages)
    mock_messages = patch('myapp.views.messages').start()
    mock_messages.SUCCESS = success = 'super duper'
    request = self.rf.post('/', {})  # self.rf is a RequestFactory
    view = FooView.as_view()
    response = view(request)
    msg = '\o/ Profit \o/'
    mock_messages.add_message.assert_called_with(request, success, msg)
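One caveat worth noting: patches started with patch(...).start() stay active until stopped. A minimal sketch of a safe setUp, assuming a TestCase that provides self.rf (unittest.mock shown; the standalone mock package behaves the same):

from unittest.mock import patch

from django.test import RequestFactory, TestCase


class FooViewTests(TestCase):
    def setUp(self):
        self.rf = RequestFactory()
        # undo any patches started with .start() once each test finishes
        self.addCleanup(patch.stopall)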

In my case (Django 1.8) this problem occurred when a unit test called the signal handler for the user_logged_in signal; it looks like the messages app had not been called yet, i.e. request._messages was not yet set. This fails:
from django.contrib import messages
from django.contrib.auth.signals import user_logged_in
from django.dispatch import receiver
...

@receiver(user_logged_in)
def user_logged_in_handler(sender, user, request, **kwargs):
    ...
    messages.warning(request, "user has logged in")
The same call to messages.warning in a normal view function (called afterwards) works without any issues.
A workaround, based on one of the suggestions from https://code.djangoproject.com/ticket/17971, is to use the fail_silently argument only in the signal handler function. This solved my problem:
messages.warning(request, "user has logged in", fail_silently=True)

Do you only have one settings.py?

Tests create a custom test database. Maybe you have no messages there, or maybe you need setUp() fixtures. More info is needed to answer properly.
Why not simply do something like the following? You are sure you run tests in debug mode, right?
# settings.py
DEBUG = True

# where the message is sent:
from django.conf import settings

if not settings.DEBUG:
    # send your message ...
    pass

This builds on Tarsis Azevedo's answer by creating a MessagingRequest helper class below.
Given, say, a KittenAdmin I'd want to get 100% test coverage for:
from django.contrib import admin, messages


class KittenAdmin(admin.ModelAdmin):
    def warm_fuzzy_method(self, request):
        messages.warning(request, 'Can I haz cheezburger?')
I created a MessagingRequest helper class to use in, say, a test_helpers.py file:
from django.contrib.messages.storage.fallback import FallbackStorage
from django.http import HttpRequest


class MessagingRequest(HttpRequest):
    session = 'session'

    def __init__(self):
        super(MessagingRequest, self).__init__()
        self._messages = FallbackStorage(self)

    def get_messages(self):
        return getattr(self._messages, '_queued_messages')

    def get_message_strings(self):
        return [str(m) for m in self.get_messages()]
Then in a standard Django tests.py:
from django.contrib.admin.sites import AdminSite
from django.test import TestCase

from cats.kitten.admin import KittenAdmin
from cats.kitten.models import Kitten
from cats.kitten.test_helpers import MessagingRequest


class KittenAdminTest(TestCase):
    def test_kitten_admin_message(self):
        admin = KittenAdmin(model=Kitten, admin_site=AdminSite())
        expect = ['Can I haz cheezburger?']
        request = MessagingRequest()
        admin.warm_fuzzy_method(request)
        self.assertEqual(request.get_message_strings(), expect)
Results:
coverage run --include='cats/kitten/*' manage.py test; coverage report -m
Creating test database for alias 'default'...
.
----------------------------------------------------------------------
Ran 1 test in 0.001s
OK
Destroying test database for alias 'default'...
Name Stmts Miss Cover Missing
----------------------------------------------------------------------
cats/kitten/__init__.py 0 0 100%
cats/kitten/admin.py 4 0 100%
cats/kitten/migrations/0001_initial.py 5 0 100%
cats/kitten/migrations/__init__.py 0 0 100%
cats/kitten/models.py 3 0 100%
cats/kitten/test_helpers.py 11 0 100%
cats/kitten/tests.py 12 0 100%
----------------------------------------------------------------------
TOTAL 35 0 100%

This happened to me in the login_callback signal receiver function when called from a unit test; the way around the problem was:
from django.contrib.auth.signals import user_logged_in
from django.contrib.messages.storage import default_storage
from django.dispatch import receiver


@receiver(user_logged_in)
def login_callback(sender, user, request, **kwargs):
    if not hasattr(request, '_messages'):  # fails for tests
        request._messages = default_storage(request)
Django 2.0.x

I found that when I had a problem patching messages, the solution was to patch the module where it is used, inside the module under test (obsolete Django version BTW, YMMV). Pseudocode follows.
my_module.py:
from django.contrib import messages


class MyClass:
    def help(self):
        messages.add_message(self.request, messages.ERROR, "Foobar!")
test_my_module.py:
from unittest.mock import patch, MagicMock

from django.test import TestCase

from my_module import MyClass


class TestMyClass(TestCase):
    def test_help(self):
        with patch("my_module.messages") as mock_messages:
            mock_messages.add_message = MagicMock()
            MyClass().help()  # shouldn't complain about middleware

If you're seeing a problem in your middleware, then you're not doing a unit test. Unit tests exercise a single unit of functionality. If you interact with other parts of your system, you're doing something called "integration" testing.
You should try to write better-isolated tests, and this kind of problem shouldn't arise. Try RequestFactory. ;)
def test_some_view(self):
    factory = RequestFactory()
    user = get_mock_user()  # however you create your test user
    request = factory.get("/my/view")
    request.user = user
    response = my_view(request)
    self.assertEqual(response.status_code, 200)

Related

Why does mock patch only work when running specific test and not whole test suite?

I'm using Django and Pytest specifically to run the test suite and am trying to test that a specific form shows up with expected data when a user hits the site (integration test).
This particular view uses a stored procedure, which I am mocking since the test would never have access to that.
My test code looks like this:
# test_integrations.py
from my_app.tests.data_setup import setup_data, setup_sb7_data
from unittest.mock import patch
...

# Setup to use a non-headless browser so we can see what's happening for debugging
@pytest.mark.usefixtures("standard_browser")
class SeniorPageTestCase(StaticLiveServerTestCase):
    """
    These tests surround the senior form
    """

    @classmethod
    def setUpClass(cls):
        cls.host = socket.gethostbyname(socket.gethostname())
        super(SeniorPageTestCase, cls).setUpClass()

    def setUp(self):
        # setup the dummy data - this works fine
        basic_setup(self)
        # setup the 'results'
        self.sb7_mock_data = setup_sb7_data(self)

    @patch("my_app.utils.get_employee_sb7_data")
    def test_senior_form_displays(self, mock_sb7_get):
        # login the dummy user we created
        login_user(self, "futureuser")
        # setup the results
        mock_sb7_get.return_value = self.sb7_mock_data
        # hit the page for the form
        self.browser.get(self.live_server_url + "/my_app/senior")
        form_id = "SeniorForm"
        # assert that the form displays on the page
        self.assertTrue(self.browser.find_element_by_id(form_id))
# utils.py
from django.conf import settings
from django.db import connections


def get_employee_sb7_data(db_name, user_number, window):
    """
    Executes the stored procedure for getting employee data
    Args:
        user_number: Takes the user_number
        db (db connection): Takes a string of the DB to connect to
    Returns:
    """
    cursor = connections[db_name].cursor()
    cursor.execute(
        'exec sp_sb7 %s, "%s"' % (user_number, window.senior_close)
    )
    columns = [col[0] for col in cursor.description]
    results = [dict(zip(columns, row)) for row in cursor.fetchall()]
    return results
# views.py
from myapp.utils import (
    get_employee_sb7_data,
)
...

###### Senior ######
@login_required
@group_required("user_senior")
def senior(request):
    # Additional Logic / Getting Other Models here
    # Execute stored procedure to get data for user
    user_number = request.user.user_no
    results = get_employee_sb7_data("production_db", user_number, window)
    if not results:
        return render(request, "users/senior_not_required.html")
    # Additional view stuff
    return render(
        request,
        "users/senior.html",
        {
            "data": data,
            "form": form,
            "results": results,
        },
    )
If I run this test itself with:
pytest my_app/tests/test_integrations.py::SeniorPageTestCase
The tests pass without issue. The browser shows up - the form shows up with the dummy data as we would expect and it all works.
However, if I run:
pytest my_app
All other tests run and pass - but all the tests in this class fail because it's not patching the function.
It tries to call the actual stored procedure (which fails because it's not on the production server yet) and it fails.
Why would it patch correctly when I call that TestCase specifically - but not patch correctly when I just run pytest on the app or project level?
I'm at a loss and not sure how to debug this very well. Any help is appreciated.
So what's happening is that your views are imported before you're patching.
Let's first look at the working case:

- pytest imports the test_integrations file
- the test is executed and the patch decorator's inner function is run
- there is no import of the utils yet, so patch imports the module and replaces the function
- the test body is executed, which passes a URL to the test client
- the test client imports the resolver, which in turn imports the views, which import the utils; since the utils are already patched, everything works fine

If another test case that also imports the same views runs first, then that import wins and patch cannot replace the name the views already hold.
Your solution is to reference the same symbol the views use. So in test_integrations.py:

@patch("myapp.views.get_employee_sb7_data")

Testing Django Rest Framework: how to test hyperlink relations?

I'm trying to create a true unit test for a customized DjangoRestFramework Hyperlinked related field. But I cannot seem to get around this error:
django.core.exceptions.ImproperlyConfigured: Could not resolve URL for hyperlinked relationship using view name "relatedtestmodel-detail". You may have failed to include the related model in your API, or incorrectly configured the `lookup_field` attribute on this field.
And here is the unit test, stripped down to simplify the example:
from django.conf.urls import url
from django.test import TestCase, override_settings

from api_tests.models import APITestModel, RelatedTestModel
from api_tests.serializers import APITestModelSerializer


def dummy_view(request, pk):
    pass


urlpatterns = [
    url(r'/path/is/irrelevant/', dummy_view, name='relatedtestmodel-detail')
]


@override_settings(ROOT_URLCONF='tests.test_relations')
class HyperlinkedRelatedFieldTestCase(TestCase):
    def setUp(self):
        self.parent = APITestModel.objects.create()
        self.child = RelatedTestModel.objects.create(parent=self.parent)
        assert self.child.parent.id == self.parent.id

    def test_to_internal_value_correct_error_message(self):
        queryset = APITestModel.objects.all()
        serializer = APITestModelSerializer(queryset, many=True, context={'request': None})
        expected = [{'foo': 'bar'}]
        self.assertEqual(serializer.data, expected)
I more or less lifted the test from https://github.com/encode/django-rest-framework/blob/master/tests/test_relations_hyperlink.py, because I figured: who knows better how to unit test DRF than the makers of DRF? But as it stands, my test refuses to run. The error is raised during the test, when I attempt to access serializer.data for the assert.
Notice in particular that I override the settings with a custom urlpatterns (which is this same file, hence the urlpatterns at the top). So I don't understand why DRF thinks that URL name doesn't exist: I can clearly see that my URL conf in fact has ONLY that view name! I've even gone so far as to edit my actual URL conf, replace it with the single, named, dummy URL pattern shown here, and remove the settings override, just to be sure it wasn't that override_settings simply wasn't working, but even then I get the same error.
To my eye, the dummy url pattern is exactly the same as how DRF did it in their tests. Anyone have any ideas what is going on?
A bit more requested context:
api_tests.models:
from django.db import models


class APITestModel(models.Model):
    pass


class RelatedTestModel(models.Model):
    parent = models.ForeignKey(
        APITestModel,
        related_name='children',
        related_query_name='child'
    )
I do not have access to the traceback at this time, but I can confirm it did not pass through any of my code - it was all isolated to the DjangoRestFramework code, basically exclusively relations.py
Preamble
A few things this question is lacking:

- No definition of APITestModelSerializer
- RelatedTestModel is not used in the test and is therefore irrelevant for the example
- No error stacktrace
- No "useful" expected dict for the asserts
- APITestModel has no fields, so it can't be serialized (your test shouldn't even have run)

Minor things but still relevant:

- You are creating specific instances of APITestModel and RelatedTestModel in setUp, but in the test you serialize all instances of APITestModel
- The line assert self.child.parent.id == self.parent.id should not be in setUp. It should be in a separate test

My changes

- I deleted all irrelevant information for this question mentioned above
- I added an integer field to APITestModel
- I changed the urlpatterns element from url(...) to path(...)
- I added a pk path converter to the relative path
- The serializer is a subclass of HyperlinkedModelSerializer and includes the fields "url" and "year"
- My project and app urls.py files are the "stock" ones (not shown here) to emphasize that this test resolves the path in isolation
- Changed @override_settings(ROOT_URLCONF='tests.test_relations') to @override_settings(ROOT_URLCONF=__name__)
Code
models.py
from django.db import models


class APITestModel(models.Model):
    year = models.IntegerField(null=False)
serializers.py
from rest_framework import serializers

from api.models import APITestModel


class APITestModelSerializer(serializers.HyperlinkedModelSerializer):
    class Meta:
        model = APITestModel
        fields = ["url", "year"]
test_serializers.py
from django.test import TestCase, override_settings
from django.urls import path

from api.models import APITestModel
from api.serializers import APITestModelSerializer

urlpatterns = [
    # dummy view; it accepts the pk supplied by the path converter
    path('whateveryouwant/<int:pk>/', lambda request, pk: None, name='apitestmodel-detail'),
]


@override_settings(ROOT_URLCONF=__name__)
class HyperlinkedRelatedFieldTestCase(TestCase):
    def setUp(self):
        # Populate db with APITestModel instances
        _ = APITestModel.objects.create(year=1960)
        _ = APITestModel.objects.create(year=1961)
        _ = APITestModel.objects.create(year=1962)

    def test_to_internal_value_correct_error_message(self):
        queryset = APITestModel.objects.all()
        serializer = APITestModelSerializer(queryset, many=True, context={'request': None})
        expected = [
            {'url': '/whateveryouwant/1/', 'year': 1960},
            {'url': '/whateveryouwant/2/', 'year': 1961},
            {'url': '/whateveryouwant/3/', 'year': 1962},
        ]
        self.assertEqual(serializer.data, expected)
The other files in the project are the default ones created automatically by django + djangorestframework.
For future readers, I created a github project with this working code; it can be found here: https://github.com/Alechan/drf_test_hyperlink_relations

Django unittest with legacy database connection

I have a Django project that pulls data from a legacy database (read-only connection) into its own database, and when I run integration tests, it tries to read from test_account on the legacy connection.
(1049, "Unknown database 'test_account'")
Is there a way to tell Django to leave the legacy connection alone for reading from the test database?
I actually wrote something that lets you create integration tests in djenga (available on PyPI) if you want to take a look at how to create a separate integration test framework.
Here is the test runner I use with the django unit test framework:

from django.conf import settings
from django.test.runner import DiscoverRunner


class UnManagedModelTestRunner(DiscoverRunner):
    """
    Test runner that uses a legacy database connection for the duration of the test run.
    Many thanks to the Caktus Group: https://www.caktusgroup.com/blog/2013/10/02/skipping-test-db-creation/
    """

    def __init__(self, *args, **kwargs):
        super(UnManagedModelTestRunner, self).__init__(*args, **kwargs)
        self.unmanaged_models = None
        self.test_connection = None
        self.live_connection = None
        self.old_names = None

    def setup_databases(self, **kwargs):
        # override keepdb so that we don't accidentally overwrite our existing legacy database
        self.keepdb = True
        # set the test DB name to the current DB name, which makes this more of an
        # integration test, but HEY, at least it's a start
        settings.DATABASES['legacy']['TEST'] = {'NAME': settings.DATABASES['legacy']['NAME']}
        return super(UnManagedModelTestRunner, self).setup_databases(**kwargs)
# Set Django's test runner to the custom class defined above
TEST_RUNNER = 'config.settings.test_settings.UnManagedModelTestRunner'
TEST_NON_SERIALIZED_APPS = ['legacy_app']
from django.test import TestCase, override_settings


@override_settings(LOGIN_URL='/other/login/')
class LoginTestCase(TestCase):
    def test_login(self):
        response = self.client.get('/sekrit/')
        self.assertRedirects(response, '/other/login/?next=/sekrit/')
https://docs.djangoproject.com/en/1.10/topics/testing/tools/
You should theoretically be able to use override_settings here and switch to a different database configuration for the duration of the test.
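If you go the settings route, a related pattern from the Django docs (not part of the answer above, just a sketch) is a database router that keeps writes, migrations, and test-database setup away from the legacy alias. The alias 'legacy' and app label 'legacy_app' are assumptions:

class LegacyRouter:
    """Route legacy-app reads to the read-only legacy connection."""

    def db_for_read(self, model, **hints):
        if model._meta.app_label == 'legacy_app':
            return 'legacy'
        return None

    def db_for_write(self, model, **hints):
        # never write to the read-only legacy database
        return None if model._meta.app_label == 'legacy_app' else 'default'

    def allow_migrate(self, db, app_label, model_name=None, **hints):
        # keep migrate (and the test-DB creation steps) off the legacy connection
        return db != 'legacy'

Register it with DATABASE_ROUTERS = ['path.to.LegacyRouter'] in settings.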

How to persist data to DB between tests with pytest-django?

How can I persist data to DB when using pytest/pytest-django in a test-run of a Django application?
I run pytest with py.test --nomigrations --reuse-db -s and the Postgres DB test_<configued_db_name> is created as expected, however nothing seems to be persisted to DB between tests and at the end of the test run the DB is empty.
import pytest
from django.contrib.auth.models import User


@pytest.mark.django_db(transaction=False)
def test_insert_user():
    user = User.objects.create_user(username="test_user", email="test_user@test.com", password="test")
    users = User.objects.all()
    assert len(users) > 0


@pytest.mark.django_db(transaction=False)
def test_check_user():
    users = User.objects.all()
    assert len(users) > 0
The first test passes, the second does not, making me wonder if anything is persisted to DB at all. According to the pytest-django documentation, @pytest.mark.django_db(transaction=False) will not roll back whatever has been affected by the decorated test.
Another way of prefilling the database with data for each test function is this:
import pytest
from django.contrib.auth.models import User


@pytest.fixture(scope='module')
def django_db_setup(django_db_setup, django_db_blocker):
    print('setup')
    with django_db_blocker.unblock():
        User.objects.create(username='a')
        assert set(u.username for u in User.objects.all()) == {'a'}


@pytest.mark.django_db
def test1():
    print('test1')
    User.objects.create(username='b')
    assert set(u.username for u in User.objects.all()) == {'a', 'b'}


@pytest.mark.django_db
def test2():
    print('test2')
    User.objects.create(username='c')
    assert set(u.username for u in User.objects.all()) == {'a', 'c'}
The good thing about this method is that the setup function is only called once:
plugins: django-3.1.2
collected 2 items
mytest.py setup
test1
.test2
.
=================== 2 passed in 1.38 seconds ====================
The bad thing is that 1.38 seconds is a bit too much for such a simple test. --reuse-db is a faster way to do it.
I have solved this problem -- prefilling the DB for every test function -- by defining a fixture with function scope (i.e. module and session scope will not work).
Here is the code for testing the views in Django.
# This is used to fill the database more easily
from mixer.backend.django import mixer
import pytest
from django.test import RequestFactory

from inventory import views
from inventory import services

pytestmark = pytest.mark.django_db


@pytest.fixture(scope="function")
def fill_db():
    """ Just filling the DB with my data """
    for elem in services.Search().get_lookup_data():
        mixer.blend('inventory.Enumeration', **elem)


def test_grid_anonymous(fill_db):
    request = RequestFactory().get('/grid/')
    response = views.items_grid(request)
    assert response.status_code == 200, \
        "Should be callable by anyone"


def test_list_anonymous(fill_db):
    request = RequestFactory().get('/')
    response = views.items_list(request)
    assert response.status_code == 200, \
        "Should be callable by anyone"

Django: is there a way to count SQL queries from an unit test?

I am trying to find out the number of queries executed by a utility function. I have written a unit test for this function and the function is working well. What I would like to do is track the number of SQL queries executed by the function so that I can see if there is any improvement after some refactoring.
def do_something_in_the_database():
    # Does something in the database
    # return result
    ...


class DoSomethingTests(django.test.TestCase):
    def test_function_returns_correct_values(self):
        self.assertEqual(n, <number of SQL queries executed>)
EDIT: I found out that there is a pending Django feature request for this. However the ticket is still open. In the meantime is there another way to go about this?
Since Django 1.3 there has been an assertNumQueries available exactly for this purpose.
One way to use it (as of Django 3.2) is as a context manager:
# measure queries of some_func and some_func2
with self.assertNumQueries(2):
    result = some_func()
    result2 = some_func2()
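assertNumQueries also accepts a callable directly instead of being used as a context manager (the documented form is roughly assertNumQueries(num, func, *args, **kwargs)); some_func and some_func2 are placeholders:

# equivalent callable form
self.assertNumQueries(1, some_func)
self.assertNumQueries(2, some_func2, "arg", kwarg="value")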
Vinay's response is correct, with one minor addition.
Django's unit test framework actually sets DEBUG to False when it runs, so no matter what you have in settings.py, you will not have anything populated in connection.queries in your unit test unless you re-enable debug mode. The Django docs explain the rationale for this as:
Regardless of the value of the DEBUG setting in your configuration file, all Django tests run with DEBUG=False. This is to ensure that the observed output of your code matches what will be seen in a production setting.
If you're certain that enabling debug will not affect your tests (such as if you're specifically testing DB hits, as it sounds like you are), the solution is to temporarily re-enable debug in your unit test, then set it back afterward:
def test_myself(self):
    from django.conf import settings
    from django.db import connection

    settings.DEBUG = True
    connection.queries = []
    # Test code as normal
    self.assert_(connection.queries)
    settings.DEBUG = False
If you are using pytest, pytest-django provides the django_assert_num_queries fixture for this purpose:
def test_queries(django_assert_num_queries):
    with django_assert_num_queries(3):
        # Item and its name field are placeholders for your own model
        Item.objects.create(name='foo')
        Item.objects.create(name='bar')
        Item.objects.create(name='baz')
If you don't want to use TestCase (with assertNumQueries) or to change settings to DEBUG=True, you can use the context manager CaptureQueriesContext (the same thing assertNumQueries uses).
from django.db import connections
from django.test.utils import CaptureQueriesContext

DB_NAME = "default"  # alias of the DB configured in settings you want to use; "default" is standard
connection = connections[DB_NAME]  # use the shared handler so you capture the connection your code uses

with CaptureQueriesContext(connection) as context:
    ...  # do your thing

num_queries = context.final_queries - context.initial_queries
assert num_queries == expected_num_queries
In modern Django (>=1.8) this is well documented (it's also documented for 1.7): you have the method reset_queries instead of assigning connection.queries = [], which indeed raises an error. Something like this works on Django >= 1.8:
class QueriesTests(django.test.TestCase):
    def test_queries(self):
        from django.conf import settings
        from django.db import connection, reset_queries

        try:
            settings.DEBUG = True
            # [... your ORM code ...]
            self.assertEqual(len(connection.queries), num_of_expected_queries)
        finally:
            settings.DEBUG = False
            reset_queries()
You may also consider resetting queries in setUp/tearDown to ensure queries are reset for each test instead of doing it in the finally clause (see the sketch below), but this way is more explicit (although more verbose); or you can use reset_queries in the try clause as many times as you need to count queries from 0.
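A minimal sketch of that setUp/tearDown variant (the class name and expected count are placeholders):

import django.test
from django.conf import settings
from django.db import connection, reset_queries


class QueriesTests(django.test.TestCase):
    def setUp(self):
        settings.DEBUG = True  # connection.queries is only populated while DEBUG is on
        reset_queries()        # start every test from a clean slate

    def tearDown(self):
        settings.DEBUG = False
        reset_queries()

    def test_queries(self):
        # [... your ORM code ...]
        self.assertEqual(len(connection.queries), 0)  # placeholder expectation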
Here is a working prototype of the context manager withAssertNumQueriesLessThan:
import json
from contextlib import contextmanager

from django.db import connections
from django.test.utils import CaptureQueriesContext


@contextmanager
def withAssertNumQueriesLessThan(self, value, using='default', verbose=False):
    with CaptureQueriesContext(connections[using]) as context:
        yield  # your test will be run here
    if verbose:
        msg = "\r\n%s" % json.dumps(context.captured_queries, indent=4)
    else:
        msg = None
    self.assertLess(len(context.captured_queries), value, msg=msg)
It can simply be used in your unit tests, for example to check the number of queries per Django REST API call:
with self.withAssertNumQueriesLessThan(10):
    response = self.client.get('contacts/')
    self.assertEqual(response.status_code, 200)
You can also specify the exact DB with the using argument, and pass verbose=True if you want to pretty-print the list of actual queries to stdout.
If you have DEBUG set to True in your settings.py (presumably so in your test environment), then you can count the queries executed in your test as follows:
from django.db import connection


class DoSomethingTests(django.test.TestCase):
    def test_something_or_other(self):
        num_queries_old = len(connection.queries)
        do_something_in_the_database()
        num_queries_new = len(connection.queries)
        self.assertEqual(n, num_queries_new - num_queries_old)
If you want to use a decorator for that, there is a nice gist:
import functools
import sys
import re

from django.conf import settings
from django.db import connection


def shrink_select(sql):
    return re.sub(r"^SELECT(.+)FROM", "SELECT .. FROM", sql)


def shrink_update(sql):
    return re.sub(r"SET(.+)WHERE", "SET .. WHERE", sql)


def shrink_insert(sql):
    return re.sub(r"\((.+)\)", "(..)", sql)


def shrink_sql(sql):
    return shrink_update(shrink_insert(shrink_select(sql)))


def _err_msg(num, expected_num, verbose, func=None):
    func_name = "%s:" % func.__name__ if func else ""
    msg = "%s Expected number of queries is %d, actual number is %d.\n" % (func_name, expected_num, num)
    if verbose > 0:
        queries = [query['sql'] for query in connection.queries[-num:]]
        if verbose == 1:
            queries = [shrink_sql(sql) for sql in queries]
        msg += "== Queries == \n" + "\n".join(queries)
    return msg


def assertNumQueries(expected_num, verbose=1):
    class DecoratorOrContextManager(object):
        def __call__(self, func):  # decorator
            @functools.wraps(func)
            def inner(*args, **kwargs):
                handled = False
                try:
                    self.__enter__()
                    return func(*args, **kwargs)
                except:
                    self.__exit__(*sys.exc_info())
                    handled = True
                    raise
                finally:
                    if not handled:
                        self.__exit__(None, None, None)
            return inner

        def __enter__(self):
            self.old_debug = settings.DEBUG
            self.old_query_count = len(connection.queries)
            settings.DEBUG = True

        def __exit__(self, type, value, traceback):
            if not type:
                num = len(connection.queries) - self.old_query_count
                assert expected_num == num, _err_msg(num, expected_num, verbose)
            settings.DEBUG = self.old_debug

    return DecoratorOrContextManager()
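A short usage sketch for the gist: the object returned by assertNumQueries works both as a decorator and as a context manager (the model, test names, and counts are placeholders):

# as a decorator
@assertNumQueries(2)
def test_two_queries(self):
    list(User.objects.all())
    list(User.objects.filter(is_active=True))


# as a context manager, with full query output on failure
def test_one_query(self):
    with assertNumQueries(1, verbose=2):
        list(User.objects.all())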