nose @with_setup settings fail to be used in the test - Flask

I'm using nose to test a Flask app. I'm trying to use the with_setup decorator to keep my tests DRY, without having to repeat the setup in every test function, but it doesn't seem to run the @with_setup phase. As the docs state, I'm using it with test functions and not test classes. Some code:
import os

from flask import *
from app import app
from nose.tools import eq_, assert_true
from nose import with_setup

testapp = app.test_client()

def setup():
    app.config['TESTING'] = True
    RUNNING_LOCAL = True
    RUN_FOLDER = os.path.dirname(os.path.realpath(__file__))
    fixture = {'html_hash': 'aaaa'}  # mocking the hash

def teardown():
    app.config['TESTING'] = False
    RUNNING_LOCAL = False

@with_setup(setup, teardown)
def test_scrape_wellformed_html():
    # RUN_FOLDER = os.path.dirname(os.path.realpath(__file__))  # if it is here instead of inside setup() the code works...
    # fixture = {'html_hash': 'aaaa'}  # mocking the hash; if it is here the code works
    fixture['gush_id'] = 'current.fixed'
    data = scrape_gush(fixture, RUN_FOLDER)
    # ... various assertions ...
For example, if I create the fixture dict inside setup() (the @with_setup function) instead of inside each specific test method, I get a NameError (or something similar).
I guess I'm missing something, I'm just not sure what.
Thanks for the help!

The issue is that the names RUN_FOLDER and fixture are scoped to the function setup and so will not be available to test_scrape_wellformed_html. If you look at the code for with_setup you will see that it does not do anything to alter the test function's environment.
In order to do what you want, you need to make your fixtures global variables:
testapp = app.test_client()
RUN_FOLDER = os.path.dirname(os.path.realpath(__file__))
fixture = None

def setup():
    global fixture
    app.config['TESTING'] = True
    fixture = {'html_hash': 'aaaa'}  # mocking the hash

def teardown():
    global fixture
    app.config['TESTING'] = False
    fixture = None

@with_setup(setup, teardown)
def test_scrape_wellformed_html():
    # run test here


How to keep data created in the ready() method? Django production vs test database

As you know, Django gives you a clean database for testing, but I have a ready() method that creates some data for me, and I need to query that data in my tests.
class YourAppConfig(AppConfig):
    default_auto_field = 'django.db.models.AutoField'
    name = 'Functions.MyAppsConfig'

    def ready(self):
        from django.contrib.auth.models import Permission
        from django import apps
        from django.contrib.contenttypes.models import ContentType
        try:
            Permission.objects.get_or_create(....)
            MyOtherModel.objects.get_or_create(....)
        except:
            pass

class TestRules(APITestCase):
    def test_my_model(self):
        ....
        x = MyOtherModel.objects.filter(....).first()
        # x = None  # <=========== problem is here: I need to see the data created in the ready() method
        ....
You can use fixtures for that. In each test case you can attach fixtures to it, as stated in the documentation. An example is:
class Test(TransactionTestCase):
    fixtures = ['user-data.json']

    def setUp(self):
        …
Django will load the fixtures before running each test case.
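If you would rather not maintain a fixture file, an alternative (not part of the answer above, just a common companion technique) is to recreate the data inside the test class itself with setUpTestData. A minimal sketch, with the Permission values and MyOtherModel fields invented for illustration:
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from rest_framework.test import APITestCase

class TestRules(APITestCase):
    @classmethod
    def setUpTestData(cls):
        # Runs once per test class, inside the test database, so the rows
        # are visible to every test method below.
        content_type = ContentType.objects.get_for_model(MyOtherModel)  # MyOtherModel as in the question
        Permission.objects.get_or_create(
            codename='can_do_thing',            # hypothetical permission
            name='Can do thing',
            content_type=content_type,
        )
        MyOtherModel.objects.get_or_create(name='seed row')  # hypothetical field

    def test_my_model(self):
        x = MyOtherModel.objects.filter(name='seed row').first()
        self.assertIsNotNone(x)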

How to mock redis for Django tests

I am trying to mock out redis in my Django application. I have tried several different methods but none seem to work. What am I doing wrong?
My primary redis instance is called with:
redis_client = redis.from_url(os.environ.get("REDIS_URL"))
That instance is imported in other parts of the app in order to add and retrieve data.
In my tests I tried doing:
import fakeredis
from mock import patch

class TestViews(TestCase):
    def setUp(self):
        redis_patcher = patch('redis.Redis', fakeredis.FakeRedis)
        self.redis = redis_patcher.start()
        self.redis.set('UPDATE', 'Spring')
        print(redis_client.get('UPDATE'))

    def tearDown(self):
        self.redis_patcher.stop
When running the tests I want the 'UPDATE' variable to be set. But instead every instance of redis_client fails saying the server is not available. How can I mock out redis and set values, so that they are available when testing my app?
You should mock an item where it is used, not where it came from.
So if redis_client is used in a view like this:
myapp/views.py
from somemodule import redis_client

def some_view_that_uses_redis(request):
    result = redis_client(...)
Then in your TestViews you should patch redis_client like this:
class TestViews(TestCase):
    def setUp(self):
        redis_patcher = patch('myapp.views.redis_client', fakeredis.FakeRedis())
        self.redis = redis_patcher.start()
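A slightly fuller sketch of the same idea, assuming the hypothetical myapp/views.py module above, using addCleanup so the patch is always undone:
import fakeredis
from unittest.mock import patch
from django.test import TestCase

class TestViews(TestCase):
    def setUp(self):
        self.fake_redis = fakeredis.FakeRedis()
        patcher = patch('myapp.views.redis_client', self.fake_redis)
        patcher.start()
        self.addCleanup(patcher.stop)           # always undo the patch, even if a test fails
        self.fake_redis.set('UPDATE', 'Spring')

    def test_view_sees_seeded_value(self):
        # the view module now talks to the fake instance seeded above
        self.assertEqual(self.fake_redis.get('UPDATE'), b'Spring')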

Django unittest with legacy database connection

I have a Django project that pulls data from a legacy database (read-only connection) into its own database, and when I run integration tests, it tries to read from test_account on the legacy connection.
(1049, "Unknown database 'test_account'")
Is there a way to tell Django to leave the legacy connection alone for reading from the test database?
I actually wrote something that lets you create integration tests in djenga (available on PyPI) if you want to take a look at how to create a separate integration test framework.
Here is the test runner I use when using the django unit test framework:
from django.test.runner import DiscoverRunner
from django.apps import apps
import sys

class UnManagedModelTestRunner(DiscoverRunner):
    """
    Test runner that uses a legacy database connection for the duration of the test run.
    Many thanks to the Caktus Group: https://www.caktusgroup.com/blog/2013/10/02/skipping-test-db-creation/
    """
    def __init__(self, *args, **kwargs):
        super(UnManagedModelTestRunner, self).__init__(*args, **kwargs)
        self.unmanaged_models = None
        self.test_connection = None
        self.live_connection = None
        self.old_names = None

    def setup_databases(self, **kwargs):
        # override keepdb so that we don't accidentally overwrite our existing legacy database
        self.keepdb = True
        # set the test DB name to the current DB name, which makes this more of an
        # integration test, but HEY, at least it's a start
        # (DATABASES is the dict from the settings module this runner is defined in)
        DATABASES['legacy']['TEST'] = {'NAME': DATABASES['legacy']['NAME']}
        result = super(UnManagedModelTestRunner, self).setup_databases(**kwargs)
        return result

# Set Django's test runner to the custom class defined above
TEST_RUNNER = 'config.settings.test_settings.UnManagedModelTestRunner'
TEST_NON_SERIALIZED_APPS = ['legacy_app']
from django.test import TestCase, override_settings

@override_settings(LOGIN_URL='/other/login/')
class LoginTestCase(TestCase):
    def test_login(self):
        response = self.client.get('/sekrit/')
        self.assertRedirects(response, '/other/login/?next=/sekrit/')
https://docs.djangoproject.com/en/1.10/topics/testing/tools/
You should theoretically be able to use override_settings here and switch the legacy connection to a different database configuration for your tests.
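Another piece that often helps with a read-only legacy connection (not part of the answer above, just a common companion technique) is a database router that keeps migrations and test-table creation away from the legacy alias. A sketch, assuming the 'legacy' alias and 'legacy_app' label used above:
class LegacyRouter:
    """Keep Django from migrating or creating tables on the legacy connection."""

    def db_for_read(self, model, **hints):
        if model._meta.app_label == 'legacy_app':
            return 'legacy'
        return None

    def allow_migrate(self, db, app_label, model_name=None, **hints):
        if db == 'legacy':
            return False          # never run migrations on the legacy database
        if app_label == 'legacy_app':
            return False          # legacy models don't get tables created elsewhere either
        return None

# settings.py
DATABASE_ROUTERS = ['config.settings.routers.LegacyRouter']  # hypothetical dotted path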

Integration test of a Scrapy pipeline returning a deferred

Is it possible to create an integration test of a Scrapy pipeline? I can't figure out how to do this. In particular, I am trying to write a test for the FilesPipeline, and I also want it to persist my mocked response to Amazon S3.
Here is my test:
def _mocked_download_func(request, info):
    return Response(url=request.url, status=200, body="test", request=request)

class FilesPipelineTests(unittest.TestCase):
    def setUp(self):
        self.settings = get_project_settings()
        crawler = Crawler(self.settings)
        crawler.configure()
        self.pipeline = FilesPipeline.from_crawler(crawler)
        self.pipeline.open_spider(None)
        self.pipeline.download_func = _mocked_download_func

    @defer.inlineCallbacks
    def test_file_should_be_directly_available_from_s3_when_processed(self):
        item = CrawlResult()
        item['id'] = "test"
        item['file_urls'] = ['http://localhost/test']
        result = yield self.pipeline.process_item(item, None)
        self.assertEquals(result['files'][0]['path'], "full/002338a87aab86c6b37ffa22100504ad1262f21b")
I always run into the following error:
DirtyReactorAggregateError: Reactor was unclean.
How do I create a proper test using Twisted and Scrapy?
Up to now I have done my pipeline tests without the call to from_crawler, so they are not ideal, because they do not test the functionality of from_crawler, but they work.
I do them by using an empty Spider instance:
from unittest import mock

import pytest
from scrapy.spiders import Spider
# some other imports for my own stuff and standard libs

@pytest.fixture
def mqtt_client():
    client = mock.Mock()
    return client

def test_mqtt_pipeline_does_return_item_after_process(mqtt_client):
    spider = Spider(name='spider')
    pipeline = MqttOutputPipeline(mqtt_client, 'dummy-namespace')

    item = BasicItem()
    item['url'] = 'http://example.com/'
    item['source'] = 'dummy source'

    ret = pipeline.process_item(item, spider)
    assert ret is not None
(in fact, I forgot to call open_spider())
You can also have a look at how scrapy itself does the testing of pipelines, e.g. for MediaPipeline:
class BaseMediaPipelineTestCase(unittest.TestCase):

    pipeline_class = MediaPipeline
    settings = None

    def setUp(self):
        self.spider = Spider('media.com')
        self.pipe = self.pipeline_class(download_func=_mocked_download_func,
                                        settings=Settings(self.settings))
        self.pipe.open_spider(self.spider)
        self.info = self.pipe.spiderinfo

    def test_default_media_to_download(self):
        request = Request('http://url')
        assert self.pipe.media_to_download(request, self.info) is None
You can also have a look through their other unit tests. For me, these are always good inspiration on how to unit test scrapy components.
If you want to test the from_crawler function, too, you could have a look on their Middleware tests. In these tests, they often use from_crawler to create middlewares, e.g. for OffsiteMiddleware.
from unittest import TestCase

from scrapy.spidermiddlewares.offsite import OffsiteMiddleware
from scrapy.spiders import Spider
from scrapy.utils.test import get_crawler

class TestOffsiteMiddleware(TestCase):
    def setUp(self):
        crawler = get_crawler(Spider)
        self.spider = crawler._create_spider(**self._get_spiderargs())
        self.mw = OffsiteMiddleware.from_crawler(crawler)
        self.mw.spider_opened(self.spider)
I assume the key component here is the call to get_crawler from scrapy.utils.test. It seems they factored out some of the calls you need to make in order to set up a testing environment.
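Applied to the original question, here is a rough sketch (pytest style) of creating the pipeline through from_crawler via get_crawler; the MyFilesPipeline subclass and the FILES_STORE value are assumptions for illustration, not code from the question:
from scrapy.spiders import Spider
from scrapy.utils.test import get_crawler

def test_pipeline_is_built_via_from_crawler(tmp_path):
    # get_crawler builds a Crawler with a proper Settings object for tests
    crawler = get_crawler(Spider, settings_dict={'FILES_STORE': str(tmp_path)})
    pipeline = MyFilesPipeline.from_crawler(crawler)   # hypothetical FilesPipeline subclass
    spider = crawler._create_spider('example.com')
    pipeline.open_spider(spider)
    assert pipeline is not None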

Django: is there a way to count SQL queries from a unit test?

I am trying to find out the number of queries executed by a utility function. I have written a unit test for this function and the function is working well. What I would like to do is track the number of SQL queries executed by the function so that I can see if there is any improvement after some refactoring.
def do_something_in_the_database():
    # Does something in the database
    # return result

class DoSomethingTests(django.test.TestCase):
    def test_function_returns_correct_values(self):
        self.assertEqual(n, <number of SQL queries executed>)
EDIT: I found out that there is a pending Django feature request for this. However the ticket is still open. In the meantime is there another way to go about this?
Since Django 1.3 there is assertNumQueries, available exactly for this purpose.
One way to use it (as of Django 3.2) is as a context manager:
# measure queries of some_func and some_func2
with self.assertNumQueries(2):
    result = some_func()
    result2 = some_func2()
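assertNumQueries can also be called directly with a callable instead of being used as a context manager:
# equivalent to the context manager form above
self.assertNumQueries(2, lambda: (some_func(), some_func2()))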
Vinay's response is correct, with one minor addition.
Django's unit test framework actually sets DEBUG to False when it runs, so no matter what you have in settings.py, you will not have anything populated in connection.queries in your unit test unless you re-enable debug mode. The Django docs explain the rationale for this as:
Regardless of the value of the DEBUG setting in your configuration file, all Django tests run with DEBUG=False. This is to ensure that the observed output of your code matches what will be seen in a production setting.
If you're certain that enabling debug will not affect your tests (such as if you're specifically testing DB hits, as it sounds like you are), the solution is to temporarily re-enable debug in your unit test, then set it back afterward:
def test_myself(self):
    from django.conf import settings
    from django.db import connection

    settings.DEBUG = True
    connection.queries = []

    # Test code as normal
    self.assert_(connection.queries)

    settings.DEBUG = False
If you are using pytest, pytest-django has the django_assert_num_queries fixture for this purpose:
def test_queries(django_assert_num_queries):
    with django_assert_num_queries(3):
        Item.objects.create('foo')
        Item.objects.create('bar')
        Item.objects.create('baz')
If you don't want to use TestCase (with assertNumQueries) or to change settings to DEBUG=True, you can use the context manager CaptureQueriesContext (which assertNumQueries itself uses).
from django.db import connections
from django.test.utils import CaptureQueriesContext

DB_NAME = "default"  # alias of the DB configured in settings that you want to use - "default" is standard
connection = connections[DB_NAME]

with CaptureQueriesContext(connection) as context:
    ...  # do your thing
num_queries = context.final_queries - context.initial_queries
assert num_queries == expected_num_queries
In modern Django (>=1.8) this is well documented (it is also documented for 1.7): you have the method reset_queries instead of assigning connection.queries = [], which would indeed raise an error. Something like this works on Django >= 1.8:
class QueriesTests(django.test.TestCase):
    def test_queries(self):
        from django.conf import settings
        from django.db import connection, reset_queries
        try:
            settings.DEBUG = True
            # [... your ORM code ...]
            self.assertEqual(len(connection.queries), num_of_expected_queries)
        finally:
            settings.DEBUG = False
            reset_queries()
You may also consider resetting queries in setUp/tearDown to ensure queries are reset for each test, instead of doing it in the finally clause; the version above is more explicit (although more verbose). Alternatively, you can call reset_queries inside the try clause as many times as you need, to count queries from 0 each time.
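A sketch of that setUp/tearDown variant (the same idea as the snippet above, just with the reset done once per test):
from django.conf import settings
from django.db import connection, reset_queries
from django.test import TestCase

class QueriesTests(TestCase):
    def setUp(self):
        settings.DEBUG = True
        reset_queries()                      # every test starts with an empty query log

    def tearDown(self):
        settings.DEBUG = False
        reset_queries()

    def test_queries(self):
        # [... your ORM code ...]
        self.assertEqual(len(connection.queries), 0)  # replace 0 with the expected count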
Here is a working prototype of a context manager, withAssertNumQueriesLessThan:
import json
from contextlib import contextmanager

from django.db import connections
from django.test.utils import CaptureQueriesContext

@contextmanager
def withAssertNumQueriesLessThan(self, value, using='default', verbose=False):
    with CaptureQueriesContext(connections[using]) as context:
        yield  # your test will be run here
    if verbose:
        msg = "\r\n%s" % json.dumps(context.captured_queries, indent=4)
    else:
        msg = None
    self.assertLess(len(context.captured_queries), value, msg=msg)
It can simply be used in your unit tests, for example for checking the number of queries per Django REST API call:
with self.withAssertNumQueriesLessThan(10):
    response = self.client.get('contacts/')
    self.assertEqual(response.status_code, 200)
You can also provide the exact DB alias via using, and set verbose=True if you want to pretty-print the list of actual queries to stdout.
If you have DEBUG set to True in your settings.py (presumably so in your test environment) then you can count queries executed in your test as follows:
from django.db import connection

class DoSomethingTests(django.test.TestCase):
    def test_something_or_other(self):
        num_queries_old = len(connection.queries)
        do_something_in_the_database()
        num_queries_new = len(connection.queries)
        self.assertEqual(n, num_queries_new - num_queries_old)
If you want to use a decorator for that there is a nice gist:
import functools
import sys
import re

from django.conf import settings
from django.db import connection

def shrink_select(sql):
    return re.sub(r"^SELECT(.+)FROM", "SELECT .. FROM", sql)

def shrink_update(sql):
    return re.sub(r"SET(.+)WHERE", "SET .. WHERE", sql)

def shrink_insert(sql):
    return re.sub(r"\((.+)\)", "(..)", sql)

def shrink_sql(sql):
    return shrink_update(shrink_insert(shrink_select(sql)))

def _err_msg(num, expected_num, verbose, func=None):
    func_name = "%s:" % func.__name__ if func else ""
    msg = "%s Expected number of queries is %d, actual number is %d.\n" % (func_name, expected_num, num)
    if verbose > 0:
        queries = [query['sql'] for query in connection.queries[-num:]]
        if verbose == 1:
            queries = [shrink_sql(sql) for sql in queries]
        msg += "== Queries == \n" + "\n".join(queries)
    return msg

def assertNumQueries(expected_num, verbose=1):
    class DecoratorOrContextManager(object):

        def __call__(self, func):  # decorator
            @functools.wraps(func)
            def inner(*args, **kwargs):
                handled = False
                try:
                    self.__enter__()
                    return func(*args, **kwargs)
                except:
                    self.__exit__(*sys.exc_info())
                    handled = True
                    raise
                finally:
                    if not handled:
                        self.__exit__(None, None, None)
            return inner

        def __enter__(self):
            self.old_debug = settings.DEBUG
            self.old_query_count = len(connection.queries)
            settings.DEBUG = True

        def __exit__(self, type, value, traceback):
            if not type:
                num = len(connection.queries) - self.old_query_count
                assert expected_num == num, _err_msg(num, expected_num, verbose)
            settings.DEBUG = self.old_debug

    return DecoratorOrContextManager()
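For completeness, a short usage sketch of the gist above, both as a decorator and as a context manager; it assumes the helper lives in a hypothetical local module called testutils:
from django.contrib.auth.models import Group, User
from django.test import TestCase

from testutils import assertNumQueries  # hypothetical module holding the gist above

class ProfileTests(TestCase):
    @assertNumQueries(2)
    def test_as_decorator(self):
        list(User.objects.all())    # query 1
        list(Group.objects.all())   # query 2

    def test_as_context_manager(self):
        with assertNumQueries(1, verbose=2):
            list(User.objects.all())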