18. Discovery
project/frob/tests/__init__.py:
from project.frob.tests.test_forms import *
from project.frob.tests.test_models import *
from project.frob.tests.test_views import *
from project.frob.tests.test_tarts import *
from project.frob.tests.test_urls import *
from project.frob.tests.test_zoos import *
27. Discovery
• No more accidental shadowing
• No more…
SocialCampaignAccountComputationTestCase
SocialCampaignsItemUserUtilsTestCase
SelfServiceCandidateRequestConfirmationTests
28. Discovery
• No more accidental shadowing
• No more…
SocialCampaignAccountComputationTestCase
SocialCampaignsItemUserUtilsTestCase
SelfServiceCandidateRequestConfirmationTests
• No more forgetting to import
38. Functions as Tests
# Look, ma: no class!
def test_height_initted():
"""We should be able to get a height
even on no-tty Terminals."""
t = Terminal(stream=StringIO())
eq_(type(t.height), int)
39. Functions as Tests
# Look, ma: no class!
def test_height_initted():
"""We should be able to get a height
even on no-tty Terminals."""
t = Terminal(stream=StringIO())
eq_(type(t.height), int)
40. Functions as Tests
# Look, ma: no class!
def test_height_initted():
"""We should be able to get a height
even on no-tty Terminals."""
t = Terminal(stream=StringIO())
eq_(type(t.height), int)
@with_setup(setup_func, teardown_func)
def test_woobie():
...
42. Test Generators
data = [('thing1', 'result1'),
('Steven', 'Steve'),
...]
def test_munge():
"""
Test munging independently on
several pieces of data.
"""
for t, r in data:
yield check_datum, t, r
def check_datum(t, r):
assert munge(t) == r
45. Test Attributes
from nose.plugins.attrib import attr
@attr('selenium')
def test_click_around():
...
./manage.py test -a selenium
./manage.py test -a '!selenium'
46. Test Attributes
from nose.plugins.attrib import attr
@attr('selenium')
def test_click_around():
...
./manage.py test -a selenium
./manage.py test -a '!selenium'
@attr(speed='slow')
class MyTestCase:
def test_long_integration(self):
...
def test_end_to_end_something(self):
...
47. Test Attributes
from nose.plugins.attrib import attr
@attr('selenium')
def test_click_around():
...
./manage.py test -a selenium
./manage.py test -a '!selenium'
@attr(speed='slow')
class MyTestCase:
def test_long_integration(self):
...
def test_end_to_end_something(self):
...
./manage.py test -a speed=slow
48. Test Attributes
from nose.plugins.attrib import attr
@attr('selenium')
def test_click_around():
...
./manage.py test -a selenium
./manage.py test -a '!selenium'
@attr(speed='slow')
class MyTestCase:
def test_long_integration(self):
...
def test_end_to_end_something(self):
...
./manage.py test -a speed=slow
./manage.py test -a selenium,speed=slow # and
49. Test Attributes
from nose.plugins.attrib import attr
@attr('selenium')
def test_click_around():
...
./manage.py test -a selenium
./manage.py test -a '!selenium'
@attr(speed='slow')
class MyTestCase:
def test_long_integration(self):
...
def test_end_to_end_something(self):
...
./manage.py test -a speed=slow
./manage.py test -a selenium,speed=slow # and
./manage.py test -a selenium -a speed=slow # or
76. % time ./manage.py test
Creating test database for alias 'default'...
...
blah blah blah
./manage.py test 50.75s user 6.01s system 30% cpu 302.594 total
92. FastFixtureTestCase
class FastFixtureTestCase(TestCase):
"""Loads fixtures just once per class."""
def setup_class(self):
load_fixtures()
commit()
def run_test(self):
run_the_test()
rollback()
def teardown_class(self):
remove_fixtures()
commit()
93. FastFixtureTestCase
class FastFixtureTestCase(TestCase):
"""Loads fixtures just once per class."""
def setup_class(self):
load_fixtures()
commit()
def run_test(self):
run_the_test()
rollback()
def teardown_class(self):
remove_fixtures()
commit()
94. FastFixtureTestCase
class FastFixtureTestCase(TestCase):
"""Loads fixtures just once per class."""
def setup_class(self):
load_fixtures()
commit()
def run_test(self):
run_the_test()
rollback()
def teardown_class(self):
remove_fixtures()
commit()
95. FastFixtureTestCase
class FastFixtureTestCase(TestCase):
"""Loads fixtures just once per class."""
def setup_class(self):
load_fixtures()
commit()
def run_test(self):
run_the_test()
rollback()
def teardown_class(self):
remove_fixtures()
commit()
96. FastFixtureTestCase
class FastFixtureTestCase(TestCase):
"""Loads fixtures just once per class."""
"""A copy of Django 1.3.0's stock loaddata.py, adapted so that, instead of
loading any data, it returns the tables referenced by a set of fixtures so we
def setup_class(self): can truncate them (and no others) quickly after we're finished with them."""
import os
import gzip
load_fixtures()
import zipfile
from django.conf import settings
from django.core import serializers
commit()
from django.db import router, DEFAULT_DB_ALIAS
from django.db.models import get_apps
from django.utils.itercompat import product
try:
import bz2
has_bz2 = True
except ImportError:
has_bz2 = False
def run_test(self): def tables_used_by_fixtures(fixture_labels, using=DEFAULT_DB_ALIAS):
"""Act like Django's stock loaddata command, but, instead of loading data,
return an iterable of the names of the tables into which data would be
run_the_test() loaded."""
# Keep a count of the installed objects and fixtures
fixture_count = 0
loaded_object_count = 0
rollback()
fixture_object_count = 0
tables = set()
class SingleZipReader(zipfile.ZipFile):
def __init__(self, *args, **kwargs):
zipfile.ZipFile.__init__(self, *args, **kwargs)
if settings.DEBUG:
assert len(self.namelist()) == 1, "Zip-compressed fixtures must
contain only one file."
def teardown_class(self): def read(self):
return zipfile.ZipFile.read(self, self.namelist()[0])
compression_types = {
remove_fixtures()
None: file,
'gz': gzip.GzipFile,
'zip': SingleZipReader
}
commit()
if has_bz2:
compression_types['bz2'] = bz2.BZ2File
app_module_paths = []
for app in get_apps():
if hasattr(app, '__path__'):
# It's a 'models/' subpackage
for path in app.__path__:
app_module_paths.append(path)
else:
# It's a models.py module
app_module_paths.append(app.__file__)
app_fixtures = [os.path.join(os.path.dirname(path), 'fixtures') for path in
app_module_paths]
for fixture_label in fixture_labels:
parts = fixture_label.split('.')
if len(parts) > 1 and parts[-1] in compression_types:
105. Fixture Bundling
class ThreadsTemplateTests(FastFixtureTestCase):
fixtures = ['users.json', 'posts.json',
'forums_permissions.json']
...
class ForumsTemplateTests(FastFixtureTestCase):
fixtures = ['users.json', 'posts.json',
'forums_permissions.json']
...
class NewThreadTests(FastFixtureTestCase):
fixtures = ['users.json', 'posts.json',
'forums_permissions.json']
...
106. Fixture Bundling
fixtures
TestCase1→ A B C
TestCase2→ A B C D
TestCase3→ A B C
TestCase4→ B D
TestCase5→ A B C D
TestCase6→ A B C
107. Fixture Bundling
fixtures
TestCase1→ A B C
TestCase3→ A B C
TestCase6→ A B C
TestCase2→ A B C D
TestCase5→ A B C D
TestCase4→ B D
108. Fixture Bundling
fixtures
☚ load ABC
TestCase1→ A B C
TestCase3→ A B C
TestCase6→ A B C
TestCase2→ A B C D
TestCase5→ A B C D
TestCase4→ B D
109. Fixture Bundling
fixtures
☚ load ABC
TestCase1→ A B C
TestCase3→ A B C
TestCase6→ A B C
☚ load ABCD
TestCase2→ A B C D
TestCase5→ A B C D
TestCase4→ B D
110. Fixture Bundling
fixtures
☚ load ABC
TestCase1→ A B C
TestCase3→ A B C
TestCase6→ A B C
☚ load ABCD
TestCase2→ A B C D
TestCase5→ A B C D
☚ load BD
TestCase4→ B D
145. ?
Erik Rose
erik@votizen.com
django-nose
sprint
tomorrow,
9am-2pm
Image Credits:
• Anatomical diagram by Patrick J. Lynch,
medical illustrator, edited by user DiebBuche:
http://commons.wikimedia.org/wiki/File:Mouth_anatomy-de.svg
• “Testing!” comic adapted from http://xkcd.com/303/
• Memory hierarchy diagram: http://i.imgur.com/X1Hi1.gif
Editor's Notes
Welcome! It’s great to see so many of you here!\n\nI’m going to take you on a tour of Django’s nasal passage, a journey from its stock test equipment to a fast, boilerplate-free wonderland that you can enjoy right now.\n
First, a little context. I work at Votizen, a startup that aims to reduce the influence of money in politics by substituting social media for expensive channels like TV and phone calls. Through this, we’re trying to give third parties, grass-roots movements a chance.\n\nNow, when you try to model the political process and develop stuff really fast, things get hairy quickly.\n\nSo: fairly heavyweight testing. 1700 tests, and we’ve found that the stock Django test framework is easy to get started with, but you start to hit your head on the ceiling as your project grows.\n
First, a little context. I work at Votizen, a startup that aims to reduce the influence of money in politics by substituting social media for expensive channels like TV and phone calls. Through this, we’re trying to give third parties, grass-roots movements a chance.\n\nNow, when you try to model the political process and develop stuff really fast, things get hairy quickly.\n\nSo: fairly intense testing. 1700 tests, and we’ve found that the stock Django test framework is easy to get started with, but you start to hit your head on the ceiling as your project grows.\n
Almost immediately get too many tests for one tests.py file. So you turn it into a package. Then you have to import each thing into __init__. This is annoying and error-prone.\n\nSlow: full flush for each TTC. Full fixture reload for each test. Creates fresh DBs at every invocation.\n\nOverbroad: Everything in INSTALLED_APPS gets tested: slow and pointless. At best, you’re testing whether any third-party reusable apps are configured right. In the typical case, you’re just wasting your time running tests on third-party stuff that's already known to work.\n\nRough UI. You don’t get any tracebacks till everything’s done. You don’t know when it’ll be done. There’s a lot of trash in the output.\n\nExtensibility is unscalable. If you make one subclass to do XML output, you can’t just mix in somebody else’s that limits testing to just your apps.\n\n“Nose is going to help us solve all of that.”\n\nTODO: more bubbles showing visuals: FS layout for tests/ folder, test output for “Rough”\n
Almost immediately get too many tests for one tests.py file. So you turn it into a package. Then you have to import each thing into __init__. This is annoying and error-prone.\n\nSlow: full flush for each TTC. Full fixture reload for each test. Creates fresh DBs at every invocation.\n\nOverbroad: Everything in INSTALLED_APPS gets tested: slow and pointless. At best, you’re testing whether any third-party reusable apps are configured right. In the typical case, you’re just wasting your time running tests on third-party stuff that's already known to work.\n\nRough UI. You don’t get any tracebacks till everything’s done. You don’t know when it’ll be done. There’s a lot of trash in the output.\n\nExtensibility is unscalable. If you make one subclass to do XML output, you can’t just mix in somebody else’s that limits testing to just your apps.\n\n“Nose is going to help us solve all of that.”\n\nTODO: more bubbles showing visuals: FS layout for tests/ folder, test output for “Rough”\n
Almost immediately get too many tests for one tests.py file. So you turn it into a package. Then you have to import each thing into __init__. This is annoying and error-prone.\n\nSlow: full flush for each TTC. Full fixture reload for each test. Creates fresh DBs at every invocation.\n\nOverbroad: Everything in INSTALLED_APPS gets tested: slow and pointless. At best, you’re testing whether any third-party reusable apps are configured right. In the typical case, you’re just wasting your time running tests on third-party stuff that's already known to work.\n\nRough UI. You don’t get any tracebacks till everything’s done. You don’t know when it’ll be done. There’s a lot of trash in the output.\n\nExtensibility is unscalable. If you make one subclass to do XML output, you can’t just mix in somebody else’s that limits testing to just your apps.\n\n“Nose is going to help us solve all of that.”\n\nTODO: more bubbles showing visuals: FS layout for tests/ folder, test output for “Rough”\n
Almost immediately get too many tests for one tests.py file. So you turn it into a package. Then you have to import each thing into __init__. This is annoying and error-prone.\n\nSlow: full flush for each TTC. Full fixture reload for each test. Creates fresh DBs at every invocation.\n\nOverbroad: Everything in INSTALLED_APPS gets tested: slow and pointless. At best, you’re testing whether any third-party reusable apps are configured right. In the typical case, you’re just wasting your time running tests on third-party stuff that's already known to work.\n\nRough UI. You don’t get any tracebacks till everything’s done. You don’t know when it’ll be done. There’s a lot of trash in the output.\n\nExtensibility is unscalable. If you make one subclass to do XML output, you can’t just mix in somebody else’s that limits testing to just your apps.\n\n“Nose is going to help us solve all of that.”\n\nTODO: more bubbles showing visuals: FS layout for tests/ folder, test output for “Rough”\n
Almost immediately get too many tests for one tests.py file. So you turn it into a package. Then you have to import each thing into __init__. This is annoying and error-prone.\n\nSlow: full flush for each TTC. Full fixture reload for each test. Creates fresh DBs at every invocation.\n\nOverbroad: Everything in INSTALLED_APPS gets tested: slow and pointless. At best, you’re testing whether any third-party reusable apps are configured right. In the typical case, you’re just wasting your time running tests on third-party stuff that's already known to work.\n\nRough UI. You don’t get any tracebacks till everything’s done. You don’t know when it’ll be done. There’s a lot of trash in the output.\n\nExtensibility is unscalable. If you make one subclass to do XML output, you can’t just mix in somebody else’s that limits testing to just your apps.\n\n“Nose is going to help us solve all of that.”\n\nTODO: more bubbles showing visuals: FS layout for tests/ folder, test output for “Rough”\n
Almost immediately get too many tests for one tests.py file. So you turn it into a package. Then you have to import each thing into __init__. This is annoying and error-prone.\n\nSlow: full flush for each TTC. Full fixture reload for each test. Creates fresh DBs at every invocation.\n\nOverbroad: Everything in INSTALLED_APPS gets tested: slow and pointless. At best, you’re testing whether any third-party reusable apps are configured right. In the typical case, you’re just wasting your time running tests on third-party stuff that's already known to work.\n\nRough UI. You don’t get any tracebacks till everything’s done. You don’t know when it’ll be done. There’s a lot of trash in the output.\n\nExtensibility is unscalable. If you make one subclass to do XML output, you can’t just mix in somebody else’s that limits testing to just your apps.\n\n“Nose is going to help us solve all of that.”\n\nTODO: more bubbles showing visuals: FS layout for tests/ folder, test output for “Rough”\n
Almost immediately get too many tests for one tests.py file. So you turn it into a package. Then you have to import each thing into __init__. This is annoying and error-prone.\n\nSlow: full flush for each TTC. Full fixture reload for each test. Creates fresh DBs at every invocation.\n\nOverbroad: Everything in INSTALLED_APPS gets tested: slow and pointless. At best, you’re testing whether any third-party reusable apps are configured right. In the typical case, you’re just wasting your time running tests on third-party stuff that's already known to work.\n\nRough UI. You don’t get any tracebacks till everything’s done. You don’t know when it’ll be done. There’s a lot of trash in the output.\n\nExtensibility is unscalable. If you make one subclass to do XML output, you can’t just mix in somebody else’s that limits testing to just your apps.\n\n“Nose is going to help us solve all of that.”\n\nTODO: more bubbles showing visuals: FS layout for tests/ folder, test output for “Rough”\n
Before we dive into its capabilities, here’s how you install nose for use with Django.\n\npip install django-nose—a shim. It implements a Django test runner that invokes nose. nose itself is a requirement of the django-nose package, so it gets installed automatically.\n\nsettings\n\n./manage.py test, nose runs instead\n\nNow, let’s see what you get.\n
Before we dive into its capabilities, here’s how you install nose for use with Django.\n\npip install django-nose—a shim. It implements a Django test runner that invokes nose. nose itself is a requirement of the django-nose package, so it gets installed automatically.\n\nsettings\n\n./manage.py test, nose runs instead\n\nNow, let’s see what you get.\n
Before we dive into its capabilities, here’s how you install nose for use with Django.\n\npip install django-nose—a shim. It implements a Django test runner that invokes nose. nose itself is a requirement of the django-nose package, so it gets installed automatically.\n\nsettings\n\n./manage.py test, nose runs instead\n\nNow, let’s see what you get.\n
Before we dive into its capabilities, here’s how you install nose for use with Django.\n\npip install django-nose—a shim. It implements a Django test runner that invokes nose. nose itself is a requirement of the django-nose package, so it gets installed automatically.\n\nsettings\n\n./manage.py test, nose runs instead\n\nNow, let’s see what you get.\n
Before we dive into its capabilities, here’s how you install nose for use with Django.\n\npip install django-nose—a shim. It implements a Django test runner that invokes nose. nose itself is a requirement of the django-nose package, so it gets installed automatically.\n\nsettings\n\n./manage.py test, nose runs instead\n\nNow, let’s see what you get.\n
\n
So where Django’s stock testrunner makes you import everything into tests/__init__ like this.\n\nNose lets it look like this.\n\nHow? Nose finds your tests by name….\n
So where Django’s stock testrunner makes you import everything into tests/__init__ like this.\n\nNose lets it look like this.\n\nHow? Nose finds your tests by name….\n
Basically, if nose finds a class or function matching this pattern inside a module matching this pattern, it considers that a test.\n\nThese are some test-like names. \n\nAnd, if you have something that doesn’t fit the pattern, you can use a decorator.\n\nDon’t like pattern? Pass -m. Or write short discovery plugin.\n\nOf course, subclasses of unittest.TestCase are always considered tests, so all your old Django tests continue to work.\n
Basically, if nose finds a class or function matching this pattern inside a module matching this pattern, it considers that a test.\n\nThese are some test-like names. \n\nAnd, if you have something that doesn’t fit the pattern, you can use a decorator.\n\nDon’t like pattern? Pass -m. Or write short discovery plugin.\n\nOf course, subclasses of unittest.TestCase are always considered tests, so all your old Django tests continue to work.\n
Basically, if nose finds a class or function matching this pattern inside a module matching this pattern, it considers that a test.\n\nThese are some test-like names. \n\nAnd, if you have something that doesn’t fit the pattern, you can use a decorator.\n\nDon’t like pattern? Pass -m. Or write short discovery plugin.\n\nOf course, subclasses of unittest.TestCase are always considered tests, so all your old Django tests continue to work.\n
Basically, if nose finds a class or function matching this pattern inside a module matching this pattern, it considers that a test.\n\nThese are some test-like names. \n\nAnd, if you have something that doesn’t fit the pattern, you can use a decorator.\n\nDon’t like pattern? Pass -m. Or write short discovery plugin.\n\nOf course, subclasses of unittest.TestCase are always considered tests, so all your old Django tests continue to work.\n
Basically, if nose finds a class or function matching this pattern inside a module matching this pattern, it considers that a test.\n\nThese are some test-like names. \n\nAnd, if you have something that doesn’t fit the pattern, you can use a decorator.\n\nDon’t like pattern? Pass -m. Or write short discovery plugin.\n\nOf course, subclasses of unittest.TestCase are always considered tests, so all your old Django tests continue to work.\n
Basically, if nose finds a class or function matching this pattern inside a module matching this pattern, it considers that a test.\n\nThese are some test-like names. \n\nAnd, if you have something that doesn’t fit the pattern, you can use a decorator.\n\nDon’t like pattern? Pass -m. Or write short discovery plugin.\n\nOf course, subclasses of unittest.TestCase are always considered tests, so all your old Django tests continue to work.\n
Basically, if nose finds a class or function matching this pattern inside a module matching this pattern, it considers that a test.\n\nThese are some test-like names. \n\nAnd, if you have something that doesn’t fit the pattern, you can use a decorator.\n\nDon’t like pattern? Pass -m. Or write short discovery plugin.\n\nOf course, subclasses of unittest.TestCase are always considered tests, so all your old Django tests continue to work.\n
Basically, if nose finds a class or function matching this pattern inside a module matching this pattern, it considers that a test.\n\nThese are some test-like names. \n\nAnd, if you have something that doesn’t fit the pattern, you can use a decorator.\n\nDon’t like pattern? Pass -m. Or write short discovery plugin.\n\nOf course, subclasses of unittest.TestCase are always considered tests, so all your old Django tests continue to work.\n
Basically, if nose finds a class or function matching this pattern inside a module matching this pattern, it considers that a test.\n\nThese are some test-like names. \n\nAnd, if you have something that doesn’t fit the pattern, you can use a decorator.\n\nDon’t like pattern? Pass -m. Or write short discovery plugin.\n\nOf course, subclasses of unittest.TestCase are always considered tests, so all your old Django tests continue to work.\n
Basically, if nose finds a class or function matching this pattern inside a module matching this pattern, it considers that a test.\n\nThese are some test-like names. \n\nAnd, if you have something that doesn’t fit the pattern, you can use a decorator.\n\nDon’t like pattern? Pass -m. Or write short discovery plugin.\n\nOf course, subclasses of unittest.TestCase are always considered tests, so all your old Django tests continue to work.\n
In addition to making things shorter, this eliminates several classes of errors.\n\n• accidental shadowing. If you happen to have duplicate names between modules, things get shadowed, and tests you think are getting run don’t get run.\n• corollary: absurdly long names to maintain uniqueness\n• We’ve also just flat-out forgot to import a module (or a name, if not using *).\n\nBonus: Since you don’t have to import up into __init__, you’re free to import down from it, using __init__ as a utility module without fear of import cycles. I like to put my test base classes there.\n
In addition to making things shorter, this eliminates several classes of errors.\n\n• accidental shadowing. If you happen to have duplicate names between modules, things get shadowed, and tests you think are getting run don’t get run.\n• corollary: absurdly long names to maintain uniqueness\n• We’ve also just flat-out forgot to import a module (or a name, if not using *).\n\nBonus: Since you don’t have to import up into __init__, you’re free to import down from it, using __init__ as a utility module without fear of import cycles. I like to put my test base classes there.\n
In addition to making things shorter, this eliminates several classes of errors.\n\n• accidental shadowing. If you happen to have duplicate names between modules, things get shadowed, and tests you think are getting run don’t get run.\n• corollary: absurdly long names to maintain uniqueness\n• We’ve also just flat-out forgot to import a module (or a name, if not using *).\n\nBonus: Since you don’t have to import up into __init__, you’re free to import down from it, using __init__ as a utility module without fear of import cycles. I like to put my test base classes there.\n
In addition to making things shorter, this eliminates several classes of errors.\n\n• accidental shadowing. If you happen to have duplicate names between modules, things get shadowed, and tests you think are getting run don’t get run.\n• corollary: absurdly long names to maintain uniqueness\n• We’ve also just flat-out forgot to import a module (or a name, if not using *).\n\nBonus: Since you don’t have to import up into __init__, you’re free to import down from it, using __init__ as a utility module without fear of import cycles. I like to put my test base classes there.\n
In addition to making things shorter, this eliminates several classes of errors.\n\n• accidental shadowing. If you happen to have duplicate names between modules, things get shadowed, and tests you think are getting run don’t get run.\n• corollary: absurdly long names to maintain uniqueness\n• We’ve also just flat-out forgot to import a module (or a name, if not using *).\n\nBonus: Since you don’t have to import up into __init__, you’re free to import down from it, using __init__ as a utility module without fear of import cycles. I like to put my test base classes there.\n
In addition to making things shorter, this eliminates several classes of errors.\n\n• accidental shadowing. If you happen to have duplicate names between modules, things get shadowed, and tests you think are getting run don’t get run.\n• corollary: absurdly long names to maintain uniqueness\n• We’ve also just flat-out forgot to import a module (or a name, if not using *).\n\nBonus: Since you don’t have to import up into __init__, you’re free to import down from it, using __init__ as a utility module without fear of import cycles. I like to put my test base classes there.\n
As I hinted earlier, django_nose limits its discovery to your own project dir.\n\nThat means it tests only your code, saving you a bunch of time every time you test.\n\nIncidentally: means you can have code in your Django project that doesn’t live in an app and yet is still tested.\n\nSo what do you end up with?\n
You end up with freedom. But there’s no point throwing the baby out with the bathwater.\n\nI still like to nestle my tests in a “tests” package in each app. The __init__ is either blank or full of base classes and utility functions. Tests go in “test_whatever”. That naming convention is nice for making tests contrast with other artifacts in the tests folder, like static sample data.\n\nAnother perfectly reasonable convention is to put the high-entropy word first: “model_tests”, “view_tests”, etc. That’s better for type-to-select.\n\nEither way, it’s faster, less error-prone, and fewer lines of code.\n\n-----\nAt Votizen, we toyed around with splitting tests into a deeper hierarchy, putting the tests for each model in their own file e.g., but we ended up going back to this: longer files but less digging around in the filesystem.\n
when you run tests: spelling different\n\nDjango way\n\nnose way: longer, more general\n\nconsolation: can run a module\n\nWraps up the boilerplate-killing portion of our program\n
when you run tests: spelling different\n\nDjango way\n\nnose way: longer, more general\n\nconsolation: can run a module\n\nWraps up the boilerplate-killing portion of our program\n
Here are some ways nose lets you go beyond unittest in capability.\n
If you don’t need any setup or teardown, just make a function.\n\neq_. Is assertEqual. Btw, why not just assert? -O\n\nBut there are allowances for setup and teardown.\n\nSo, if you find yourself trying to figure out which class to shoehorn a test into, just don’t.\n\nPackage-, module-level setup and teardown.\n
If you don’t need any setup or teardown, just make a function.\n\neq_. Is assertEqual. Btw, why not just assert? -O\n\nBut there are allowances for setup and teardown.\n\nSo, if you find yourself trying to figure out which class to shoehorn a test into, just don’t.\n\nPackage-, module-level setup and teardown.\n
If you don’t need any setup or teardown, just make a function.\n\neq_. Is assertEqual. Btw, why not just assert? -O\n\nBut there are allowances for setup and teardown.\n\nSo, if you find yourself trying to figure out which class to shoehorn a test into, just don’t.\n\nPackage-, module-level setup and teardown.\n
Data-driven tests. Test something intricately mathematical, not just a couple of branches that can be easily enumerated in well-partitioned tests. Fuzzy matchers.\n\nunittest in a loop → first one fails, eats the rest\n\nnose → can keep going\n\nyield callables & args\n\nCan’t use it in TestCase subclasses, but works everywhere else.\n\n-----\nSometimes it feels nice to generate several similar assertions, like in a for loop.\n
Sometimes it’s nice to be able to split up your tests into sets.\n\ne.g. selenium\n\nto run selenium\nto run other\n\nvalued\nselect according to values\nbool and & or\n
Sometimes it’s nice to be able to split up your tests into sets.\n\ne.g. selenium\n\nto run selenium\nto run other\n\nvalued\nselect according to values\nbool and & or\n
Sometimes it’s nice to be able to split up your tests into sets.\n\ne.g. selenium\n\nto run selenium\nto run other\n\nvalued\nselect according to values\nbool and & or\n
Sometimes it’s nice to be able to split up your tests into sets.\n\ne.g. selenium\n\nto run selenium\nto run other\n\nvalued\nselect according to values\nbool and & or\n
Sometimes it’s nice to be able to split up your tests into sets.\n\ne.g. selenium\n\nto run selenium\nto run other\n\nvalued\nselect according to values\nbool and & or\n
Sometimes it’s nice to be able to split up your tests into sets.\n\ne.g. selenium\n\nto run selenium\nto run other\n\nvalued\nselect according to values\nbool and & or\n
Jenkins, CruiseControl\n\nnosetests.xml\n\ncustomize file name\n\nresult like this\n
Jenkins, CruiseControl\n\nnosetests.xml\n\ncustomize file name\n\nresult like this\n
Jenkins, CruiseControl\n\nnosetests.xml\n\ncustomize file name\n\nresult like this\n
-s\n\neats prints\neats pdb\n\nalias\n
-s\n\neats prints\neats pdb\n\nalias\n
TODO to represent TDD tests that are committed but whose functionality isn’t written yet.\n
TODO to represent TDD tests that are committed but whose functionality isn’t written yet.\n
TODO to represent TDD tests that are committed but whose functionality isn’t written yet.\n
TODO to represent TDD tests that are committed but whose functionality isn’t written yet.\n
Earlier when I said django-nose was just a shim, I lied. It sure started out that way, but now it has all kinds of crazy performance-enhancing features.\n\nAlmost all implemented as nose plugins—no evulz.\n\nTo demonstrate these speed features, I’m going to use the example of support.mozilla.org (affectionately nicknamed “SUMO”)…\n
…which is short for “support.mozilla.org”. With about 1200 tests…\n
and 1B hits/mo, moderate-sized site at Mozilla\n\nOver time, the tests had grown to take 20 minutes on our build server—5 minutes on my local box. Now, 5 minutes might not sound like long, so it’s worth saying a few words about what faster tests buy you.\n\nFirst, and most obviously, you save the swordfighting time while the tests run.\n
and 1B hits/mo, moderate-sized site at Mozilla\n\nOver time, the tests had grown to take 20 minutes on our build server—5 minutes on my local box. Now, 5 minutes might not sound like long, so it’s worth saying a few words about what faster tests buy you.\n\nFirst, and most obviously, you save the swordfighting time while the tests run.\n
and 1B hits/mo, moderate-sized site at Mozilla\n\nOver time, the tests had grown to take 20 minutes on our build server—5 minutes on my local box. Now, 5 minutes might not sound like long, so it’s worth saying a few words about what faster tests buy you.\n\nFirst, and most obviously, you save the swordfighting time while the tests run.\n
(2) more importantly, time lost context switching, reestablishing flow\n(3) not running the tests at all (or not running all of them).\n(4) …which leads to breaking the build and slowing down your teammates\n\nBut, by using django-nose’s speed optimizations, we can solve all those problems and cut the total runtime from 5 minutes to just 1.\n\nSo where does django-nose go looking for that speed? There’s generally only one answer to that question, and that’s IO.\n\n-------------\n\n\n• CI box will run\n• 20 minutes later\n• Too long for feedback. you've long since kicked that feature out of your head and are on to the next.\n
(2) more importantly, time lost context switching, reestablishing flow\n(3) not running the tests at all (or not running all of them).\n(4) …which leads to breaking the build and slowing down your teammates\n\nBut, by using django-nose’s speed optimizations, we can solve all those problems and cut the total runtime from 5 minutes to just 1.\n\nSo where does django-nose go looking for that speed? There’s generally only one answer to that question, and that’s IO.\n\n-------------\n\n\n• CI box will run\n• 20 minutes later\n• Too long for feedback. you've long since kicked that feature out of your head and are on to the next.\n
(2) more importantly, time lost context switching, reestablishing flow\n(3) not running the tests at all (or not running all of them).\n(4) …which leads to breaking the build and slowing down your teammates\n\nBut, by using django-nose’s speed optimizations, we can solve all those problems and cut the total runtime from 5 minutes to just 1.\n\nSo where does django-nose go looking for that speed? There’s generally only one answer to that question, and that’s IO.\n\n-------------\n\n\n• CI box will run\n• 20 minutes later\n• Too long for feedback. you've long since kicked that feature out of your head and are on to the next.\n
(2) more importantly, time lost context switching, reestablishing flow\n(3) not running the tests at all (or not running all of them).\n(4) …which leads to breaking the build and slowing down your teammates\n\nBut, by using django-nose’s speed optimizations, we can solve all those problems and cut the total runtime from 5 minutes to just 1.\n\nSo where does django-nose go looking for that speed? There’s generally only one answer to that question, and that’s IO.\n\n-------------\n\n\n• CI box will run\n• 20 minutes later\n• Too long for feedback. you've long since kicked that feature out of your head and are on to the next.\n
(2) more importantly, time lost context switching, reestablishing flow\n(3) not running the tests at all (or not running all of them).\n(4) …which leads to breaking the build and slowing down your teammates\n\nBut, by using django-nose’s speed optimizations, we can solve all those problems and cut the total runtime from 5 minutes to just 1.\n\nSo where does django-nose go looking for that speed? There’s generally only one answer to that question, and that’s IO.\n\n-------------\n\n\n• CI box will run\n• 20 minutes later\n• Too long for feedback. you've long since kicked that feature out of your head and are on to the next.\n
(2) more importantly, time lost context switching, reestablishing flow\n(3) not running the tests at all (or not running all of them).\n(4) …which leads to breaking the build and slowing down your teammates\n\nBut, by using django-nose’s speed optimizations, we can solve all those problems and cut the total runtime from 5 minutes to just 1.\n\nSo where does django-nose go looking for that speed? There’s generally only one answer to that question, and that’s IO.\n\n-------------\n\n\n• CI box will run\n• 20 minutes later\n• Too long for feedback. you've long since kicked that feature out of your head and are on to the next.\n
this little chart represents 1ns as a single pixel.\n\ndwarfs\n\n(Incidentally, an SSD has on the order of a 100ns access time, but it still bottlenecks decidedly on writes: FS overhead, write amplification, etc.)\n\nA little digging with tools like the UNIX time command confirms almost all of that time to be in IO—specifically DB IO.\n\nThat’s why django nose provides 4 optimizations for reducing it.\n\n
this little chart represents 1ns as a single pixel.\n\ndwarfs\n\n(Incidentally, an SSD has on the order of a 100ns access time, but it still bottlenecks decidedly on writes: FS overhead, write amplification, etc.)\n\nA little digging with tools like the UNIX time command confirms almost all of that time to be in IO—specifically DB IO.\n\nThat’s why django nose provides 4 optimizations for reducing it.\n\n
this little chart represents 1ns as a single pixel.\n\ndwarfs\n\n(Incidentally, an SSD has on the order of a 100ns access time, but it still bottlenecks decidedly on writes: FS overhead, write amplification, etc.)\n\nA little digging with tools like the UNIX time command confirms almost all of that time to be in IO—specifically DB IO.\n\nThat’s why django nose provides 4 optimizations for reducing it.\n\n
this little chart represents 1ns as a single pixel.\n\ndwarfs\n\n(Incidentally, an SSD has on the order of a 100ns access time, but it still bottlenecks decidedly on writes: FS overhead, write amplification, etc.)\n\nA little digging with tools like the UNIX time command confirms almost all of that time to be in IO—specifically DB IO.\n\nThat’s why django nose provides 4 optimizations for reducing it.\n\n
We can confirm our rule of thumb with some profiling tools.\n\nThe Python profiler doesn’t tell you anything about I/O time; everything is in terms of CPU.\n\nHowever, the handy UNIX `time` command measures CPU and clock time of a command.\n\nWe can then look at the CPU percentage—and see that our tests are spending by far most of their time in I/O (or at least delegating to other processes).\n\nA little more digging reveals almost all of that time to be in DB IO. So, that’s why django nose provides 4 optimizations for reducing it.\n\n-------------\n\n<possible expansion point. see Practical Large-Scale Tests.>\n
We can confirm our rule of thumb with some profiling tools.\n\nThe Python profiler doesn’t tell you anything about I/O time; everything is in terms of CPU.\n\nHowever, the handy UNIX `time` command measures CPU and clock time of a command.\n\nWe can then look at the CPU percentage—and see that our tests are spending by far most of their time in I/O (or at least delegating to other processes).\n\nA little more digging reveals almost all of that time to be in DB IO. So, that’s why django nose provides 4 optimizations for reducing it.\n\n-------------\n\n<possible expansion point. see Practical Large-Scale Tests.>\n
We can confirm our rule of thumb with some profiling tools.\n\nThe Python profiler doesn’t tell you anything about I/O time; everything is in terms of CPU.\n\nHowever, the handy UNIX `time` command measures CPU and clock time of a command.\n\nWe can then look at the CPU percentage—and see that our tests are spending by far most of their time in I/O (or at least delegating to other processes).\n\nA little more digging reveals almost all of that time to be in DB IO. So, that’s why django nose provides 4 optimizations for reducing it.\n\n-------------\n\n<possible expansion point. see Practical Large-Scale Tests.>\n
We can confirm our rule of thumb with some profiling tools.\n\nThe Python profiler doesn’t tell you anything about I/O time; everything is in terms of CPU.\n\nHowever, the handy UNIX `time` command measures CPU and clock time of a command.\n\nWe can then look at the CPU percentage—and see that our tests are spending by far most of their time in I/O (or at least delegating to other processes).\n\nA little more digging reveals almost all of that time to be in DB IO. So, that’s why django nose provides 4 optimizations for reducing it.\n\n-------------\n\n<possible expansion point. see Practical Large-Scale Tests.>\n
The first of these is FastFixtureTestCase.\n
Test fixture data goes into JSON files like this one. This is an actual fixture from the forum app within SUMO. This one’s on the small side: 39 objects. Trouble is: the SQL is 39 statements. Can’t use bulk inserts because of possible post-save hooks, so we need a SQL statement for each.\n
One of SUMO’s test classes used this fixture along with two other similarly-sized ones, and it took 4 minutes to run. Seems like a long time to load a few dozen rows, no? What’s going on?\n
The trouble became clear when I turned on logging in MySQL: it’s just a matter of logging in as the mysql root user and typing SET GLOBAL general_log = 'ON', and then tailing the log file it lays down. What became quickly evident…\n
Fixtures reloaded before each test!\n\nEach test&#x2026;\n&#x2022; begins a transaction\n&#x2022; loads fixtures\n&#x2022; runs\n&#x2022; rolls back the transaction.\n\nVery tidy, but very inefficient\nload, rollback,\nload, rollback,\nload, rollback,\nload, rollback.\n\nOn the SUMO tests, this was 37,583 queries. It seemed to me we could do a lot better.\n
Fixtures reloaded before each test!\n\nEach test&#x2026;\n&#x2022; begins a transaction\n&#x2022; loads fixtures\n&#x2022; runs\n&#x2022; rolls back the transaction.\n\nVery tidy, but very inefficient\nload, rollback,\nload, rollback,\nload, rollback,\nload, rollback.\n\nOn the SUMO tests, this was 37,583 queries. It seemed to me we could do a lot better.\n
Fixtures reloaded before each test!\n\nEach test&#x2026;\n&#x2022; begins a transaction\n&#x2022; loads fixtures\n&#x2022; runs\n&#x2022; rolls back the transaction.\n\nVery tidy, but very inefficient\nload, rollback,\nload, rollback,\nload, rollback,\nload, rollback.\n\nOn the SUMO tests, this was 37,583 queries. It seemed to me we could do a lot better.\n
Fixtures reloaded before each test!\n\nEach test&#x2026;\n&#x2022; begins a transaction\n&#x2022; loads fixtures\n&#x2022; runs\n&#x2022; rolls back the transaction.\n\nVery tidy, but very inefficient\nload, rollback,\nload, rollback,\nload, rollback,\nload, rollback.\n\nOn the SUMO tests, this was 37,583 queries. It seemed to me we could do a lot better.\n
Fixtures reloaded before each test!\n\nEach test&#x2026;\n&#x2022; begins a transaction\n&#x2022; loads fixtures\n&#x2022; runs\n&#x2022; rolls back the transaction.\n\nVery tidy, but very inefficient\nload, rollback,\nload, rollback,\nload, rollback,\nload, rollback.\n\nOn the SUMO tests, this was 37,583 queries. It seemed to me we could do a lot better.\n
Fixtures reloaded before each test!\n\nEach test&#x2026;\n&#x2022; begins a transaction\n&#x2022; loads fixtures\n&#x2022; runs\n&#x2022; rolls back the transaction.\n\nVery tidy, but very inefficient\nload, rollback,\nload, rollback,\nload, rollback,\nload, rollback.\n\nOn the SUMO tests, this was 37,583 queries. It seemed to me we could do a lot better.\n
Fixtures reloaded before each test!\n\nEach test&#x2026;\n&#x2022; begins a transaction\n&#x2022; loads fixtures\n&#x2022; runs\n&#x2022; rolls back the transaction.\n\nVery tidy, but very inefficient\nload, rollback,\nload, rollback,\nload, rollback,\nload, rollback.\n\nOn the SUMO tests, this was 37,583 queries. It seemed to me we could do a lot better.\n
Fixtures reloaded before each test!\n\nEach test&#x2026;\n&#x2022; begins a transaction\n&#x2022; loads fixtures\n&#x2022; runs\n&#x2022; rolls back the transaction.\n\nVery tidy, but very inefficient\nload, rollback,\nload, rollback,\nload, rollback,\nload, rollback.\n\nOn the SUMO tests, this was 37,583 queries. It seemed to me we could do a lot better.\n
Fixtures reloaded before each test!\n\nEach test&#x2026;\n&#x2022; begins a transaction\n&#x2022; loads fixtures\n&#x2022; runs\n&#x2022; rolls back the transaction.\n\nVery tidy, but very inefficient\nload, rollback,\nload, rollback,\nload, rollback,\nload, rollback.\n\nOn the SUMO tests, this was 37,583 queries. It seemed to me we could do a lot better.\n
Here&#x2019;s a conceptual mockup\n\n1st def\n2nd def\n3rd def\n\nHow do we do that last bit? Well, we run a modified version of Django&#x2019;s stock fixture-loading routine&#x2014;one that keeps track of what was loaded so we can undo it.\n\nHere&#x2019;s a conceptual mockup of an alternate base TestClass, harnessing that same rollback mechanism to do a class worth of fixture setup once, and then reusing that each test in a class. Before the first test of the class, we set up the fixtures and&#x2014;here&#x2019;s the difference&#x2014;we commit. Then, for each test, we run it, and then we do a rollback afterward. Where in the old world, rolling back took us to a pristine database, it now takes us to an already-set-up fixture set. Finally, after all the tests in the class have been run, we explicitly remove the fixtures&#x2014;remember, they&#x2019;ve been committed, so we can&#x2019;t just roll back&#x2014;and commit. How do we do that last bit? Well, we run a modified version of Django&#x2019;s stock fixture-loading routine&#x2014;one that keeps track of what was loaded so we can undo it.\n
Here&#x2019;s a conceptual mockup\n\n1st def\n2nd def\n3rd def\n\nHow do we do that last bit? Well, we run a modified version of Django&#x2019;s stock fixture-loading routine&#x2014;one that keeps track of what was loaded so we can undo it.\n\nHere&#x2019;s a conceptual mockup of an alternate base TestClass, harnessing that same rollback mechanism to do a class worth of fixture setup once, and then reusing that each test in a class. Before the first test of the class, we set up the fixtures and&#x2014;here&#x2019;s the difference&#x2014;we commit. Then, for each test, we run it, and then we do a rollback afterward. Where in the old world, rolling back took us to a pristine database, it now takes us to an already-set-up fixture set. Finally, after all the tests in the class have been run, we explicitly remove the fixtures&#x2014;remember, they&#x2019;ve been committed, so we can&#x2019;t just roll back&#x2014;and commit. How do we do that last bit? Well, we run a modified version of Django&#x2019;s stock fixture-loading routine&#x2014;one that keeps track of what was loaded so we can undo it.\n
Here&#x2019;s a conceptual mockup\n\n1st def\n2nd def\n3rd def\n\nHow do we do that last bit? Well, we run a modified version of Django&#x2019;s stock fixture-loading routine&#x2014;one that keeps track of what was loaded so we can undo it.\n\nHere&#x2019;s a conceptual mockup of an alternate base TestClass, harnessing that same rollback mechanism to do a class worth of fixture setup once, and then reusing that each test in a class. Before the first test of the class, we set up the fixtures and&#x2014;here&#x2019;s the difference&#x2014;we commit. Then, for each test, we run it, and then we do a rollback afterward. Where in the old world, rolling back took us to a pristine database, it now takes us to an already-set-up fixture set. Finally, after all the tests in the class have been run, we explicitly remove the fixtures&#x2014;remember, they&#x2019;ve been committed, so we can&#x2019;t just roll back&#x2014;and commit. How do we do that last bit? Well, we run a modified version of Django&#x2019;s stock fixture-loading routine&#x2014;one that keeps track of what was loaded so we can undo it.\n
Here&#x2019;s a conceptual mockup\n\n1st def\n2nd def\n3rd def\n\nHow do we do that last bit? Well, we run a modified version of Django&#x2019;s stock fixture-loading routine&#x2014;one that keeps track of what was loaded so we can undo it.\n\nHere&#x2019;s a conceptual mockup of an alternate base TestClass, harnessing that same rollback mechanism to do a class worth of fixture setup once, and then reusing that each test in a class. Before the first test of the class, we set up the fixtures and&#x2014;here&#x2019;s the difference&#x2014;we commit. Then, for each test, we run it, and then we do a rollback afterward. Where in the old world, rolling back took us to a pristine database, it now takes us to an already-set-up fixture set. Finally, after all the tests in the class have been run, we explicitly remove the fixtures&#x2014;remember, they&#x2019;ve been committed, so we can&#x2019;t just roll back&#x2014;and commit. How do we do that last bit? Well, we run a modified version of Django&#x2019;s stock fixture-loading routine&#x2014;one that keeps track of what was loaded so we can undo it.\n
With the stock Django fixture loading, SUMO fired off 37,583 queries during the course of its tests. With per-class fixtures&#x2026;only 4,116. That&#x2019;s 9 times less traffic to MySQL. Or, to look at it in terms of time&#x2026;\n
With the stock Django fixture loading, SUMO fired off 37,583 queries during the course of its tests. With per-class fixtures&#x2026;only 4,116. That&#x2019;s 9 times less traffic to MySQL. Or, to look at it in terms of time&#x2026;\n
With the stock Django fixture loading, SUMO fired off 37,583 queries during the course of its tests. With per-class fixtures&#x2026;only 4,116. That&#x2019;s 9 times less traffic to MySQL. Or, to look at it in terms of time&#x2026;\n
With the stock Django fixture loading, SUMO fired off 37,583 queries during the course of its tests. With per-class fixtures&#x2026;only 4,116. That&#x2019;s 9 times less traffic to MySQL. Or, to look at it in terms of time&#x2026;\n
stock fixtures: 302 seconds: just over 5 minutes\nper-class fixtures: ≈1.5 mins\n\nTo get these improvements, just subclass FFTC (FastFixtureTestCase) instead of TestCase. Caveat: post-save.\n\nIn fact, an additional 4 seconds is saved by reusing a single DB connection. Total: 93s. \n\nSo, big improvement! And all thanks to getting rid of I/O. But, there are additional speed optimizations we can enjoy.\n
stock fixtures: 302 seconds: just over 5 minutes\nper-class fixtures: ≈1.5 mins\n\nTo get these improvements, just subclass FFTC (FastFixtureTestCase) instead of TestCase. Caveat: post-save.\n\nIn fact, an additional 4 seconds is saved by reusing a single DB connection. Total: 93s. \n\nSo, big improvement! And all thanks to getting rid of I/O. But, there are additional speed optimizations we can enjoy.\n
stock fixtures: 302 seconds: just over 5 minutes\nper-class fixtures: ≈1.5 mins\n\nTo get these improvements, just subclass FFTC (FastFixtureTestCase) instead of TestCase. Caveat: post-save.\n\nIn fact, an additional 4 seconds is saved by reusing a single DB connection. Total: 93s. \n\nSo, big improvement! And all thanks to getting rid of I/O. But, there are additional speed optimizations we can enjoy.\n
stock fixtures: 302 seconds: just over 5 minutes\nper-class fixtures: ≈1.5 mins\n\nTo get these improvements, just subclass FFTC (FastFixtureTestCase) instead of TestCase. Caveat: post-save.\n\nIn fact, an additional 4 seconds is saved by reusing a single DB connection. Total: 93s. \n\nSo, big improvement! And all thanks to getting rid of I/O. But, there are additional speed optimizations we can enjoy.\n
3 actual test cases from SUMO\nsame fixtures\nmerge into one class? can&#x2019;t organize as we like\nBut, we can get speed anyway\n\nHere are 3 actual test cases from SUMO. They all use the same set of fixtures. Now, how might we make these faster? Since we&#x2019;re loading fixtures once per class, we could merge the classes together, taking all their tests and throwing them into one enormous class. Then the fixtures would load once at the top of the class&#x2026;and unload once at the bottom. That&#x2019;s great, performance-wise, but it takes away our ability to organize our tests into classes as we see fit. It hurts our understandability.\n\nSo here&#x2019;s what we do instead. We take advantage of nose&#x2019;s prepareTest hook that lets us mess with tests before they&#x2019;re run.\n
Typically when nose runs your test classes, it runs them basically in alphabetical order, like shown here.\n\nthe trouble\n\nBut, by using nose’s prepareTest hook, we can write a plugin to dynamically re-order them, at test time, according to which fixtures they use. I call this “fixture bundling”.\n
(explain) dink dink dink\n\nHow does this work? advisory bits\n\nwhether first\nwhether last\n\nThroughout all of this, test independence is preserved; we&#x2019;re just factoring out pointlessly repeated setup.\n\nfuture: subsets\n\nTroubleshooting: you have order dependencies:\nsingletons, locale, other thread-locals\n
(explain) dink dink dink\n\nHow does this work? advisory bits\n\nwhether first\nwhether last\n\nThroughout all of this, test independence is preserved; we&#x2019;re just factoring out pointlessly repeated setup.\n\nfuture: subsets\n\nTroubleshooting: you have order dependencies:\nsingletons, locale, other thread-locals\n
(explain) dink dink dink\n\nHow does this work? advisory bits\n\nwhether first\nwhether last\n\nThroughout all of this, test independence is preserved; we&#x2019;re just factoring out pointlessly repeated setup.\n\nfuture: subsets\n\nTroubleshooting: you have order dependencies:\nsingletons, locale, other thread-locals\n
So what impact did fixture bundling have on SUMO? Well, before bundling, we had 114 classes with fixtures, so we did the loading and unloading 114 times. However, there were only 11 distinct sets of fixtures, so with bundling we do it 11 times. In our case, it took about another quarter off our already improved test run.\n\nYou can use this by subclassing FastFixtureTestCase and passing --with-fixture-bundling\n
So what impact did fixture bundling have on SUMO? Well, before bundling, we had 114 classes with fixtures, so we did the loading and unloading 114 times. However, there were only 11 distinct sets of fixtures, so with bundling we do it 11 times. In our case, it took about another quarter off our already improved test run.\n\nYou can use this by subclassing FastFixtureTestCase and passing --with-fixture-bundling\n
So what impact did fixture bundling have on SUMO? Well, before bundling, we had 114 classes with fixtures, so we did the loading and unloading 114 times. However, there were only 11 distinct sets of fixtures, so with bundling we do it 11 times. In our case, it took about another quarter off our already improved test run.\n\nYou can use this by subclassing FastFixtureTestCase and passing --with-fixture-bundling\n
So what impact did fixture bundling have on SUMO? Well, before bundling, we had 114 classes with fixtures, so we did the loading and unloading 114 times. However, there were only 11 distinct sets of fixtures, so with bundling we do it 11 times. In our case, it took about another quarter off our already improved test run.\n\nYou can use this by subclassing FastFixtureTestCase and passing --with-fixture-bundling\n
So what impact did fixture bundling have on SUMO? Well, before bundling, we had 114 classes with fixtures, so we did the loading and unloading 114 times. However, there were only 11 distinct sets of fixtures, so with bundling we do it 11 times. In our case, it took about another quarter off our already improved test run.\n\nYou can use this by subclassing FastFixtureTestCase and passing --with-fixture-bundling\n
waiting for DB setup sucks, esp. for a trivial test\n\nwait through 15s of DB creation and initialization of basic Django metadata—stuff which was already perfectly valid at the end of the previous test run \n
So, building on some work we had already done in this direction, I decided to skip the teardown of the test DB and, symmetrically, the setup on future runs.\n\ndink dink\n\nCaveat:\n&#x2022; It&#x2019;s not very observant, so, if you make a schema change, you have to remember to omit the reuse flag.\n\nAnd, at last, we are within a whisker of the mythical 1-minute test run.\n
So, building on some work we had already done in this direction, I decided to skip the teardown of the test DB and, symmetrically, the setup on future runs.\n\ndink dink\n\nCaveat:\n&#x2022; It&#x2019;s not very observant, so, if you make a schema change, you have to remember to omit the reuse flag.\n\nAnd, at last, we are within a whisker of the mythical 1-minute test run.\n
So, building on some work we had already done in this direction, I decided to skip the teardown of the test DB and, symmetrically, the setup on future runs.\n\ndink dink\n\nCaveat:\n&#x2022; It&#x2019;s not very observant, so, if you make a schema change, you have to remember to omit the reuse flag.\n\nAnd, at last, we are within a whisker of the mythical 1-minute test run.\n
So, building on some work we had already done in this direction, I decided to skip the teardown of the test DB and, symmetrically, the setup on future runs.\n\ndink dink\n\nCaveat:\n&#x2022; It&#x2019;s not very observant, so, if you make a schema change, you have to remember to omit the reuse flag.\n\nAnd, at last, we are within a whisker of the mythical 1-minute test run.\n
So, building on some work we had already done in this direction, I decided to skip the teardown of the test DB and, symmetrically, the setup on future runs.\n\ndink dink\n\nCaveat:\n&#x2022; It&#x2019;s not very observant, so, if you make a schema change, you have to remember to omit the reuse flag.\n\nAnd, at last, we are within a whisker of the mythical 1-minute test run.\n
Here&#x2019;s our pursuit of speed so far. dink dink dink dink\n\nWe&#x2019;re saving something like 4 minutes per test run. That may not sound like much, but it adds up. At Mozilla I had a team of 4, and if we conservatively say we each ran the suite 4 times a day, and we save 4 minutes each time, we save 64 minutes a day. That comes out to 261 hours&#x2014;or 32 working days&#x2014;per year.\n\nThat&#x2019;s enough for every team member to take an extra week off.\n\nSo, if you have a lot of fixture-heavy tests, be sure to grab django-nose and turn this stuff on.\n\nBut there&#x2019;s just one more thing.\n
Here&#x2019;s our pursuit of speed so far. dink dink dink dink\n\nWe&#x2019;re saving something like 4 minutes per test run. That may not sound like much, but it adds up. At Mozilla I had a team of 4, and if we conservatively say we each ran the suite 4 times a day, and we save 4 minutes each time, we save 64 minutes a day. That comes out to 261 hours&#x2014;or 32 working days&#x2014;per year.\n\nThat&#x2019;s enough for every team member to take an extra week off.\n\nSo, if you have a lot of fixture-heavy tests, be sure to grab django-nose and turn this stuff on.\n\nBut there&#x2019;s just one more thing.\n
Here&#x2019;s our pursuit of speed so far. dink dink dink dink\n\nWe&#x2019;re saving something like 4 minutes per test run. That may not sound like much, but it adds up. At Mozilla I had a team of 4, and if we conservatively say we each ran the suite 4 times a day, and we save 4 minutes each time, we save 64 minutes a day. That comes out to 261 hours&#x2014;or 32 working days&#x2014;per year.\n\nThat&#x2019;s enough for every team member to take an extra week off.\n\nSo, if you have a lot of fixture-heavy tests, be sure to grab django-nose and turn this stuff on.\n\nBut there&#x2019;s just one more thing.\n
Here&#x2019;s our pursuit of speed so far. dink dink dink dink\n\nWe&#x2019;re saving something like 4 minutes per test run. That may not sound like much, but it adds up. At Mozilla I had a team of 4, and if we conservatively say we each ran the suite 4 times a day, and we save 4 minutes each time, we save 64 minutes a day. That comes out to 261 hours&#x2014;or 32 working days&#x2014;per year.\n\nThat&#x2019;s enough for every team member to take an extra week off.\n\nSo, if you have a lot of fixture-heavy tests, be sure to grab django-nose and turn this stuff on.\n\nBut there&#x2019;s just one more thing.\n
Here&#x2019;s our pursuit of speed so far. dink dink dink dink\n\nWe&#x2019;re saving something like 4 minutes per test run. That may not sound like much, but it adds up. At Mozilla I had a team of 4, and if we conservatively say we each ran the suite 4 times a day, and we save 4 minutes each time, we save 64 minutes a day. That comes out to 261 hours&#x2014;or 32 working days&#x2014;per year.\n\nThat&#x2019;s enough for every team member to take an extra week off.\n\nSo, if you have a lot of fixture-heavy tests, be sure to grab django-nose and turn this stuff on.\n\nBut there&#x2019;s just one more thing.\n
Here&#x2019;s our pursuit of speed so far. dink dink dink dink\n\nWe&#x2019;re saving something like 4 minutes per test run. That may not sound like much, but it adds up. At Mozilla I had a team of 4, and if we conservatively say we each ran the suite 4 times a day, and we save 4 minutes each time, we save 64 minutes a day. That comes out to 261 hours&#x2014;or 32 working days&#x2014;per year.\n\nThat&#x2019;s enough for every team member to take an extra week off.\n\nSo, if you have a lot of fixture-heavy tests, be sure to grab django-nose and turn this stuff on.\n\nBut there&#x2019;s just one more thing.\n
We just talked about the most expensive DB operation you can possibly do: dropping the whole thing and recreating it from scratch.\n\nHorribly enough, this is what happens before every TTC, because TTCs are the ones that can commit, for real.\n\nNow, the need for a DB flush is evident enough: if the testrunner is going to guarantee a clean DB for the next test, it has to either track and back out or nuke from orbit.\n\nBut for reasons that have been lost to history, TTCs flush the DB before they run rather than after. Now you can imagine what would happen if a TTC ran and then a normal TC. ...\n\nThat’s why Django runs all TTCs last, so they can all make a mess…\n\nBut, wouldn’t it be nice to be able to write TTCs that are more responsible. Hygienic—if you will—and by making that guarantee, opt out of the flushing madness.\n\nThat could save you 30s of flushing PER TEST.\n
We just talked about the most expensive DB operation you can possibly do: dropping the whole thing and recreating it from scratch.\n\nHorribly enough, this is what happens before every TTC, because TTCs are the ones that can commit, for real.\n\nNow, the need for a DB flush is evident enough: if the testrunner is going to guarantee a clean DB for the next test, it has to either track and back out or nuke from orbit.\n\nBut for reasons that have been lost to history, TTCs flush the DB before they run rather than after. Now you can imagine what would happen if a TTC ran and then a normal TC. ...\n\nThat’s why Django runs all TTCs last, so they can all make a mess…\n\nBut, wouldn’t it be nice to be able to write TTCs that are more responsible. Hygienic—if you will—and by making that guarantee, opt out of the flushing madness.\n\nThat could save you 30s of flushing PER TEST.\n
We just talked about the most expensive DB operation you can possibly do: dropping the whole thing and recreating it from scratch.\n\nHorribly enough, this is what happens before every TTC, because TTCs are the ones that can commit, for real.\n\nNow, the need for a DB flush is evident enough: if the testrunner is going to guarantee a clean DB for the next test, it has to either track and back out or nuke from orbit.\n\nBut for reasons that have been lost to history, TTCs flush the DB before they run rather than after. Now you can imagine what would happen if a TTC ran and then a normal TC. ...\n\nThat’s why Django runs all TTCs last, so they can all make a mess…\n\nBut, wouldn’t it be nice to be able to write TTCs that are more responsible. Hygienic—if you will—and by making that guarantee, opt out of the flushing madness.\n\nThat could save you 30s of flushing PER TEST.\n
We just talked about the most expensive DB operation you can possibly do: dropping the whole thing and recreating it from scratch.\n\nHorribly enough, this is what happens before every TTC, because TTCs are the ones that can commit, for real.\n\nNow, the need for a DB flush is evident enough: if the testrunner is going to guarantee a clean DB for the next test, it has to either track and back out or nuke from orbit.\n\nBut for reasons that have been lost to history, TTCs flush the DB before they run rather than after. Now you can imagine what would happen if a TTC ran and then a normal TC. ...\n\nThat’s why Django runs all TTCs last, so they can all make a mess…\n\nBut, wouldn’t it be nice to be able to write TTCs that are more responsible. Hygienic—if you will—and by making that guarantee, opt out of the flushing madness.\n\nThat could save you 30s of flushing PER TEST.\n
Mark your TTCs with cleans_up_after_itself.\n\ndjango-nose will run it before any of those nasty, trash-spewing TTCs, so they don&#x2019;t have to pre-flush.\n\nWith a large schema, this can save minutes of IO!\n\nAt the moment, you have to bring your own overrides to not flush. Soon, it&#x2019;ll have a superclass for that.\n
That wraps up the computer speed portion of our program.\n\nBut what about human speed?\n\nWhat about a decent UI&#x2026;\n
Django's database fixtures are a good example of a piece of test setup that gets out of control, but the speed problems are actually the most harmless manifestation. There&#x2019;s actually a much more insidious, more general problem here.\n
&#x2026;which is that setup is evil.\n
standard unittest testcase / bunch of setup in setUp() / nice. DRY\n\nThe problem is this: you start with a manageably small test fixture&#x2026; (and on it goes&#x2026;)\n\nsetUp & fixtures the same Whether this takes the form of a Django-style DB fixture or a unittest setUp() routine matters not at all. What matters is that you have some common setup, and then you write some tests that depend on all of it.\nstory: forums fixture\n\nThen you add some more tests, ones that use parts of the setup but not all. Pretty soon, you have 30 tests depending on your setup routine. Then your requirements change, and you need to modify the setup slightly.\n\nQuick quiz: which of your tests have you invalidated?\n\nWhich of them don&#x2019;t care about the bits you changed and are still okay? There&#x2019;s simply no way to know without going back and rethinking your way through each test again, and you run the risk of invalidating them.\n
describe / my assertion is that this way is better\n\nhelper methods / call them when you need a particular piece of setup\nThese are simplified to fit on the slide: imagine them setting up many interrelated objects&#x2014;like many setups do&#x2014;and returning them.\n\nequally efficient setUp() is called once for each test anyway.\nmore efficient In fact, it&#x2019;s more efficient, because you never set up state that you don&#x2019;t need\n\nfuture: memoized properties\n
An equivalent thing often happened to us with Django fixtures. On one occasion, we had this test, which makes sure a user in a specific group can&#x2019;t change the locked status on a forum that he doesn&#x2019;t have permission on. Well, thank goodness for the docstring because I would have no idea whatsoever what was going on otherwise: id num / hidden data / fixture file off to the side.\n\nAnd can you imagine going in to edit that fixture? What if you had another test that used some of those objects, and you want to make a tweak to their state for the sake of that test? How do you find out what you shouldn&#x2019;t change to keep this one happy?\n\ncoupling disaster\n
uses model makers\n\nsets attrs how you say and the rest how they have to be\n\nIn this case, we&#x2019;re making sure a wiki document whose title starts with &#x201C;Template&#x201D; gets marked as a template in the DB. All we care about in the test is the title. Now, there are all sorts of other invariants going on behind the scenes: a language has to be chosen for the document, it has to have a category, etc. model maker takes care of that\n\nno special-purpose setup at all. document() used all over our tests.\n
nesting model makers\nlexically nest and reflect structure of objects, even if inside out\n\nno DB hits\n\nso simple, no library to write them&#x2026;\n\n
actual model maker from the wiki. Nothing really to factor out. this a complicated one.\n\nBest practices seem to be (1) make the minimal valid object (2) Don&#x2019;t assume things you don&#x2019;t pass as a kwarg. (3) Don&#x2019;t save\n\n@with_save\n\nfactory-boy factory-girl\n
So, I say any setup() routine or fixture that shares more than a little state between tests is an antipattern. It couples your tests together needlessly. It makes them brittle. And it makes them hard to understand&#x2014;which bits of the data do they depend on, and which are don&#x2019;t-cares, there just to satisfy some invariant from some unrelated part of the code? Furthermore, the setup is lexically far from the test code. Wouldn&#x2019;t it be preferable&#x2014;all other things being equal&#x2014;to have it nearby so you could just read straight through a test method and walk away enlightened?\n
So, I say any setup() routine or fixture that shares more than a little state between tests is an antipattern. It couples your tests together needlessly. It makes them brittle. And it makes them hard to understand&#x2014;which bits of the data do they depend on, and which are don&#x2019;t-cares, there just to satisfy some invariant from some unrelated part of the code? Furthermore, the setup is lexically far from the test code. Wouldn&#x2019;t it be preferable&#x2014;all other things being equal&#x2014;to have it nearby so you could just read straight through a test method and walk away enlightened?\n
So, I say any setup() routine or fixture that shares more than a little state between tests is an antipattern. It couples your tests together needlessly. It makes them brittle. And it makes them hard to understand&#x2014;which bits of the data do they depend on, and which are don&#x2019;t-cares, there just to satisfy some invariant from some unrelated part of the code? Furthermore, the setup is lexically far from the test code. Wouldn&#x2019;t it be preferable&#x2014;all other things being equal&#x2014;to have it nearby so you could just read straight through a test method and walk away enlightened?\n
So, I say any setup() routine or fixture that shares more than a little state between tests is an antipattern. It couples your tests together needlessly. It makes them brittle. And it makes them hard to understand&#x2014;which bits of the data do they depend on, and which are don&#x2019;t-cares, there just to satisfy some invariant from some unrelated part of the code? Furthermore, the setup is lexically far from the test code. Wouldn&#x2019;t it be preferable&#x2014;all other things being equal&#x2014;to have it nearby so you could just read straight through a test method and walk away enlightened?\n
So, I say any setup() routine or fixture that shares more than a little state between tests is an antipattern. It couples your tests together needlessly. It makes them brittle. And it makes them hard to understand&#x2014;which bits of the data do they depend on, and which are don&#x2019;t-cares, there just to satisfy some invariant from some unrelated part of the code? Furthermore, the setup is lexically far from the test code. Wouldn&#x2019;t it be preferable&#x2014;all other things being equal&#x2014;to have it nearby so you could just read straight through a test method and walk away enlightened?\n
&#x201C;Then you&#x2019;d have&#x201D;\n\ndink dink dink dink\n\nalso&#x2026;\nfreedom to refactor without breaking tests\n\nfreedom to organize into testcases by higher-level criteria, not just shared setup\n
&#x201C;Then you&#x2019;d have&#x201D;\n\ndink dink dink dink\n\nalso&#x2026;\nfreedom to refactor without breaking tests\n\nfreedom to organize into testcases by higher-level criteria, not just shared setup\n
&#x201C;Then you&#x2019;d have&#x201D;\n\ndink dink dink dink\n\nalso&#x2026;\nfreedom to refactor without breaking tests\n\nfreedom to organize into testcases by higher-level criteria, not just shared setup\n
&#x201C;Then you&#x2019;d have&#x201D;\n\ndink dink dink dink\n\nalso&#x2026;\nfreedom to refactor without breaking tests\n\nfreedom to organize into testcases by higher-level criteria, not just shared setup\n
&#x201C;Then you&#x2019;d have&#x201D;\n\ndink dink dink dink\n\nalso&#x2026;\nfreedom to refactor without breaking tests\n\nfreedom to organize into testcases by higher-level criteria, not just shared setup\n
&#x2026;that makes tests informative to humans.\n\nwhile & after running\n\nhelps you diagnose & debug?\n
This is the standard Django test display. It&#x2019;s basically what standard unittest spits out: TextTestRunner. Took the liberty of trimming out some of the time-consuming setup.\n\nBut look at this. Dots. Dot after dot after dot. Errors&#x2014;can&#x2019;t do anything about them. Can&#x2019;t even see what they are. Don&#x2019;t know how long to wait. Should I get a drink? A sandwich? A spouse? In fact, this suite goes on for over 2 minutes. Start thinking of questions.\n\nAnd finally we&#x2019;re left with this mess. Wrapped around, full of garbage like &#x201C;File&#x201D; and &#x201C;Traceback (most recent call last)&#x201D; as if we&#x2019;ve never read a traceback before. Unless you&#x2019;re using a fancy IDE, you know well the routine of squinting at the traceback, finding what file went wrong, loading it up in your editor, and then typing in the line number to go to. What a waste of time!\n
Wouldn&#x2019;t something like this be better? I&#x2019;ve put together an alternative testrunner called nose-progressive. It works with your existing tests without you having to do anything. Here&#x2019;s what it does. (K to pause)\n\nprogress bar\nnames of tests\nprompt tracebacks\ntraceback formatting is spectacular:\nno wasted lines\nno wasted columns. paths relativized\ntest frame first, omit junk frames (eq_ too)\ncolored function names. easy to scan down the stack\ncopy & paste test name to re-run\nbest of all, editor shortcuts\n
Once you know where you want to go, just triple-click that line, do a quick copy-paste, and you end up right in your editor, at the right line.\n\nworks with all editors\n
To get this in your project&#x2026;\npip install nose-progressive\n./manage.py test --with-progressive\n\nAlso works great when you&#x2019;re not using Django.\n
\n
\n
There&#x2019;s lots more potential, and I&#x2019;m behind on the review queue for django-nose. We&#x2019;re having a short sprint tomorrow, 9am-2pm, to see how far we can get merging patches.\n\nThat brings us to the end. Thank you very much!\n\nburning questions\n
There&#x2019;s lots more potential, and I&#x2019;m behind on the review queue for django-nose. We&#x2019;re having a short sprint tomorrow, 9am-2pm, to see how far we can get merging patches.\n\nThat brings us to the end. Thank you very much!\n\nburning questions\n
There&#x2019;s lots more potential, and I&#x2019;m behind on the review queue for django-nose. We&#x2019;re having a short sprint tomorrow, 9am-2pm, to see how far we can get merging patches.\n\nThat brings us to the end. Thank you very much!\n\nburning questions\n