diff --git a/.coveragerc b/.coveragerc new file mode 100644 index 00000000000..48670b41d48 --- /dev/null +++ b/.coveragerc @@ -0,0 +1,6 @@ +[run] +omit = + # standlonetemplate is read dynamically and tested by test_genscript + *standalonetemplate.py + # oldinterpret could be removed, as it is no longer used in py26+ + *oldinterpret.py diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 00000000000..242d3da0d74 --- /dev/null +++ b/.gitattributes @@ -0,0 +1 @@ +CHANGELOG merge=union diff --git a/.gitignore b/.gitignore index 2b7c267b0a6..cd6a7fc9ea7 100644 --- a/.gitignore +++ b/.gitignore @@ -17,6 +17,11 @@ include/ *.orig *~ +.eggs/ + +# this file is managed by setuptools_scm +_pytest/__init__.py + doc/*/_build build/ dist/ diff --git a/.hgignore b/.hgignore deleted file mode 100644 index 16257ba784e..00000000000 --- a/.hgignore +++ /dev/null @@ -1,39 +0,0 @@ -# Automatically generated by `hgimportsvn` -syntax:glob -.svn -.hgsvn - -# Ignore local virtualenvs -syntax:glob -lib/ -bin/ -include/ -.Python/ -.env/ - -# These lines are suggested according to the svn:ignore property -# Feel free to enable them by uncommenting them -syntax:glob -*.pyc -*.pyo -*.swp -*.html -*.class -*.orig -*~ - -doc/*/_build -build/ -dist/ -testing/cx_freeze/build -testing/cx_freeze/cx_freeze_source -*.egg-info -issue/ -env/ -env3/ -3rdparty/ -.tox -.cache -.coverage -.ropeproject -*.sublime-* diff --git a/.hgtags b/.hgtags deleted file mode 100644 index be833a1e9cf..00000000000 --- a/.hgtags +++ /dev/null @@ -1,77 +0,0 @@ -52c6d9e78777a5a34e813123997dfc614a1a4767 1.0.0b3 -1c7aaa8c61f3b0945921a9acc7beb184201aed4b 1.0.0b4 -1c7aaa8c61f3b0945921a9acc7beb184201aed4b 1.0.0b4 -0000000000000000000000000000000000000000 1.0.0b4 -0000000000000000000000000000000000000000 1.0.0b4 -8cd6eb91eba313b012d6e568f37d844dc0751f2e 1.0.0b4 -8cd6eb91eba313b012d6e568f37d844dc0751f2e 1.0.0b4 -0000000000000000000000000000000000000000 1.0.0b4 -2cc0507f117ffe721dff7ee026648cfce00ec92f 1.0.0b6 
-86f1e1b6e49bf5882a809f11edd1dbb08162cdad 1.0.0b8 -86f1e1b6e49bf5882a809f11edd1dbb08162cdad 1.0.0b8 -c63f35c266cbb26dad6b87b5e115d65685adf448 1.0.0b8 -c63f35c266cbb26dad6b87b5e115d65685adf448 1.0.0b8 -0eaa0fdf2ba0163cf534dc2eff4ba2e5fc66c261 1.0.0b8 -e2a60653cb490aeed81bbbd83c070b99401c211c 1.0.0b9 -5ea0cdf7854c3d4278d36eda94a2b68483a0e211 1.0.0 -5ea0cdf7854c3d4278d36eda94a2b68483a0e211 1.0.0 -7acde360d94b6a2690ce3d03ff39301da84c0a2b 1.0.0 -6bd221981ac99103002c1cb94fede400d23a96a1 1.0.1 -4816e8b80602a3fd3a0a120333ad85fbe7d8bab4 1.0.2 -60c44bdbf093285dc69d5462d4dbb4acad325ca6 1.1.0 -319187fcda66714c5eb1353492babeec3d3c826f 1.1.1 -4fc5212f7626a56b9eb6437b5c673f56dd7eb942 1.2.0 -c143a8c8840a1c68570890c8ac6165bbf92fd3c6 1.2.1 -eafd3c256e8732dfb0a4d49d051b5b4339858926 1.3.0 -d5eacf390af74553227122b85e20345d47b2f9e6 1.3.1 -d5eacf390af74553227122b85e20345d47b2f9e6 1.3.1 -8b8e7c25a13cf863f01b2dd955978285ae9daf6a 1.3.1 -3bff44b188a7ec1af328d977b9d39b6757bb38df 1.3.2 -c59d3fa8681a5b5966b8375b16fccd64a3a8dbeb 1.3.3 -79ef6377705184c55633d456832eea318fedcf61 1.3.4 -79ef6377705184c55633d456832eea318fedcf61 1.3.4 -90fffd35373e9f125af233f78b19416f0938d841 1.3.4 -e9e127acd6f0497324ef7f40cfb997cad4c4cd17 2.0.0 -e4497c2aed358c1988cf7be83ca9394c3c707fa2 2.0.1 -84e5c54b72448194a0f6f815da7e048ac8019d50 2.0.2 -2ef82d82daacb72733a3a532a95c5a37164e5819 2.0.3 -2ef82d82daacb72733a3a532a95c5a37164e5819 2.0.3 -c777dcad166548b7499564cb49ae5c8b4b07f935 2.0.3 -c777dcad166548b7499564cb49ae5c8b4b07f935 2.0.3 -49f11dbff725acdcc5fe3657cbcdf9ae04e25bbc 2.0.3 -49f11dbff725acdcc5fe3657cbcdf9ae04e25bbc 2.0.3 -363e5a5a59c803e6bc176a6f9cc4bf1a1ca2dab0 2.0.3 -e5e1746a197f0398356a43fbe2eebac9690f795d 2.1.0 -5864412c6f3c903384243bd315639d101d7ebc67 2.1.2 -12a05d59249f80276e25fd8b96e8e545b1332b7a 2.1.3 -1522710369337d96bf9568569d5f0ca9b38a74e0 2.2.0 -3da8cec6c5326ed27c144c9b6d7a64a648370005 2.2.1 -92b916483c1e65a80dc80e3f7816b39e84b36a4d 2.2.2 -3c11c5c9776f3c678719161e96cc0a08169c1cb8 2.2.3 
-ad9fe504a371ad8eb613052d58f229aa66f53527 2.2.4 -c27a60097767c16a54ae56d9669a77925b213b9b 2.3.0 -acf0e1477fb19a1d35a4e40242b77fa6af32eb17 2.3.1 -8738b828dec53937765db71951ef955cca4c51f6 2.3.2 -7fe44182c434f8ac89149a3c340479872a5d5ccb 2.3.3 -ef299e57f24218dbdd949498d7e660723636bcc3 2.3.4 -fc3a793e87ec907000a47ea0d3a372a2fe218c0a 2.3.5 -b93ac0cdae02effaa3c136a681cc45bba757fe46 1.4.14 -b93ac0cdae02effaa3c136a681cc45bba757fe46 1.4.14 -0000000000000000000000000000000000000000 1.4.14 -0000000000000000000000000000000000000000 1.4.14 -0000000000000000000000000000000000000000 1.4.14 -af860de70cc3f157ac34ca1d4bf557a057bff775 2.4.0 -8828c924acae0b4cad2e2cb92943d51da7cb744a 2.4.1 -8d051f89184bfa3033f5e59819dff9f32a612941 2.4.2 -a064ad64d167508a8e9e73766b1a4e6bd10c85db 2.5.0 -039d543d1ca02a716c0b0de9a7131beb8021e8a2 2.5.1 -421d3b4d150d901de24b1cbeb8955547b1420483 2.5.2 -60725b17a9d1af4100abb8be3f9f4ddf6262bf34 2.6.0 -60725b17a9d1af4100abb8be3f9f4ddf6262bf34 2.6.0 -88af949b9611494e2c65d528f9e565b00fb7e8ca 2.6.0 -a4f9639702baa3eb4f3b16e162f74f7b69f3f9e1 2.6.1 -a4f25c5e649892b5cc746d21be971e4773478af9 2.6.2 -2967aa416a4f3cdb65fc75073a2a148e1f372742 2.6.3 -f03b6de8325f5b6c35cea7c3de092f134ea8ef07 2.6.4 -7ed701fa2fb554bfc0618d447dfec700cc697407 2.7.0 -edc1d080bab5a970da8f6c776be50768829a7b09 2.7.1 diff --git a/.travis.yml b/.travis.yml index 0f5bcea9ba5..e83220105cc 100644 --- a/.travis.yml +++ b/.travis.yml @@ -7,31 +7,35 @@ install: "pip install -U tox" # # command to run tests env: matrix: + - TESTENV=coveralls + - TESTENV=doctesting - TESTENV=flakes - TESTENV=py26 - TESTENV=py27 - - TESTENV=py33 - - TESTENV=py34 - - TESTENV=py35 - - TESTENV=pypy - - TESTENV=py27-pexpect - - TESTENV=py34-pexpect + - TESTENV=py27-cxfreeze - TESTENV=py27-nobyte - - TESTENV=py27-xdist - - TESTENV=py34-xdist + - TESTENV=py27-pexpect + - TESTENV=py27-subprocess - TESTENV=py27-trial + - TESTENV=py27-xdist + - TESTENV=py33 - TESTENV=py33 + - TESTENV=py34 + - TESTENV=py34-pexpect - TESTENV=py34-trial - # 
inprocess tests by default were introduced in 2.8 only; - # this TESTENV should be enabled when merged back to master - #- TESTENV=py27-subprocess - - TESTENV=doctesting - - TESTENV=py27-cxfreeze - - TESTENV=coveralls -script: tox --recreate -i ALL=https://devpi.net/hpk/dev/ -e $TESTENV + - TESTENV=py34-trial + - TESTENV=py34-xdist + - TESTENV=py35 + - TESTENV=pypy + +script: tox --recreate -e $TESTENV notifications: irc: - - "chat.freenode.net#pytest-dev" + channels: + - "chat.freenode.net#pytest" + on_success: change + on_failure: change + skip_join: true email: - pytest-commit@python.org diff --git a/AUTHORS b/AUTHORS index ca82b483f53..8d700878cfe 100644 --- a/AUTHORS +++ b/AUTHORS @@ -3,6 +3,7 @@ merlinux GmbH, Germany, office at merlinux eu Contributors include:: +Abhijeet Kasurde Anatoly Bubenkoff Andreas Zeidler Andy Freeland @@ -14,6 +15,7 @@ Bob Ippolito Brian Dorsey Brian Okken Brianna Laugher +Bruno Oliveira Carl Friedrich Bolz Charles Cloud Chris Lamb @@ -25,6 +27,11 @@ Daniel Nuri Dave Hunt David Mohr Edison Gustavo Muenz +Eduardo Schettino +Elizaveta Shashkova +Eric Hunsberger +Eric Siegerman +Florian Bruhin Floris Bruynooghe Graham Horler Grig Gheorghiu @@ -33,13 +40,16 @@ Harald Armin Massa Ian Bicking Jaap Broekhuizen Jan Balster +Janne Vanhala Jason R. Coombs Jurko Gospodnetić Katarzyna Jachim +Kevin Cox Maciek Fijalkowski Maho Marc Schlaich Mark Abramowitz +Markus Unterwaditzer Martijn Faassen Nicolas Delaby Pieter Mulder @@ -52,3 +62,4 @@ Samuele Pedroni Tom Viner Trevor Bekolay Wouter van Ackooy +David Díaz-Barquero diff --git a/CHANGELOG b/CHANGELOG index c42743d88f1..252c4ba1f35 100644 --- a/CHANGELOG +++ b/CHANGELOG @@ -1,6 +1,190 @@ +2.8.0.dev (compared to 2.7.X) +----------------------------- + +- "-r" option now accepts "a" to include all possible reports, similar + to passing "fEsxXw" explicitly (isse960). + Thanks Abhijeet Kasurde for the PR. + +- fix issue562: @nose.tools.istest now fully respected. 
+ +- fix issue934: when string comparison fails and a diff is too large to display + without passing -vv, still show a few lines of the diff. + Thanks Florian Bruhin for the report and Bruno Oliveira for the PR. + +- fix issue736: Fix a bug where fixture params would be discarded when combined + with parametrization markers. + Thanks to Markus Unterwaditzer for the PR. + +- fix issue710: introduce ALLOW_UNICODE doctest option: when enabled, the + ``u`` prefix is stripped from unicode strings in expected doctest output. This + allows doctests which use unicode to run in Python 2 and 3 unchanged. + Thanks Jason R. Coombs for the report and Bruno Oliveira for the PR. + +- parametrize now also generates meaningful test IDs for enum, regex and class + objects (as opposed to class instances). + Thanks to Florian Bruhin for the PR. + +- Add 'warns' to assert that warnings are thrown (like 'raises'). + Thanks to Eric Hunsberger for the PR. + +- Fix issue683: Do not apply an already applied mark. Thanks ojake for the PR. + +- Deal with capturing failures better so fewer exceptions get lost to + /dev/null. Thanks David Szotten for the PR. + +- fix issue730: deprecate and warn about the --genscript option. + Thanks Ronny Pfannschmidt for the report and Christian Pommranz for the PR. + +- fix issue751: multiple parametrize with ids bug if it parametrizes class with + two or more test methods. Thanks Sergey Chipiga for reporting and Jan + Bednarik for PR. + +- fix issue82: avoid loading conftest files from setup.cfg/pytest.ini/tox.ini + files and upwards by default (--confcutdir can still be set to override this). + Thanks Bruno Oliveira for the PR. + +- fix issue768: docstrings found in python modules were not setting up session + fixtures. Thanks Jason R. Coombs for reporting and Bruno Oliveira for the PR. + +- added `tmpdir_factory`, a session-scoped fixture that can be used to create + directories under the base temporary directory. 
Previously this object was + installed as a `_tmpdirhandler` attribute of the `config` object, but now it + is part of the official API and using `config._tmpdirhandler` is + deprecated. + Thanks Bruno Oliveira for the PR. + +- fix issue808: pytest's internal assertion rewrite hook now implements the + optional PEP302 get_data API so tests can access data files next to them. + Thanks xmo-odoo for request and example and Bruno Oliveira for + the PR. + +- rootdir and inifile are now displayed during usage errors to help + users diagnose problems such as unexpected ini files which add + unknown options being picked up by pytest. Thanks to Pavel Savchenko for + bringing the problem to attention in #821 and Bruno Oliveira for the PR. + +- Summary bar now is colored yellow for warning + situations such as: all tests either were skipped or xpass/xfailed, + or no tests were run at all (this is a partial fix for issue500). +- fix issue812: pytest now exits with status code 5 in situations where no + tests were run at all, such as the directory given in the command line does + not contain any tests or as result of a command line option filters + all out all tests (-k for example). + Thanks Eric Siegerman (issue812) and Bruno Oliveira for the PR. + +- Summary bar now is colored yellow for warning + situations such as: all tests either were skipped or xpass/xfailed, + or no tests were run at all (related to issue500). + Thanks Eric Siegerman. + +- New `testpaths` ini option: list of directories to search for tests + when executing pytest from the root directory. This can be used + to speed up test collection when a project has well specified directories + for tests, being usually more practical than configuring norecursedirs for + all directories that do not contain tests. + Thanks to Adrian for idea (#694) and Bruno Oliveira for the PR. + +- fix issue713: JUnit XML reports for doctest failures. + Thanks Punyashloka Biswal. 
+ +- fix issue970: internal pytest warnings now appear as "pytest-warnings" in + the terminal instead of "warnings", so it is clear for users that those + warnings are from pytest and not from the builtin "warnings" module. + Thanks Bruno Oliveira. + +- Include setup and teardown in junitxml test durations. + Thanks Janne Vanhala. + +- fix issue735: assertion failures on debug versions of Python 3.4+ + +- change test module importing behaviour to append to sys.path + instead of prepending. This better allows to run test modules + against installated versions of a package even if the package + under test has the same import root. In this example:: + + testing/__init__.py + testing/test_pkg_under_test.py + pkg_under_test/ + + the tests will preferrably run against the installed version + of pkg_under_test whereas before they would always pick + up the local version. Thanks Holger Krekel. + +- pytester: add method ``TmpTestdir.delete_loaded_modules()``, and call it + from ``inline_run()`` to allow temporary modules to be reloaded. + Thanks Eduardo Schettino. + +- internally refactor pluginmanager API and code so that there + is a clear distinction between a pytest-agnostic rather simple + pluginmanager and the PytestPluginManager which adds a lot of + behaviour, among it handling of the local conftest files. + In terms of documented methods this is a backward compatible + change but it might still break 3rd party plugins which relied on + details like especially the pluginmanager.add_shutdown() API. + Thanks Holger Krekel. + +- pluginmanagement: introduce ``pytest.hookimpl`` and + ``pytest.hookspec`` decorators for setting impl/spec + specific parameters. This substitutes the previous + now deprecated use of ``pytest.mark`` which is meant to + contain markers for test functions only. + +- write/refine docs for "writing plugins" which now have their + own page and are separate from the "using/installing plugins`` page. 
+ +- fix issue732: properly unregister plugins from any hook calling + sites allowing to have temporary plugins during test execution. + +- deprecate and warn about ``__multicall__`` argument in hook + implementations. Use the ``hookwrapper`` mechanism instead already + introduced with pytest-2.7. + +- speed up pytest's own test suite considerably by using inprocess + tests by default (testrun can be modified with --runpytest=subprocess + to create subprocesses in many places instead). The main + APIs to run pytest in a test is "runpytest()" or "runpytest_subprocess" + and "runpytest_inprocess" if you need a particular way of running + the test. In all cases you get back a RunResult but the inprocess + one will also have a "reprec" attribute with the recorded events/reports. + +- fix monkeypatch.setattr("x.y", raising=False) to actually not raise + if "y" is not a pre-existing attribute. Thanks Florian Bruhin. + +- fix issue741: make running output from testdir.run copy/pasteable + Thanks Bruno Oliveira. + +- add a new ``--noconftest`` argument which ignores all ``conftest.py`` files. + +- add ``file`` and ``line`` attributes to JUnit-XML output. + +- fix issue890: changed extension of all documentation files from ``txt`` to + ``rst``. Thanks to Abhijeet for the PR. + +- fix issue714: add ability to apply indirect=True parameter on particular argnames. + Thanks Elizaveta239. + +- fix issue714: add ability to apply indirect=True parameter on particular argnames. + +- fix issue890: changed extension of all documentation files from ``txt`` to + ``rst``. Thanks to Abhijeet for the PR. + +- fix issue957: "# doctest: SKIP" option will now register doctests as SKIPPED + rather than PASSED. + Thanks Thomas Grainger for the report and Bruno Oliveira for the PR. + +- issue951: add new record_xml_property fixture, that supports logging + additional information on xml output. Thanks David Diaz for the PR. 
+ +- issue949: paths after normal options (for example `-s`, `-v`, etc) are now + properly used to discover `rootdir` and `ini` files. + Thanks Peter Lauri for the report and Bruno Oliveira for the PR. + 2.7.3 (compared to 2.7.2) ----------------------------- +- fix issue 877: propperly handle assertion explanations with non-ascii repr + Thanks Mathieu Agopian for the report + - Allow 'dev', 'rc', or other non-integer version strings in `importorskip`. Thanks to Eric Hunsberger for the PR. @@ -41,7 +225,6 @@ directories created by this fixture (defaults to $TEMP/pytest-$USER). Thanks Bruno Oliveira for the PR. - 2.7.2 (compared to 2.7.1) ----------------------------- @@ -164,7 +347,7 @@ it from the "decorator" case. Thanks Tom Viner. - "python_classes" and "python_functions" options now support glob-patterns - for test discovery, as discussed in issue600. Thanks Ldiary Translations. + for test discovery, as discussed in issue600. Thanks Ldiary Translations. - allow to override parametrized fixtures with non-parametrized ones and vice versa (bubenkoff). @@ -1973,7 +2156,7 @@ v1.0.0b1 * introduced new "funcarg" setup method, see doc/test/funcarg.txt -* introduced plugin architecuture and many +* introduced plugin architecture and many new py.test plugins, see doc/test/plugins.txt diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 587b309a6d8..4a418e62e3b 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -21,7 +21,7 @@ in repositories living under: - `the pytest-dev bitbucket team `_ -All pytest-dev team members have write access to all contained +All pytest-dev Contributors team members have write access to all contained repositories. pytest core and plugins are generally developed using `pull requests`_ to respective repositories. 
@@ -46,9 +46,9 @@ the following: If no contributor strongly objects and two agree, the repo will be transferred to the ``pytest-dev`` organisation and you'll become a -member of the ``pytest-dev`` team, with commit rights to all projects. -We recommend that each plugin has at least three people who have the -right to release to pypi. +member of the ``pytest-dev Contributors`` team, with commit rights +to all projects. We recommend that each plugin has at least three +people who have the right to release to pypi. .. _reportbugs: @@ -66,6 +66,10 @@ If you are reporting a bug, please include: installed libraries and pytest version. * Detailed steps to reproduce the bug. +If you can write a demonstration test that currently fails but should pass (xfail), +that is a very useful commit to make as well, even if you can't find how +to fix the bug yet. + .. _submitfeedback: Submit feedback for developers @@ -93,6 +97,8 @@ https://github.com/pytest-dev/pytest/labels/bug :ref:`Talk ` to developers to find out how you can fix specific bugs. +Don't forget to check the issue trackers of your favourite plugins, too! + .. _writeplugins: Implement features @@ -111,10 +117,14 @@ Write documentation pytest could always use more documentation. What exactly is needed? * More complementary documentation. Have you perhaps found something unclear? -* Documentation translations. We currently have English and Japanese versions. +* Documentation translations. We currently have only English. * Docstrings. There's never too much of them. * Blog posts, articles and such -- they're all very appreciated. +You can also edit documentation files directly in the Github web interface +without needing to make a fork and local copy. This can be convenient for +small fixes. + .. _`pull requests`: .. _pull-requests: @@ -181,9 +191,13 @@ but here is a simple overview: $ git commit -a -m "" $ git push -u + Make sure you add a CHANGELOG message, and add yourself to AUTHORS. 
If you + are unsure about either of these steps, submit your pull request and we'll + help you fix it up. + #. Finally, submit a pull request through the GitHub website: - .. image:: img/pullrequest.png + .. image:: doc/en/img/pullrequest.png :width: 700px :align: center diff --git a/HOWTORELEASE.rst b/HOWTORELEASE.rst index a2087e2d296..b886f0fd699 100644 --- a/HOWTORELEASE.rst +++ b/HOWTORELEASE.rst @@ -1,59 +1,87 @@ - -How to release pytest (draft) +How to release pytest -------------------------------------------- -1. bump version numbers in _pytest/__init__.py (setup.py reads it) +Note: this assumes you have already registered on pypi. + +1. Bump version numbers in _pytest/__init__.py (setup.py reads it) -2. check and finalize CHANGELOG +2. Check and finalize CHANGELOG -3. write doc/en/announce/release-VERSION.txt and include +3. Write doc/en/announce/release-VERSION.txt and include it in doc/en/announce/index.txt -4. use devpi for uploading a release tarball to a staging area: - - ``devpi use https://devpi.net/USER/dev`` - - ``devpi upload --formats sdist,bdist_wheel`` +4. Use devpi for uploading a release tarball to a staging area: + + ``devpi use https://devpi.net/USER/dev`` + ``devpi upload --formats sdist,bdist_wheel`` + +5. Run from multiple machines: + + ``devpi use https://devpi.net/USER/dev`` + ``devpi test pytest==VERSION`` -5. run from multiple machines: - - ``devpi use https://devpi.net/USER/dev`` - - ``devpi test pytest==VERSION`` +6. Check that tests pass for relevant combinations with + + ``devpi list pytest`` -6. check that tests pass for relevant combinations with - ``devpi list pytest`` or look at failures with "devpi list -f pytest". There will be some failed environments like e.g. the py33-trial or py27-pexpect tox environments on Win32 platforms which is ok (tox does not support skipping on per-platform basis yet). -7. 
Regenerate the docs examples using tox:: - # Create and activate a virtualenv with regendoc installed - # (currently needs revision 4a9ec1035734) +7. Regenerate the docs examples using tox, and check for regressions:: + tox -e regen + git diff + -8. Build the docs, you need a virtualenv with, py and sphinx +8. Build the docs, you need a virtualenv with py and sphinx installed:: - cd docs/en + + cd doc/en make html + Commit any changes before tagging the release. + 9. Tag the release:: - hg tag VERSION -10. Upload the docs using docs/en/Makefile:: - cd docs/en - make install # or "installall" if you have LaTeX installed + git tag VERSION + git push + +10. Upload the docs using doc/en/Makefile:: + + cd doc/en + make install # or "installall" if you have LaTeX installed for PDF + This requires ssh-login permission on pytest.org because it uses rsync. Note that the "install" target of doc/en/Makefile defines where the rsync goes to, typically to the "latest" section of pytest.org. -11. publish to pypi "devpi push pytest-VERSION pypi:NAME" where NAME - is the name of pypi.python.org as configured in your - ~/.pypirc file -- it's the same you would use with - "setup.py upload -r NAME" + If you are making a minor release (e.g. 5.4), you also need to manually + create a symlink for "latest":: + + ssh pytest-dev@pytest.org + ln -s 5.4 latest + + Browse to pytest.org to verify. + +11. Publish to pypi:: + + devpi push pytest-VERSION pypi:NAME + + where NAME is the name of pypi.python.org as configured in your + ~/.pypirc file `for devpi `_. + + +12. Send release announcement to mailing lists: -12. send release announcement to mailing lists: + - pytest-dev + - testing-in-python + - python-announce-list@python.org - pytest-dev - testing-in-python - python-announce-list@python.org +13. **after the release** Bump the version number in ``_pytest/__init__.py``, + to the next Minor release version (i.e. if you released ``pytest-2.8.0``, + set it to ``pytest-2.9.0.dev1``). 
diff --git a/Makefile b/Makefile index ddf28741846..0b0fd61fef0 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,11 @@ # Set of targets useful for development/release process PYTHON = python2.7 PATH := $(PWD)/.env/bin:$(PATH) +REGENDOC_ARGS := \ + --normalize "/={8,} (.*) ={8,}/======= \1 ========/" \ + --normalize "/_{8,} (.*) _{8,}/_______ \1 ________/" \ + --normalize "/in \d+.\d+ seconds/in 0.12 seconds/" \ + --normalize "@/tmp/pytest-\d+/@/tmp/pytest-NaN/@" # prepare virtual python environment .env: @@ -16,10 +21,11 @@ clean: # generate documentation docs: develop - find doc/en -name '*.txt' -not -path 'doc/en/_build/*' | xargs .env/bin/regendoc + find doc/en -name '*.rst' -not -path 'doc/en/_build/*' | xargs .env/bin/regendoc ${REGENDOC_ARGS} cd doc/en; make html # upload documentation upload-docs: develop - find doc/en -name '*.txt' -not -path 'doc/en/_build/*' | xargs .env/bin/regendoc --update - cd doc/en; make install + find doc/en -name '*.rst' -not -path 'doc/en/_build/*' | xargs .env/bin/regendoc ${REGENDOC_ARGS} --update + #cd doc/en; make install + diff --git a/README.rst b/README.rst index 704536932c7..e816b2ca7b3 100644 --- a/README.rst +++ b/README.rst @@ -1,16 +1,27 @@ -.. image:: https://pypip.in/v/pytest/badge.png +====== +pytest +====== + +The ``pytest`` testing tool makes it easy to write small tests, yet +scales to support complex functional testing. + +.. image:: http://img.shields.io/pypi/v/pytest.svg :target: https://pypi.python.org/pypi/pytest +.. image:: http://img.shields.io/coveralls/pytest-dev/pytest/master.svg + :target: https://coveralls.io/r/pytest-dev/pytest +.. image:: https://travis-ci.org/pytest-dev/pytest.svg?branch=master + :target: https://travis-ci.org/pytest-dev/pytest +.. 
image:: https://ci.appveyor.com/api/projects/status/mrgbjaua7t33pg6b?svg=true + :target: https://ci.appveyor.com/project/pytestbot/pytest Documentation: http://pytest.org/latest/ Changelog: http://pytest.org/latest/changelog.html -Issues: https://bitbucket.org/pytest-dev/pytest/issues?status=open +Issues: https://github.com/pytest-dev/pytest/issues -CI: https://drone.io/bitbucket.org/pytest-dev/pytest - -The ``pytest`` testing tool makes it easy to write small tests, yet -scales to support complex functional testing. It provides +Features +-------- - `auto-discovery `_ @@ -27,7 +38,9 @@ scales to support complex functional testing. It provides - many `external plugins `_. -A simple example for a test:: +A simple example for a test: + +.. code-block:: python # content of test_module.py def test_function(): @@ -42,12 +55,12 @@ For much more info, including PDF docs, see and report bugs at: - http://bitbucket.org/pytest-dev/pytest/issues/ + https://github.com/pytest-dev/pytest/issues and checkout or fork repo at: - http://bitbucket.org/pytest-dev/pytest/ + https://github.com/pytest-dev/pytest -Copyright Holger Krekel and others, 2004-2014 +Copyright Holger Krekel and others, 2004-2015 Licensed under the MIT license. 
diff --git a/_pytest/__init__.py b/_pytest/__init__.py deleted file mode 100644 index a521e0f8a19..00000000000 --- a/_pytest/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# -__version__ = '2.7.2' diff --git a/_pytest/assertion/__init__.py b/_pytest/assertion/__init__.py index ef3a63f9539..54742347c66 100644 --- a/_pytest/assertion/__init__.py +++ b/_pytest/assertion/__init__.py @@ -70,12 +70,11 @@ def pytest_configure(config): config._assertstate = AssertionState(config, mode) config._assertstate.hook = hook config._assertstate.trace("configured with mode set to %r" % (mode,)) - - -def pytest_unconfigure(config): - hook = config._assertstate.hook - if hook is not None and hook in sys.meta_path: - sys.meta_path.remove(hook) + def undo(): + hook = config._assertstate.hook + if hook is not None and hook in sys.meta_path: + sys.meta_path.remove(hook) + config.add_cleanup(undo) def pytest_collection(session): @@ -115,8 +114,11 @@ def callbinrepr(op, left, right): if new_expl: if (sum(len(p) for p in new_expl[1:]) > 80*8 and item.config.option.verbose < 2): - new_expl[1:] = [py.builtin._totext( - 'Detailed information truncated, use "-vv" to show')] + show_max = 10 + truncated_lines = len(new_expl) - show_max + new_expl[show_max:] = [py.builtin._totext( + 'Detailed information truncated (%d more lines)' + ', use "-vv" to show' % truncated_lines)] new_expl = [line.replace("\n", "\\n") for line in new_expl] res = py.builtin._totext("\n~").join(new_expl) if item.config.getvalue("assertmode") == "rewrite": diff --git a/_pytest/assertion/rewrite.py b/_pytest/assertion/rewrite.py index 6dbbd4f49fb..b7a6c1c5e70 100644 --- a/_pytest/assertion/rewrite.py +++ b/_pytest/assertion/rewrite.py @@ -203,6 +203,12 @@ def _register_with_pkg_resources(cls): # DefaultProvider is appropriate. pkg_resources.register_loader_type(cls, pkg_resources.DefaultProvider) + def get_data(self, pathname): + """Optional PEP302 get_data API. 
+ """ + with open(pathname, 'rb') as f: + return f.read() + def _write_pyc(state, co, source_stat, pyc): # Technically, we don't have to have the same pyc format as diff --git a/_pytest/assertion/util.py b/_pytest/assertion/util.py index fb5f9be0ee4..ca54f1692c0 100644 --- a/_pytest/assertion/util.py +++ b/_pytest/assertion/util.py @@ -129,7 +129,16 @@ def assertrepr_compare(config, op, left, right): width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op left_repr = py.io.saferepr(left, maxsize=int(width/2)) right_repr = py.io.saferepr(right, maxsize=width-len(left_repr)) - summary = u('%s %s %s') % (left_repr, op, right_repr) + + # the re-encoding is needed for python2 repr + # with non-ascii characters (see issue 877) + def ecu(s): + try: + return u(s, 'utf-8', 'replace') + except TypeError: + return s + + summary = u('%s %s %s') % (ecu(left_repr), op, ecu(right_repr)) issequence = lambda x: (isinstance(x, (list, tuple, Sequence)) and not isinstance(x, basestring)) diff --git a/_pytest/capture.py b/_pytest/capture.py index 0042b274bd4..58f9cb52562 100644 --- a/_pytest/capture.py +++ b/_pytest/capture.py @@ -29,7 +29,7 @@ def pytest_addoption(parser): help="shortcut for --capture=no.") -@pytest.mark.hookwrapper +@pytest.hookimpl(hookwrapper=True) def pytest_load_initial_conftests(early_config, parser, args): ns = early_config.known_args_namespace pluginmanager = early_config.pluginmanager @@ -37,13 +37,13 @@ def pytest_load_initial_conftests(early_config, parser, args): pluginmanager.register(capman, "capturemanager") # make sure that capturemanager is properly reset at final shutdown - pluginmanager.add_shutdown(capman.reset_capturings) + early_config.add_cleanup(capman.reset_capturings) # make sure logging does not raise exceptions at the end def silence_logging_at_shutdown(): if "logging" in sys.modules: sys.modules["logging"].raiseExceptions = False - pluginmanager.add_shutdown(silence_logging_at_shutdown) + 
early_config.add_cleanup(silence_logging_at_shutdown) # finally trigger conftest loading but while capturing (issue93) capman.init_capturings() @@ -86,8 +86,10 @@ def suspendcapture(self, in_=False): self.deactivate_funcargs() cap = getattr(self, "_capturing", None) if cap is not None: - outerr = cap.readouterr() - cap.suspend_capturing(in_=in_) + try: + outerr = cap.readouterr() + finally: + cap.suspend_capturing(in_=in_) return outerr def activate_funcargs(self, pyfuncitem): @@ -101,7 +103,7 @@ def deactivate_funcargs(self): if capfuncarg is not None: capfuncarg.close() - @pytest.mark.hookwrapper + @pytest.hookimpl(hookwrapper=True) def pytest_make_collect_report(self, collector): if isinstance(collector, pytest.File): self.resumecapture() @@ -115,13 +117,13 @@ def pytest_make_collect_report(self, collector): else: yield - @pytest.mark.hookwrapper + @pytest.hookimpl(hookwrapper=True) def pytest_runtest_setup(self, item): self.resumecapture() yield self.suspendcapture_item(item, "setup") - @pytest.mark.hookwrapper + @pytest.hookimpl(hookwrapper=True) def pytest_runtest_call(self, item): self.resumecapture() self.activate_funcargs(item) @@ -129,17 +131,17 @@ def pytest_runtest_call(self, item): #self.deactivate_funcargs() called from suspendcapture() self.suspendcapture_item(item, "call") - @pytest.mark.hookwrapper + @pytest.hookimpl(hookwrapper=True) def pytest_runtest_teardown(self, item): self.resumecapture() yield self.suspendcapture_item(item, "teardown") - @pytest.mark.tryfirst + @pytest.hookimpl(tryfirst=True) def pytest_keyboard_interrupt(self, excinfo): self.reset_capturings() - @pytest.mark.tryfirst + @pytest.hookimpl(tryfirst=True) def pytest_internalerror(self, excinfo): self.reset_capturings() diff --git a/_pytest/config.py b/_pytest/config.py index ad944adc582..2a3c71201f7 100644 --- a/_pytest/config.py +++ b/_pytest/config.py @@ -8,11 +8,16 @@ import py # DON't import pytest here because it causes import cycle troubles import sys, os -from _pytest 
import hookspec # the extension point definitions -from _pytest.core import PluginManager +import _pytest.hookspec # the extension point definitions +from pluggy import PluginManager, HookimplMarker, HookspecMarker + +hookimpl = HookimplMarker("pytest") +hookspec = HookspecMarker("pytest") # pytest startup # + + class ConftestImportFailure(Exception): def __init__(self, path, excinfo): Exception.__init__(self, path, excinfo) @@ -29,16 +34,24 @@ def main(args=None, plugins=None): initialization. """ try: - config = _prepareconfig(args, plugins) - except ConftestImportFailure: - e = sys.exc_info()[1] - tw = py.io.TerminalWriter(sys.stderr) - for line in traceback.format_exception(*e.excinfo): - tw.line(line.rstrip(), red=True) - tw.line("ERROR: could not load %s\n" % (e.path), red=True) + try: + config = _prepareconfig(args, plugins) + except ConftestImportFailure as e: + tw = py.io.TerminalWriter(sys.stderr) + for line in traceback.format_exception(*e.excinfo): + tw.line(line.rstrip(), red=True) + tw.line("ERROR: could not load %s\n" % (e.path), red=True) + return 4 + else: + try: + config.pluginmanager.check_pending() + return config.hook.pytest_cmdline_main(config=config) + finally: + config._ensure_unconfigure() + except UsageError as e: + for msg in e.args: + sys.stderr.write("ERROR: %s\n" %(msg,)) return 4 - else: - return config.hook.pytest_cmdline_main(config=config) class cmdline: # compatibility namespace main = staticmethod(main) @@ -53,19 +66,34 @@ class UsageError(Exception): "tmpdir monkeypatch recwarn pastebin helpconfig nose assertion genscript " "junitxml resultlog doctest").split() +builtin_plugins = set(default_plugins) +builtin_plugins.add("pytester") + + def _preloadplugins(): assert not _preinit - _preinit.append(get_plugin_manager()) + _preinit.append(get_config()) -def get_plugin_manager(): +def get_config(): if _preinit: return _preinit.pop(0) # subsequent calls to main will create a fresh instance pluginmanager = PytestPluginManager() - 
pluginmanager.config = Config(pluginmanager) # XXX attr needed? + config = Config(pluginmanager) for spec in default_plugins: pluginmanager.import_plugin(spec) - return pluginmanager + return config + +def get_plugin_manager(): + """ + Obtain a new instance of the + :py:class:`_pytest.config.PytestPluginManager`, with default plugins + already loaded. + + This function can be used by integration with other tools, like hooking + into pytest to run tests into an IDE. + """ + return get_config().pluginmanager def _prepareconfig(args=None, plugins=None): if args is None: @@ -76,7 +104,8 @@ def _prepareconfig(args=None, plugins=None): if not isinstance(args, str): raise ValueError("not a string or argument list: %r" % (args,)) args = shlex.split(args) - pluginmanager = get_plugin_manager() + config = get_config() + pluginmanager = config.pluginmanager try: if plugins: for plugin in plugins: @@ -86,13 +115,37 @@ def _prepareconfig(args=None, plugins=None): pluginmanager.register(plugin) return pluginmanager.hook.pytest_cmdline_parse( pluginmanager=pluginmanager, args=args) - except Exception: - pluginmanager.ensure_shutdown() + except BaseException: + config._ensure_unconfigure() raise + +def exclude_pytest_names(name): + return not name.startswith(name) or name == "pytest_plugins" or \ + name.startswith("pytest_funcarg__") + + + class PytestPluginManager(PluginManager): - def __init__(self, hookspecs=[hookspec]): - super(PytestPluginManager, self).__init__(hookspecs=hookspecs) + """ + Overwrites :py:class:`pluggy.PluginManager` to add pytest-specific + functionality: + + * loading plugins from the command line, ``PYTEST_PLUGIN`` env variable and + ``pytest_plugins`` global variables found in plugins being loaded; + * ``conftest.py`` loading during start-up; + """ + def __init__(self): + super(PytestPluginManager, self).__init__("pytest", implprefix="pytest_") + self._conftest_plugins = set() + + # state related to local conftest plugins + self._path2confmods = {} + 
self._conftestpath2mod = {} + self._confcutdir = None + self._noconftest = False + + self.add_hookspecs(_pytest.hookspec) self.register(self) if os.environ.get('PYTEST_DEBUG'): err = sys.stderr @@ -101,21 +154,252 @@ def __init__(self, hookspecs=[hookspec]): err = py.io.dupfile(err, encoding=encoding) except Exception: pass - self.set_tracing(err.write) + self.trace.root.setwriter(err.write) + self.enable_tracing() + + def addhooks(self, module_or_class): + """ + .. deprecated:: 2.8 + + Use :py:meth:`pluggy.PluginManager.add_hookspecs` instead. + """ + warning = dict(code="I2", + fslocation=py.code.getfslineno(sys._getframe(1)), + message="use pluginmanager.add_hookspecs instead of " + "deprecated addhooks() method.") + self._warn(warning) + return self.add_hookspecs(module_or_class) + + def parse_hookimpl_opts(self, plugin, name): + if exclude_pytest_names(name): + return None + + method = getattr(plugin, name) + opts = super(PytestPluginManager, self).parse_hookimpl_opts(plugin, name) + if opts is not None: + for name in ("tryfirst", "trylast", "optionalhook", "hookwrapper"): + opts.setdefault(name, hasattr(method, name)) + return opts + + def parse_hookspec_opts(self, module_or_class, name): + opts = super(PytestPluginManager, self).parse_hookspec_opts( + module_or_class, name) + if opts is None: + method = getattr(module_or_class, name) + if name.startswith("pytest_"): + opts = {"firstresult": hasattr(method, "firstresult"), + "historic": hasattr(method, "historic")} + return opts + + def _verify_hook(self, hook, hookmethod): + super(PytestPluginManager, self)._verify_hook(hook, hookmethod) + if "__multicall__" in hookmethod.argnames: + fslineno = py.code.getfslineno(hookmethod.function) + warning = dict(code="I1", + fslocation=fslineno, + nodeid=None, + message="%r hook uses deprecated __multicall__ " + "argument" % (hook.name)) + self._warn(warning) + + def register(self, plugin, name=None): + ret = super(PytestPluginManager, self).register(plugin, name) + if 
ret: + self.hook.pytest_plugin_registered.call_historic( + kwargs=dict(plugin=plugin, manager=self)) + return ret + + def getplugin(self, name): + # support deprecated naming because plugins (xdist e.g.) use it + return self.get_plugin(name) + + def hasplugin(self, name): + """Return True if the plugin with the given name is registered.""" + return bool(self.get_plugin(name)) def pytest_configure(self, config): + # XXX now that the pluginmanager exposes hookimpl(tryfirst...) + # we should remove tryfirst/trylast as markers config.addinivalue_line("markers", "tryfirst: mark a hook implementation function such that the " "plugin machinery will try to call it first/as early as possible.") config.addinivalue_line("markers", "trylast: mark a hook implementation function such that the " "plugin machinery will try to call it last/as late as possible.") - for warning in self._warnings: - config.warn(code="I1", message=warning) + + def _warn(self, message): + kwargs = message if isinstance(message, dict) else { + 'code': 'I1', + 'message': message, + 'fslocation': None, + 'nodeid': None, + } + self.hook.pytest_logwarning.call_historic(kwargs=kwargs) + + # + # internal API for local conftest plugin handling + # + def _set_initial_conftests(self, namespace): + """ load initial conftest files given a preparsed "namespace". + As conftest files may add their own command line options + which have arguments ('--my-opt somepath') we might get some + false positives. All builtin and 3rd party plugins will have + been loaded, however, so common options will not confuse our logic + here. 
+ """ + current = py.path.local() + self._confcutdir = current.join(namespace.confcutdir, abs=True) \ + if namespace.confcutdir else None + self._noconftest = namespace.noconftest + testpaths = namespace.file_or_dir + foundanchor = False + for path in testpaths: + path = str(path) + # remove node-id syntax + i = path.find("::") + if i != -1: + path = path[:i] + anchor = current.join(path, abs=1) + if exists(anchor): # we found some file object + self._try_load_conftest(anchor) + foundanchor = True + if not foundanchor: + self._try_load_conftest(current) + + def _try_load_conftest(self, anchor): + self._getconftestmodules(anchor) + # let's also consider test* subdirs + if anchor.check(dir=1): + for x in anchor.listdir("test*"): + if x.check(dir=1): + self._getconftestmodules(x) + + def _getconftestmodules(self, path): + if self._noconftest: + return [] + try: + return self._path2confmods[path] + except KeyError: + if path.isfile(): + clist = self._getconftestmodules(path.dirpath()) + else: + # XXX these days we may rather want to use config.rootdir + # and allow users to opt into looking into the rootdir parent + # directories instead of requiring to specify confcutdir + clist = [] + for parent in path.parts(): + if self._confcutdir and self._confcutdir.relto(parent): + continue + conftestpath = parent.join("conftest.py") + if conftestpath.isfile(): + mod = self._importconftest(conftestpath) + clist.append(mod) + + self._path2confmods[path] = clist + return clist + + def _rget_with_confmod(self, name, path): + modules = self._getconftestmodules(path) + for mod in reversed(modules): + try: + return mod, getattr(mod, name) + except AttributeError: + continue + raise KeyError(name) + + def _importconftest(self, conftestpath): + try: + return self._conftestpath2mod[conftestpath] + except KeyError: + pkgpath = conftestpath.pypkgpath() + if pkgpath is None: + _ensure_removed_sysmodule(conftestpath.purebasename) + try: + mod = conftestpath.pyimport() + except Exception: + 
raise ConftestImportFailure(conftestpath, sys.exc_info()) + + self._conftest_plugins.add(mod) + self._conftestpath2mod[conftestpath] = mod + dirpath = conftestpath.dirpath() + if dirpath in self._path2confmods: + for path, mods in self._path2confmods.items(): + if path and path.relto(dirpath) or path == dirpath: + assert mod not in mods + mods.append(mod) + self.trace("loaded conftestmodule %r" %(mod)) + self.consider_conftest(mod) + return mod + + # + # API for bootstrapping plugin loading + # + # + + def consider_preparse(self, args): + for opt1,opt2 in zip(args, args[1:]): + if opt1 == "-p": + self.consider_pluginarg(opt2) + + def consider_pluginarg(self, arg): + if arg.startswith("no:"): + name = arg[3:] + self.set_blocked(name) + if not name.startswith("pytest_"): + self.set_blocked("pytest_" + name) + else: + self.import_plugin(arg) + + def consider_conftest(self, conftestmodule): + if self.register(conftestmodule, name=conftestmodule.__file__): + self.consider_module(conftestmodule) + + def consider_env(self): + self._import_plugin_specs(os.environ.get("PYTEST_PLUGINS")) + + def consider_module(self, mod): + self._import_plugin_specs(getattr(mod, "pytest_plugins", None)) + + def _import_plugin_specs(self, spec): + if spec: + if isinstance(spec, str): + spec = spec.split(",") + for import_spec in spec: + self.import_plugin(import_spec) + + def import_plugin(self, modname): + # most often modname refers to builtin modules, e.g. "pytester", + # "terminal" or "capture". Those plugins are registered under their + # basename for historic purposes but must be imported with the + # _pytest prefix. + assert isinstance(modname, str) + if self.get_plugin(modname) is not None: + return + if modname in builtin_plugins: + importspec = "_pytest." 
+ modname + else: + importspec = modname + try: + __import__(importspec) + except ImportError: + raise + except Exception as e: + import pytest + if not hasattr(pytest, 'skip') or not isinstance(e, pytest.skip.Exception): + raise + self._warn("skipped plugin %r: %s" %((modname, e.msg))) + else: + mod = sys.modules[importspec] + self.register(mod, modname) + self.consider_module(mod) class Parser: - """ Parser for command line arguments and ini-file values. """ + """ Parser for command line arguments and ini-file values. + + :ivar extra_info: dict of generic param -> value to display in case + there's an error processing the command line arguments. + """ def __init__(self, usage=None, processopt=None): self._anonymous = OptionGroup("custom options", parser=self) @@ -124,6 +408,7 @@ def __init__(self, usage=None, processopt=None): self._usage = usage self._inidict = {} self._ininames = [] + self.extra_info = {} def processoption(self, option): if self._processopt: @@ -177,7 +462,7 @@ def parse(self, args): def _getparser(self): from _pytest._argcomplete import filescompleter - optparser = MyOptionParser(self) + optparser = MyOptionParser(self, self.extra_info) groups = self._groups + [self._anonymous] for group in groups: if group.options: @@ -198,9 +483,18 @@ def parse_setoption(self, args, option): return getattr(parsedoption, FILE_OR_DIR) def parse_known_args(self, args): + """parses and returns a namespace object with known arguments at this + point. + """ + return self.parse_known_and_unknown_args(args)[0] + + def parse_known_and_unknown_args(self, args): + """parses and returns a namespace object with known arguments, and + the remaining arguments unknown at this point. + """ optparser = self._getparser() args = [str(x) for x in args] - return optparser.parse_known_args(args)[0] + return optparser.parse_known_args(args) def addini(self, name, help, type=None, default=None): """ register an ini-file option. 
@@ -402,10 +696,15 @@ def _addoption_instance(self, option, shortupper=False): class MyOptionParser(argparse.ArgumentParser): - def __init__(self, parser): + def __init__(self, parser, extra_info=None): + if not extra_info: + extra_info = {} self._parser = parser argparse.ArgumentParser.__init__(self, usage=parser._usage, add_help=False, formatter_class=DropShorterLongHelpFormatter) + # extra_info is a dict of (param -> value) to display if there's + # an usage error to provide more contextual information to the user + self.extra_info = extra_info def parse_args(self, args=None, namespace=None): """allow splitting of positional arguments""" @@ -413,11 +712,14 @@ def parse_args(self, args=None, namespace=None): if argv: for arg in argv: if arg and arg[0] == '-': - msg = argparse._('unrecognized arguments: %s') - self.error(msg % ' '.join(argv)) + lines = ['unrecognized arguments: %s' % (' '.join(argv))] + for k, v in sorted(self.extra_info.items()): + lines.append(' %s: %s' % (k, v)) + self.error('\n'.join(lines)) getattr(args, FILE_OR_DIR).extend(argv) return args + class DropShorterLongHelpFormatter(argparse.HelpFormatter): """shorten help for long options that differ only in extra hyphens @@ -467,96 +769,6 @@ def _format_action_invocation(self, action): return action._formatted_action_invocation -class Conftest(object): - """ the single place for accessing values and interacting - towards conftest modules from pytest objects. - """ - def __init__(self, onimport=None): - self._path2confmods = {} - self._onimport = onimport - self._conftestpath2mod = {} - self._confcutdir = None - - def setinitial(self, namespace): - """ load initial conftest files given a preparsed "namespace". - As conftest files may add their own command line options - which have arguments ('--my-opt somepath') we might get some - false positives. All builtin and 3rd party plugins will have - been loaded, however, so common options will not confuse our logic - here. 
- """ - current = py.path.local() - self._confcutdir = current.join(namespace.confcutdir, abs=True) \ - if namespace.confcutdir else None - testpaths = namespace.file_or_dir - foundanchor = False - for path in testpaths: - path = str(path) - # remove node-id syntax - i = path.find("::") - if i != -1: - path = path[:i] - anchor = current.join(path, abs=1) - if exists(anchor): # we found some file object - self._try_load_conftest(anchor) - foundanchor = True - if not foundanchor: - self._try_load_conftest(current) - - def _try_load_conftest(self, anchor): - self.getconftestmodules(anchor) - # let's also consider test* subdirs - if anchor.check(dir=1): - for x in anchor.listdir("test*"): - if x.check(dir=1): - self.getconftestmodules(x) - - def getconftestmodules(self, path): - try: - return self._path2confmods[path] - except KeyError: - clist = [] - for parent in path.parts(): - if self._confcutdir and self._confcutdir.relto(parent): - continue - conftestpath = parent.join("conftest.py") - if conftestpath.check(file=1): - mod = self.importconftest(conftestpath) - clist.append(mod) - self._path2confmods[path] = clist - return clist - - def rget_with_confmod(self, name, path): - modules = self.getconftestmodules(path) - for mod in reversed(modules): - try: - return mod, getattr(mod, name) - except AttributeError: - continue - raise KeyError(name) - - def importconftest(self, conftestpath): - try: - return self._conftestpath2mod[conftestpath] - except KeyError: - pkgpath = conftestpath.pypkgpath() - if pkgpath is None: - _ensure_removed_sysmodule(conftestpath.purebasename) - try: - mod = conftestpath.pyimport() - except Exception: - raise ConftestImportFailure(conftestpath, sys.exc_info()) - self._conftestpath2mod[conftestpath] = mod - dirpath = conftestpath.dirpath() - if dirpath in self._path2confmods: - for path, mods in self._path2confmods.items(): - if path and path.relto(dirpath) or path == dirpath: - assert mod not in mods - mods.append(mod) - if self._onimport: 
- self._onimport(mod) - return mod - def _ensure_removed_sysmodule(modname): try: @@ -577,6 +789,7 @@ def __repr__(self): notset = Notset() FILE_OR_DIR = 'file_or_dir' + class Config(object): """ access to configuration values, pluginmanager and plugin hooks. """ @@ -592,58 +805,52 @@ def __init__(self, pluginmanager): #: a pluginmanager instance self.pluginmanager = pluginmanager self.trace = self.pluginmanager.trace.root.get("config") - self._conftest = Conftest(onimport=self._onimportconftest) self.hook = self.pluginmanager.hook self._inicache = {} self._opt2dest = {} self._cleanup = [] + self._warn = self.pluginmanager._warn self.pluginmanager.register(self, "pytestconfig") - self.pluginmanager.set_register_callback(self._register_plugin) self._configured = False - - def _register_plugin(self, plugin, name): - call_plugin = self.pluginmanager.call_plugin - call_plugin(plugin, "pytest_addhooks", - {'pluginmanager': self.pluginmanager}) - self.hook.pytest_plugin_registered(plugin=plugin, - manager=self.pluginmanager) - dic = call_plugin(plugin, "pytest_namespace", {}) or {} - if dic: + def do_setns(dic): import pytest setns(pytest, dic) - call_plugin(plugin, "pytest_addoption", {'parser': self._parser}) - if self._configured: - call_plugin(plugin, "pytest_configure", {'config': self}) + self.hook.pytest_namespace.call_historic(do_setns, {}) + self.hook.pytest_addoption.call_historic(kwargs=dict(parser=self._parser)) - def do_configure(self): + def add_cleanup(self, func): + """ Add a function to be called when the config object gets out of + use (usually coninciding with pytest_unconfigure).""" + self._cleanup.append(func) + + def _do_configure(self): assert not self._configured self._configured = True - self.hook.pytest_configure(config=self) + self.hook.pytest_configure.call_historic(kwargs=dict(config=self)) - def do_unconfigure(self): - assert self._configured - self._configured = False - self.hook.pytest_unconfigure(config=self) - 
self.pluginmanager.ensure_shutdown() + def _ensure_unconfigure(self): + if self._configured: + self._configured = False + self.hook.pytest_unconfigure(config=self) + self.hook.pytest_configure._call_history = [] + while self._cleanup: + fin = self._cleanup.pop() + fin() - def warn(self, code, message): + def warn(self, code, message, fslocation=None): """ generate a warning for this test session. """ - self.hook.pytest_logwarning(code=code, message=message, - fslocation=None, nodeid=None) + self.hook.pytest_logwarning.call_historic(kwargs=dict( + code=code, message=message, + fslocation=fslocation, nodeid=None)) def get_terminal_writer(self): - return self.pluginmanager.getplugin("terminalreporter")._tw + return self.pluginmanager.get_plugin("terminalreporter")._tw def pytest_cmdline_parse(self, pluginmanager, args): - assert self == pluginmanager.config, (self, pluginmanager.config) + # REF1 assert self == pluginmanager.config, (self, pluginmanager.config) self.parse(args) return self - def pytest_unconfigure(config): - while config._cleanup: - fin = config._cleanup.pop() - fin() - def notify_exception(self, excinfo, option=None): if option and option.fulltrace: style = "long" @@ -670,18 +877,13 @@ def cwd_relative_nodeid(self, nodeid): @classmethod def fromdictargs(cls, option_dict, args): """ constructor useable for subprocesses. 
""" - pluginmanager = get_plugin_manager() - config = pluginmanager.config + config = get_config() config._preparse(args, addopts=False) config.option.__dict__.update(option_dict) for x in config.option.plugins: config.pluginmanager.consider_pluginarg(x) return config - def _onimportconftest(self, conftestmodule): - self.trace("loaded conftestmodule %r" %(conftestmodule,)) - self.pluginmanager.consider_conftest(conftestmodule) - def _processopt(self, opt): for name in opt._short_opts + opt._long_opts: self._opt2dest[name] = opt.dest @@ -690,18 +892,16 @@ def _processopt(self, opt): if not hasattr(self.option, opt.dest): setattr(self.option, opt.dest, opt.default) - def _getmatchingplugins(self, fspath): - return self.pluginmanager._plugins + \ - self._conftest.getconftestmodules(fspath) - + @hookimpl(trylast=True) def pytest_load_initial_conftests(self, early_config): - self._conftest.setinitial(early_config.known_args_namespace) - pytest_load_initial_conftests.trylast = True + self.pluginmanager._set_initial_conftests(early_config.known_args_namespace) def _initini(self, args): - parsed_args = self._parser.parse_known_args(args) - r = determine_setup(parsed_args.inifilename, parsed_args.file_or_dir) + ns, unknown_args = self._parser.parse_known_and_unknown_args(args) + r = determine_setup(ns.inifilename, ns.file_or_dir + unknown_args) self.rootdir, self.inifile, self.inicfg = r + self._parser.extra_info['rootdir'] = self.rootdir + self._parser.extra_info['inifile'] = self.inifile self.invocation_dir = py.path.local() self._parser.addini('addopts', 'extra command line options', 'args') self._parser.addini('minversion', 'minimally required pytest version') @@ -713,9 +913,15 @@ def _preparse(self, args, addopts=True): args[:] = self.getini("addopts") + args self._checkversion() self.pluginmanager.consider_preparse(args) - self.pluginmanager.consider_setuptools_entrypoints() + try: + self.pluginmanager.load_setuptools_entrypoints("pytest11") + except ImportError as e: 
+ self.warn("I2", "could not load setuptools entry import: %s" % (e,)) self.pluginmanager.consider_env() self.known_args_namespace = ns = self._parser.parse_known_args(args) + if self.known_args_namespace.confcutdir is None and self.inifile: + confcutdir = py.path.local(self.inifile).dirname + self.known_args_namespace.confcutdir = confcutdir try: self.hook.pytest_load_initial_conftests(early_config=self, args=args, parser=self._parser) @@ -724,8 +930,7 @@ def _preparse(self, args, addopts=True): if ns.help or ns.version: # we don't want to prevent --help/--version to work # so just let is pass and print a warning at the end - self.pluginmanager._warnings.append( - "could not load initial conftests (%s)\n" % e.path) + self._warn("could not load initial conftests (%s)\n" % e.path) else: raise @@ -746,12 +951,18 @@ def parse(self, args): assert not hasattr(self, 'args'), ( "can only parse cmdline args at most once per Config object") self._origargs = args + self.hook.pytest_addhooks.call_historic( + kwargs=dict(pluginmanager=self.pluginmanager)) self._preparse(args) # XXX deprecated hook: self.hook.pytest_cmdline_preparse(config=self, args=args) args = self._parser.parse_setoption(args, self.option) if not args: - args.append(os.getcwd()) + cwd = os.getcwd() + if cwd == self.rootdir: + args = self.getini('testpaths') + if not args: + args = [cwd] self.args = args def addinivalue_line(self, name, line): @@ -802,7 +1013,7 @@ def _getini(self, name): def _getconftest_pathlist(self, name, path): try: - mod, relroots = self._conftest.rget_with_confmod(name, path) + mod, relroots = self.pluginmanager._rget_with_confmod(name, path) except KeyError: return None modpath = py.path.local(mod.__file__).dirpath() diff --git a/_pytest/core.py b/_pytest/core.py deleted file mode 100644 index 134a93ed04f..00000000000 --- a/_pytest/core.py +++ /dev/null @@ -1,543 +0,0 @@ -""" -pytest PluginManager, basic initialization and tracing. 
-""" -import os -import sys -import inspect -import py -# don't import pytest to avoid circular imports - -assert py.__version__.split(".")[:2] >= ['1', '4'], ("installation problem: " - "%s is too old, remove or upgrade 'py'" % (py.__version__)) - -py3 = sys.version_info > (3,0) - -class TagTracer: - def __init__(self): - self._tag2proc = {} - self.writer = None - self.indent = 0 - - def get(self, name): - return TagTracerSub(self, (name,)) - - def format_message(self, tags, args): - if isinstance(args[-1], dict): - extra = args[-1] - args = args[:-1] - else: - extra = {} - - content = " ".join(map(str, args)) - indent = " " * self.indent - - lines = [ - "%s%s [%s]\n" %(indent, content, ":".join(tags)) - ] - - for name, value in extra.items(): - lines.append("%s %s: %s\n" % (indent, name, value)) - return lines - - def processmessage(self, tags, args): - if self.writer is not None and args: - lines = self.format_message(tags, args) - self.writer(''.join(lines)) - try: - self._tag2proc[tags](tags, args) - except KeyError: - pass - - def setwriter(self, writer): - self.writer = writer - - def setprocessor(self, tags, processor): - if isinstance(tags, str): - tags = tuple(tags.split(":")) - else: - assert isinstance(tags, tuple) - self._tag2proc[tags] = processor - -class TagTracerSub: - def __init__(self, root, tags): - self.root = root - self.tags = tags - def __call__(self, *args): - self.root.processmessage(self.tags, args) - def setmyprocessor(self, processor): - self.root.setprocessor(self.tags, processor) - def get(self, name): - return self.__class__(self.root, self.tags + (name,)) - - -def add_method_wrapper(cls, wrapper_func): - """ Substitute the function named "wrapperfunc.__name__" at class - "cls" with a function that wraps the call to the original function. - Return an undo function which can be called to reset the class to use - the old method again. 
- - wrapper_func is called with the same arguments as the method - it wraps and its result is used as a wrap_controller for - calling the original function. - """ - name = wrapper_func.__name__ - oldcall = getattr(cls, name) - def wrap_exec(*args, **kwargs): - gen = wrapper_func(*args, **kwargs) - return wrapped_call(gen, lambda: oldcall(*args, **kwargs)) - - setattr(cls, name, wrap_exec) - return lambda: setattr(cls, name, oldcall) - -def raise_wrapfail(wrap_controller, msg): - co = wrap_controller.gi_code - raise RuntimeError("wrap_controller at %r %s:%d %s" % - (co.co_name, co.co_filename, co.co_firstlineno, msg)) - -def wrapped_call(wrap_controller, func): - """ Wrap calling to a function with a generator which needs to yield - exactly once. The yield point will trigger calling the wrapped function - and return its CallOutcome to the yield point. The generator then needs - to finish (raise StopIteration) in order for the wrapped call to complete. - """ - try: - next(wrap_controller) # first yield - except StopIteration: - raise_wrapfail(wrap_controller, "did not yield") - call_outcome = CallOutcome(func) - try: - wrap_controller.send(call_outcome) - raise_wrapfail(wrap_controller, "has second yield") - except StopIteration: - pass - return call_outcome.get_result() - - -class CallOutcome: - """ Outcome of a function call, either an exception or a proper result. - Calling the ``get_result`` method will return the result or reraise - the exception raised when the function was called. 
""" - excinfo = None - def __init__(self, func): - try: - self.result = func() - except BaseException: - self.excinfo = sys.exc_info() - - def force_result(self, result): - self.result = result - self.excinfo = None - - def get_result(self): - if self.excinfo is None: - return self.result - else: - ex = self.excinfo - if py3: - raise ex[1].with_traceback(ex[2]) - py.builtin._reraise(*ex) - - -class PluginManager(object): - def __init__(self, hookspecs=None, prefix="pytest_"): - self._name2plugin = {} - self._plugins = [] - self._conftestplugins = [] - self._plugin2hookcallers = {} - self._warnings = [] - self.trace = TagTracer().get("pluginmanage") - self._plugin_distinfo = [] - self._shutdown = [] - self.hook = HookRelay(hookspecs or [], pm=self, prefix=prefix) - - def set_tracing(self, writer): - self.trace.root.setwriter(writer) - # reconfigure HookCalling to perform tracing - assert not hasattr(self, "_wrapping") - self._wrapping = True - - def _docall(self, methods, kwargs): - trace = self.hookrelay.trace - trace.root.indent += 1 - trace(self.name, kwargs) - box = yield - if box.excinfo is None: - trace("finish", self.name, "-->", box.result) - trace.root.indent -= 1 - - undo = add_method_wrapper(HookCaller, _docall) - self.add_shutdown(undo) - - def do_configure(self, config): - # backward compatibility - config.do_configure() - - def set_register_callback(self, callback): - assert not hasattr(self, "_registercallback") - self._registercallback = callback - - def register(self, plugin, name=None, prepend=False, conftest=False): - if self._name2plugin.get(name, None) == -1: - return - name = name or getattr(plugin, '__name__', str(id(plugin))) - if self.isregistered(plugin, name): - raise ValueError("Plugin already registered: %s=%s\n%s" %( - name, plugin, self._name2plugin)) - #self.trace("registering", name, plugin) - reg = getattr(self, "_registercallback", None) - if reg is not None: - reg(plugin, name) # may call addhooks - hookcallers = 
list(self.hook._scan_plugin(plugin)) - self._plugin2hookcallers[plugin] = hookcallers - self._name2plugin[name] = plugin - if conftest: - self._conftestplugins.append(plugin) - else: - if not prepend: - self._plugins.append(plugin) - else: - self._plugins.insert(0, plugin) - # finally make sure that the methods of the new plugin take part - for hookcaller in hookcallers: - hookcaller.scan_methods() - return True - - def unregister(self, plugin): - try: - self._plugins.remove(plugin) - except KeyError: - self._conftestplugins.remove(plugin) - for name, value in list(self._name2plugin.items()): - if value == plugin: - del self._name2plugin[name] - hookcallers = self._plugin2hookcallers.pop(plugin) - for hookcaller in hookcallers: - hookcaller.scan_methods() - - def add_shutdown(self, func): - self._shutdown.append(func) - - def ensure_shutdown(self): - while self._shutdown: - func = self._shutdown.pop() - func() - self._plugins = self._conftestplugins = [] - self._name2plugin.clear() - - def isregistered(self, plugin, name=None): - if self.getplugin(name) is not None: - return True - return plugin in self._plugins or plugin in self._conftestplugins - - def addhooks(self, spec, prefix="pytest_"): - self.hook._addhooks(spec, prefix=prefix) - - def getplugins(self): - return self._plugins + self._conftestplugins - - def skipifmissing(self, name): - if not self.hasplugin(name): - import pytest - pytest.skip("plugin %r is missing" % name) - - def hasplugin(self, name): - return bool(self.getplugin(name)) - - def getplugin(self, name): - if name is None: - return None - try: - return self._name2plugin[name] - except KeyError: - return self._name2plugin.get("_pytest." 
+ name, None) - - # API for bootstrapping - # - def _envlist(self, varname): - val = os.environ.get(varname, None) - if val is not None: - return val.split(',') - return () - - def consider_env(self): - for spec in self._envlist("PYTEST_PLUGINS"): - self.import_plugin(spec) - - def consider_setuptools_entrypoints(self): - try: - from pkg_resources import iter_entry_points, DistributionNotFound - except ImportError: - return # XXX issue a warning - for ep in iter_entry_points('pytest11'): - name = ep.name - if name.startswith("pytest_"): - name = name[7:] - if ep.name in self._name2plugin or name in self._name2plugin: - continue - try: - plugin = ep.load() - except DistributionNotFound: - continue - self._plugin_distinfo.append((ep.dist, plugin)) - self.register(plugin, name=name) - - def consider_preparse(self, args): - for opt1,opt2 in zip(args, args[1:]): - if opt1 == "-p": - self.consider_pluginarg(opt2) - - def consider_pluginarg(self, arg): - if arg.startswith("no:"): - name = arg[3:] - plugin = self.getplugin(name) - if plugin is not None: - self.unregister(plugin) - self._name2plugin[name] = -1 - else: - if self.getplugin(arg) is None: - self.import_plugin(arg) - - def consider_conftest(self, conftestmodule): - if self.register(conftestmodule, name=conftestmodule.__file__, - conftest=True): - self.consider_module(conftestmodule) - - def consider_module(self, mod): - attr = getattr(mod, "pytest_plugins", ()) - if attr: - if not isinstance(attr, (list, tuple)): - attr = (attr,) - for spec in attr: - self.import_plugin(spec) - - def import_plugin(self, modname): - assert isinstance(modname, str) - if self.getplugin(modname) is not None: - return - try: - mod = importplugin(modname) - except KeyboardInterrupt: - raise - except ImportError: - if modname.startswith("pytest_"): - return self.import_plugin(modname[7:]) - raise - except: - e = sys.exc_info()[1] - import pytest - if not hasattr(pytest, 'skip') or not isinstance(e, pytest.skip.Exception): - raise - 
self._warnings.append("skipped plugin %r: %s" %((modname, e.msg))) - else: - self.register(mod, modname) - self.consider_module(mod) - - def listattr(self, attrname, plugins=None): - if plugins is None: - plugins = self._plugins + self._conftestplugins - l = [] - last = [] - wrappers = [] - for plugin in plugins: - try: - meth = getattr(plugin, attrname) - except AttributeError: - continue - if hasattr(meth, 'hookwrapper'): - wrappers.append(meth) - elif hasattr(meth, 'tryfirst'): - last.append(meth) - elif hasattr(meth, 'trylast'): - l.insert(0, meth) - else: - l.append(meth) - l.extend(last) - l.extend(wrappers) - return l - - def call_plugin(self, plugin, methname, kwargs): - return MultiCall(methods=self.listattr(methname, plugins=[plugin]), - kwargs=kwargs, firstresult=True).execute() - - -def importplugin(importspec): - name = importspec - try: - mod = "_pytest." + name - __import__(mod) - return sys.modules[mod] - except ImportError: - __import__(importspec) - return sys.modules[importspec] - -class MultiCall: - """ execute a call into multiple python functions/methods. """ - - def __init__(self, methods, kwargs, firstresult=False): - self.methods = list(methods) - self.kwargs = kwargs - self.kwargs["__multicall__"] = self - self.results = [] - self.firstresult = firstresult - - def __repr__(self): - status = "%d results, %d meths" % (len(self.results), len(self.methods)) - return "" %(status, self.kwargs) - - def execute(self): - all_kwargs = self.kwargs - while self.methods: - method = self.methods.pop() - args = [all_kwargs[argname] for argname in varnames(method)] - if hasattr(method, "hookwrapper"): - return wrapped_call(method(*args), self.execute) - res = method(*args) - if res is not None: - self.results.append(res) - if self.firstresult: - return res - if not self.firstresult: - return self.results - - -def varnames(func, startindex=None): - """ return argument name tuple for a function, method, class or callable. 
- - In case of a class, its "__init__" method is considered. - For methods the "self" parameter is not included unless you are passing - an unbound method with Python3 (which has no supports for unbound methods) - """ - cache = getattr(func, "__dict__", {}) - try: - return cache["_varnames"] - except KeyError: - pass - if inspect.isclass(func): - try: - func = func.__init__ - except AttributeError: - return () - startindex = 1 - else: - if not inspect.isfunction(func) and not inspect.ismethod(func): - func = getattr(func, '__call__', func) - if startindex is None: - startindex = int(inspect.ismethod(func)) - - rawcode = py.code.getrawcode(func) - try: - x = rawcode.co_varnames[startindex:rawcode.co_argcount] - except AttributeError: - x = () - else: - defaults = func.__defaults__ - if defaults: - x = x[:-len(defaults)] - try: - cache["_varnames"] = x - except TypeError: - pass - return x - - -class HookRelay: - def __init__(self, hookspecs, pm, prefix="pytest_"): - if not isinstance(hookspecs, list): - hookspecs = [hookspecs] - self._pm = pm - self.trace = pm.trace.root.get("hook") - self.prefix = prefix - for hookspec in hookspecs: - self._addhooks(hookspec, prefix) - - def _addhooks(self, hookspec, prefix): - added = False - isclass = int(inspect.isclass(hookspec)) - for name, method in vars(hookspec).items(): - if name.startswith(prefix): - firstresult = getattr(method, 'firstresult', False) - hc = HookCaller(self, name, firstresult=firstresult, - argnames=varnames(method, startindex=isclass)) - setattr(self, name, hc) - added = True - #print ("setting new hook", name) - if not added: - raise ValueError("did not find new %r hooks in %r" %( - prefix, hookspec,)) - - def _getcaller(self, name, plugins): - caller = getattr(self, name) - methods = self._pm.listattr(name, plugins=plugins) - if methods: - return caller.new_cached_caller(methods) - return caller - - def _scan_plugin(self, plugin): - def fail(msg, *args): - name = getattr(plugin, '__name__', plugin) - 
raise PluginValidationError("plugin %r\n%s" %(name, msg % args)) - - for name in dir(plugin): - if not name.startswith(self.prefix): - continue - hook = getattr(self, name, None) - method = getattr(plugin, name) - if hook is None: - is_optional = getattr(method, 'optionalhook', False) - if not isgenerichook(name) and not is_optional: - fail("found unknown hook: %r", name) - continue - for arg in varnames(method): - if arg not in hook.argnames: - fail("argument %r not available\n" - "actual definition: %s\n" - "available hookargs: %s", - arg, formatdef(method), - ", ".join(hook.argnames)) - yield hook - - -class HookCaller: - def __init__(self, hookrelay, name, firstresult, argnames, methods=()): - self.hookrelay = hookrelay - self.name = name - self.firstresult = firstresult - self.argnames = ["__multicall__"] - self.argnames.extend(argnames) - assert "self" not in argnames # sanity check - self.methods = methods - - def new_cached_caller(self, methods): - return HookCaller(self.hookrelay, self.name, self.firstresult, - argnames=self.argnames, methods=methods) - - def __repr__(self): - return "" %(self.name,) - - def scan_methods(self): - self.methods = self.hookrelay._pm.listattr(self.name) - - def __call__(self, **kwargs): - return self._docall(self.methods, kwargs) - - def callextra(self, methods, **kwargs): - return self._docall(self.methods + methods, kwargs) - - def _docall(self, methods, kwargs): - return MultiCall(methods, kwargs, - firstresult=self.firstresult).execute() - - -class PluginValidationError(Exception): - """ plugin failed validation. 
""" - -def isgenerichook(name): - return name == "pytest_plugins" or \ - name.startswith("pytest_funcarg__") - -def formatdef(func): - return "%s%s" % ( - func.__name__, - inspect.formatargspec(*inspect.getargspec(func)) - ) - diff --git a/_pytest/doctest.py b/_pytest/doctest.py index 74dab333b6b..ddbe5fb8e1c 100644 --- a/_pytest/doctest.py +++ b/_pytest/doctest.py @@ -2,7 +2,7 @@ from __future__ import absolute_import import traceback import pytest, py -from _pytest.python import FixtureRequest, FuncFixtureInfo +from _pytest.python import FixtureRequest from py._code.code import TerminalRepr, ReprFileLocation def pytest_addoption(parser): @@ -47,6 +47,7 @@ def __init__(self, name, parent, runner=None, dtest=None): self.dtest = dtest def runtest(self): + _check_all_skipped(self.dtest) self.runner.run(self.dtest) def repr_failure(self, excinfo): @@ -63,7 +64,7 @@ def repr_failure(self, excinfo): lineno = test.lineno + example.lineno + 1 message = excinfo.type.__name__ reprlocation = ReprFileLocation(filename, lineno, message) - checker = doctest.OutputChecker() + checker = _get_unicode_checker() REPORT_UDIFF = doctest.REPORT_UDIFF filelines = py.path.local(filename).readlines(cr=0) lines = [] @@ -100,7 +101,8 @@ def _get_flag_lookup(): NORMALIZE_WHITESPACE=doctest.NORMALIZE_WHITESPACE, ELLIPSIS=doctest.ELLIPSIS, IGNORE_EXCEPTION_DETAIL=doctest.IGNORE_EXCEPTION_DETAIL, - COMPARISON_FLAGS=doctest.COMPARISON_FLAGS) + COMPARISON_FLAGS=doctest.COMPARISON_FLAGS, + ALLOW_UNICODE=_get_allow_unicode_flag()) def get_optionflags(parent): optionflags_str = parent.config.getini("doctest_optionflags") @@ -110,29 +112,47 @@ def get_optionflags(parent): flag_acc |= flag_lookup_table[flag] return flag_acc + class DoctestTextfile(DoctestItem, pytest.File): + def runtest(self): import doctest - # satisfy `FixtureRequest` constructor... 
- self.funcargs = {} - fm = self.session._fixturemanager - def func(): - pass - self._fixtureinfo = fm.getfixtureinfo(node=self, func=func, - cls=None, funcargs=False) - fixture_request = FixtureRequest(self) - fixture_request._fillfixtures() - failed, tot = doctest.testfile( - str(self.fspath), module_relative=False, - optionflags=get_optionflags(self), - extraglobs=dict(getfixture=fixture_request.getfuncargvalue), - raise_on_error=True, verbose=0) + fixture_request = _setup_fixtures(self) + + # inspired by doctest.testfile; ideally we would use it directly, + # but it doesn't support passing a custom checker + text = self.fspath.read() + filename = str(self.fspath) + name = self.fspath.basename + globs = dict(getfixture=fixture_request.getfuncargvalue) + if '__name__' not in globs: + globs['__name__'] = '__main__' + + optionflags = get_optionflags(self) + runner = doctest.DebugRunner(verbose=0, optionflags=optionflags, + checker=_get_unicode_checker()) + + parser = doctest.DocTestParser() + test = parser.get_doctest(text, globs, name, filename, 0) + _check_all_skipped(test) + runner.run(test) + + +def _check_all_skipped(test): + """raises pytest.skip() if all examples in the given DocTest have the SKIP + option set. + """ + import doctest + all_skipped = all(x.options.get(doctest.SKIP, False) for x in test.examples) + if all_skipped: + pytest.skip('all tests skipped by +SKIP option') + class DoctestModule(pytest.File): def collect(self): import doctest if self.fspath.basename == "conftest.py": - module = self.config._conftest.importconftest(self.fspath) + module = self.config.pluginmanager._importconftest(self.fspath) else: try: module = self.fspath.pyimport() @@ -142,15 +162,86 @@ def collect(self): else: raise # satisfy `FixtureRequest` constructor... 
- self.funcargs = {} - self._fixtureinfo = FuncFixtureInfo((), [], {}) - fixture_request = FixtureRequest(self) + fixture_request = _setup_fixtures(self) doctest_globals = dict(getfixture=fixture_request.getfuncargvalue) # uses internal doctest module parsing mechanism finder = doctest.DocTestFinder() optionflags = get_optionflags(self) - runner = doctest.DebugRunner(verbose=0, optionflags=optionflags) + runner = doctest.DebugRunner(verbose=0, optionflags=optionflags, + checker=_get_unicode_checker()) for test in finder.find(module, module.__name__, extraglobs=doctest_globals): if test.examples: # skip empty doctests yield DoctestItem(test.name, self, runner, test) + + +def _setup_fixtures(doctest_item): + """ + Used by DoctestTextfile and DoctestModule to setup fixture information. + """ + def func(): + pass + + doctest_item.funcargs = {} + fm = doctest_item.session._fixturemanager + doctest_item._fixtureinfo = fm.getfixtureinfo(node=doctest_item, func=func, + cls=None, funcargs=False) + fixture_request = FixtureRequest(doctest_item) + fixture_request._fillfixtures() + return fixture_request + + +def _get_unicode_checker(): + """ + Returns a doctest.OutputChecker subclass that takes in account the + ALLOW_UNICODE option to ignore u'' prefixes in strings. Useful + when the same doctest should run in Python 2 and Python 3. + + An inner class is used to avoid importing "doctest" at the module + level. 
+ """ + if hasattr(_get_unicode_checker, 'UnicodeOutputChecker'): + return _get_unicode_checker.UnicodeOutputChecker() + + import doctest + import re + + class UnicodeOutputChecker(doctest.OutputChecker): + """ + Copied from doctest_nose_plugin.py from the nltk project: + https://github.com/nltk/nltk + """ + + _literal_re = re.compile(r"(\W|^)[uU]([rR]?[\'\"])", re.UNICODE) + + def check_output(self, want, got, optionflags): + res = doctest.OutputChecker.check_output(self, want, got, + optionflags) + if res: + return True + + if not (optionflags & _get_allow_unicode_flag()): + return False + + else: # pragma: no cover + # the code below will end up executed only in Python 2 in + # our tests, and our coverage check runs in Python 3 only + def remove_u_prefixes(txt): + return re.sub(self._literal_re, r'\1\2', txt) + + want = remove_u_prefixes(want) + got = remove_u_prefixes(got) + res = doctest.OutputChecker.check_output(self, want, got, + optionflags) + return res + + _get_unicode_checker.UnicodeOutputChecker = UnicodeOutputChecker + return _get_unicode_checker.UnicodeOutputChecker() + + +def _get_allow_unicode_flag(): + """ + Registers and returns the ALLOW_UNICODE flag. 
+ """ + import doctest + return doctest.register_optionflag('ALLOW_UNICODE') diff --git a/_pytest/genscript.py b/_pytest/genscript.py index ed1f3b5c28c..0572bfcd64a 100755 --- a/_pytest/genscript.py +++ b/_pytest/genscript.py @@ -1,10 +1,9 @@ -""" generate a single-file self-contained version of pytest """ +""" (deprecated) generate a single-file self-contained version of pytest """ import os import sys import pkgutil import py - import _pytest @@ -32,7 +31,15 @@ def pkg_to_mapping(name): else: # package for pyfile in toplevel.visit('*.py'): pkg = pkgname(name, toplevel, pyfile) - name2src[pkg] = pyfile.read() + if pkg == '_pytest.__init__': + # remove the coding comment line to avoid python bug + lines = pyfile.read().splitlines(True) + name2src[pkg] = ''.join(lines[1:]) + else: + name2src[pkg] = pyfile.read() + # with wheels py source code might be not be installed + # and the resulting genscript is useless, just bail out. + assert name2src, "no source code found for %r at %r" %(name, toplevel) return name2src def compress_mapping(mapping): @@ -70,7 +77,9 @@ def pytest_cmdline_main(config): genscript = config.getvalue("genscript") if genscript: tw = _pytest.config.create_terminal_writer(config) - deps = ['py', '_pytest', 'pytest'] + tw.line("WARNING: usage of genscript is deprecated.", + red=True) + deps = ['py', 'pluggy', '_pytest', 'pytest'] if sys.version_info < (2,7): deps.append("argparse") tw.line("generated script will run on python2.6-python3.3++") diff --git a/_pytest/helpconfig.py b/_pytest/helpconfig.py index 9b928691038..5ede8b371a4 100644 --- a/_pytest/helpconfig.py +++ b/_pytest/helpconfig.py @@ -22,30 +22,28 @@ def pytest_addoption(parser): help="store internal tracing debug information in 'pytestdebug.log'.") -@pytest.mark.hookwrapper +@pytest.hookimpl(hookwrapper=True) def pytest_cmdline_parse(): outcome = yield config = outcome.get_result() if config.option.debug: path = os.path.abspath("pytestdebug.log") - f = open(path, 'w') - 
config._debugfile = f - f.write("versions pytest-%s, py-%s, " + debugfile = open(path, 'w') + debugfile.write("versions pytest-%s, py-%s, " "python-%s\ncwd=%s\nargs=%s\n\n" %( pytest.__version__, py.__version__, ".".join(map(str, sys.version_info)), os.getcwd(), config._origargs)) - config.pluginmanager.set_tracing(f.write) + config.trace.root.setwriter(debugfile.write) + undo_tracing = config.pluginmanager.enable_tracing() sys.stderr.write("writing pytestdebug information to %s\n" % path) - -@pytest.mark.trylast -def pytest_unconfigure(config): - if hasattr(config, '_debugfile'): - config._debugfile.close() - sys.stderr.write("wrote pytestdebug information to %s\n" % - config._debugfile.name) - config.trace.root.setwriter(None) - + def unset_tracing(): + debugfile.close() + sys.stderr.write("wrote pytestdebug information to %s\n" % + debugfile.name) + config.trace.root.setwriter(None) + undo_tracing() + config.add_cleanup(unset_tracing) def pytest_cmdline_main(config): if config.option.version: @@ -58,14 +56,14 @@ def pytest_cmdline_main(config): sys.stderr.write(line + "\n") return 0 elif config.option.help: - config.do_configure() + config._do_configure() showhelp(config) - config.do_unconfigure() + config._ensure_unconfigure() return 0 def showhelp(config): - import _pytest.config - tw = _pytest.config.create_terminal_writer(config) + reporter = config.pluginmanager.get_plugin('terminalreporter') + tw = reporter._tw tw.write(config._parser.optparser.format_help()) tw.line() tw.line() @@ -88,8 +86,9 @@ def showhelp(config): tw.line("to see available fixtures type: py.test --fixtures") tw.line("(shown according to specified file_or_dir or current dir " "if not specified)") - for warning in config.pluginmanager._warnings: - tw.line("warning: %s" % (warning,), red=True) + tw.line(str(reporter.stats)) + for warningreport in reporter.stats.get('warnings', []): + tw.line("warning : " + warningreport.message, red=True) return @@ -99,10 +98,10 @@ def showhelp(config): 
def getpluginversioninfo(config): lines = [] - plugininfo = config.pluginmanager._plugin_distinfo + plugininfo = config.pluginmanager.list_plugin_distinfo() if plugininfo: lines.append("setuptools registered plugins:") - for dist, plugin in plugininfo: + for plugin, dist in plugininfo: loc = getattr(plugin, '__file__', repr(plugin)) content = "%s-%s at %s" % (dist.project_name, dist.version, loc) lines.append(" " + content) @@ -120,7 +119,7 @@ def pytest_report_header(config): if config.option.traceconfig: lines.append("active plugins:") - items = config.pluginmanager._name2plugin.items() + items = config.pluginmanager.list_name_plugin() for name, plugin in items: if hasattr(plugin, '__file__'): r = plugin.__file__ @@ -128,5 +127,3 @@ def pytest_report_header(config): r = repr(plugin) lines.append(" %-20s: %s" %(name, r)) return lines - - diff --git a/_pytest/hookspec.py b/_pytest/hookspec.py index 0cc59f2596a..a3a481d6a58 100644 --- a/_pytest/hookspec.py +++ b/_pytest/hookspec.py @@ -1,27 +1,32 @@ """ hook specifications for pytest plugins, invoked from main.py and builtin plugins. """ +from pluggy import HookspecMarker + +hookspec = HookspecMarker("pytest") + # ------------------------------------------------------------------------- -# Initialization +# Initialization hooks called for every plugin # ------------------------------------------------------------------------- +@hookspec(historic=True) def pytest_addhooks(pluginmanager): - """called at plugin load time to allow adding new hooks via a call to - pluginmanager.registerhooks(module).""" + """called at plugin registration time to allow adding new hooks via a call to + pluginmanager.add_hookspecs(module_or_class, prefix).""" +@hookspec(historic=True) def pytest_namespace(): """return dict of name->object to be made globally available in - the pytest namespace. This hook is called before command line options - are parsed. + the pytest namespace. This hook is called at plugin registration + time. 
""" -def pytest_cmdline_parse(pluginmanager, args): - """return initialized config object, parsing the specified args. """ -pytest_cmdline_parse.firstresult = True +@hookspec(historic=True) +def pytest_plugin_registered(plugin, manager): + """ a new pytest plugin got registered. """ -def pytest_cmdline_preparse(config, args): - """(deprecated) modify command line arguments before option parsing. """ +@hookspec(historic=True) def pytest_addoption(parser): """register argparse-style options and ini-style config values. @@ -47,35 +52,43 @@ def pytest_addoption(parser): via (deprecated) ``pytest.config``. """ +@hookspec(historic=True) +def pytest_configure(config): + """ called after command line options have been parsed + and all plugins and initial conftest files been loaded. + This hook is called for every plugin. + """ + +# ------------------------------------------------------------------------- +# Bootstrapping hooks called for plugins registered early enough: +# internal and 3rd party plugins as well as directly +# discoverable conftest.py local plugins. +# ------------------------------------------------------------------------- + +@hookspec(firstresult=True) +def pytest_cmdline_parse(pluginmanager, args): + """return initialized config object, parsing the specified args. """ + +def pytest_cmdline_preparse(config, args): + """(deprecated) modify command line arguments before option parsing. """ + +@hookspec(firstresult=True) def pytest_cmdline_main(config): """ called for performing the main command line action. The default implementation will invoke the configure hooks and runtest_mainloop. """ -pytest_cmdline_main.firstresult = True def pytest_load_initial_conftests(args, early_config, parser): """ implements the loading of initial conftest files ahead of command line option parsing. """ -def pytest_configure(config): - """ called after command line options have been parsed - and all plugins and initial conftest files been loaded. 
- """ - -def pytest_unconfigure(config): - """ called before test process is exited. """ - -def pytest_runtestloop(session): - """ called for performing the main runtest loop - (after collection finished). """ -pytest_runtestloop.firstresult = True # ------------------------------------------------------------------------- # collection hooks # ------------------------------------------------------------------------- +@hookspec(firstresult=True) def pytest_collection(session): """ perform the collection protocol for the given session. """ -pytest_collection.firstresult = True def pytest_collection_modifyitems(session, config, items): """ called after collection has been performed, may filter or re-order @@ -84,16 +97,16 @@ def pytest_collection_modifyitems(session, config, items): def pytest_collection_finish(session): """ called after collection has been performed and modified. """ +@hookspec(firstresult=True) def pytest_ignore_collect(path, config): """ return True to prevent considering this path for collection. This hook is consulted for all files and directories prior to calling more specific hooks. """ -pytest_ignore_collect.firstresult = True +@hookspec(firstresult=True) def pytest_collect_directory(path, parent): """ called before traversing a directory for collection files. """ -pytest_collect_directory.firstresult = True def pytest_collect_file(path, parent): """ return collection Node or None for the given path. Any new node @@ -112,29 +125,29 @@ def pytest_collectreport(report): def pytest_deselected(items): """ called for test items deselected by keyword. """ +@hookspec(firstresult=True) def pytest_make_collect_report(collector): """ perform ``collector.collect()`` and return a CollectReport. 
""" -pytest_make_collect_report.firstresult = True # ------------------------------------------------------------------------- # Python test function related hooks # ------------------------------------------------------------------------- +@hookspec(firstresult=True) def pytest_pycollect_makemodule(path, parent): """ return a Module collector or None for the given path. This hook will be called for each matching test module path. The pytest_collect_file hook needs to be used if you want to create test modules for files that do not match as a test module. """ -pytest_pycollect_makemodule.firstresult = True +@hookspec(firstresult=True) def pytest_pycollect_makeitem(collector, name, obj): """ return custom item/collector for a python object in a module, or None. """ -pytest_pycollect_makeitem.firstresult = True +@hookspec(firstresult=True) def pytest_pyfunc_call(pyfuncitem): """ call underlying test function. """ -pytest_pyfunc_call.firstresult = True def pytest_generate_tests(metafunc): """ generate (multiple) parametrized calls to a test function.""" @@ -142,9 +155,16 @@ def pytest_generate_tests(metafunc): # ------------------------------------------------------------------------- # generic runtest related hooks # ------------------------------------------------------------------------- + +@hookspec(firstresult=True) +def pytest_runtestloop(session): + """ called for performing the main runtest loop + (after collection finished). """ + def pytest_itemstart(item, node): """ (deprecated, use pytest_runtest_logstart). """ +@hookspec(firstresult=True) def pytest_runtest_protocol(item, nextitem): """ implements the runtest_setup/call/teardown protocol for the given test item, including capturing exceptions and calling @@ -158,7 +178,6 @@ def pytest_runtest_protocol(item, nextitem): :return boolean: True if no further hook implementations should be invoked. 
""" -pytest_runtest_protocol.firstresult = True def pytest_runtest_logstart(nodeid, location): """ signal the start of running a single test item. """ @@ -178,12 +197,12 @@ def pytest_runtest_teardown(item, nextitem): so that nextitem only needs to call setup-functions. """ +@hookspec(firstresult=True) def pytest_runtest_makereport(item, call): """ return a :py:class:`_pytest.runner.TestReport` object for the given :py:class:`pytest.Item` and :py:class:`_pytest.runner.CallInfo`. """ -pytest_runtest_makereport.firstresult = True def pytest_runtest_logreport(report): """ process a test setup/call/teardown report relating to @@ -199,6 +218,9 @@ def pytest_sessionstart(session): def pytest_sessionfinish(session, exitstatus): """ whole test run finishes. """ +def pytest_unconfigure(config): + """ called before test process is exited. """ + # ------------------------------------------------------------------------- # hooks for customising the assert methods @@ -220,13 +242,15 @@ def pytest_assertrepr_compare(config, op, left, right): def pytest_report_header(config, startdir): """ return a string to be displayed as header info for terminal reporting.""" +@hookspec(firstresult=True) def pytest_report_teststatus(report): """ return result-category, shortletter and verbose word for reporting.""" -pytest_report_teststatus.firstresult = True def pytest_terminal_summary(terminalreporter): """ add additional section in terminal summary reporting. 
""" + +@hookspec(historic=True) def pytest_logwarning(message, code, nodeid, fslocation): """ process a warning specified by a message, a code string, a nodeid and fslocation (both of which may be None @@ -236,17 +260,14 @@ def pytest_logwarning(message, code, nodeid, fslocation): # doctest hooks # ------------------------------------------------------------------------- +@hookspec(firstresult=True) def pytest_doctest_prepare_content(content): """ return processed content for a given doctest""" -pytest_doctest_prepare_content.firstresult = True # ------------------------------------------------------------------------- # error handling and internal debugging hooks # ------------------------------------------------------------------------- -def pytest_plugin_registered(plugin, manager): - """ a new pytest plugin got registered. """ - def pytest_internalerror(excrepr, excinfo): """ called for internal errors. """ diff --git a/_pytest/junitxml.py b/_pytest/junitxml.py index 8f6c36ff9ab..8b75b139a08 100644 --- a/_pytest/junitxml.py +++ b/_pytest/junitxml.py @@ -1,5 +1,7 @@ """ report test results in JUnit-XML format, for use with Hudson and build integration servers. +Output conforms to https://github.com/jenkinsci/xunit-plugin/blob/master/src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd + Based on initial code from Ross Lawley. """ import py @@ -7,6 +9,7 @@ import re import sys import time +import pytest # Python 2.X and 3.X compatibility if sys.version_info[0] < 3: @@ -51,6 +54,20 @@ def repl(matchobj): return unicode('#x%04X') % i return py.xml.raw(illegal_xml_re.sub(repl, py.xml.escape(arg))) +@pytest.fixture +def record_xml_property(request): + """Fixture that adds extra xml properties to the tag for the calling test. + The fixture is callable with (name, value), with value being automatically + xml-encoded. 
+ """ + def inner(name, value): + if hasattr(request.config, "_xml"): + request.config._xml.add_custom_property(name, value) + msg = 'record_xml_property is an experimental feature' + request.config.warn(code='C3', message=msg, + fslocation=request.node.location[:2]) + return inner + def pytest_addoption(parser): group = parser.getgroup("terminal reporting") group.addoption('--junitxml', '--junit-xml', action="store", @@ -73,7 +90,6 @@ def pytest_unconfigure(config): del config._xml config.pluginmanager.unregister(xml) - def mangle_testnames(names): names = [x.replace(".py", "") for x in names if x != '()'] names[0] = names[0].replace("/", '.') @@ -87,17 +103,25 @@ def __init__(self, logfile, prefix): self.tests = [] self.passed = self.skipped = 0 self.failed = self.errors = 0 + self.custom_properties = {} + + def add_custom_property(self, name, value): + self.custom_properties[str(name)] = bin_xml_escape(str(value)) def _opentestcase(self, report): names = mangle_testnames(report.nodeid.split("::")) classnames = names[:-1] if self.prefix: classnames.insert(0, self.prefix) - self.tests.append(Junit.testcase( - classname=".".join(classnames), - name=bin_xml_escape(names[-1]), - time=getattr(report, 'duration', 0) - )) + attrs = { + "classname": ".".join(classnames), + "name": bin_xml_escape(names[-1]), + "file": report.location[0], + "time": 0, + } + if report.location[1] is not None: + attrs["line"] = report.location[1] + self.tests.append(Junit.testcase(**attrs)) def _write_captured_output(self, report): for capname in ('out', 'err'): @@ -112,6 +136,10 @@ def _write_captured_output(self, report): def append(self, obj): self.tests[-1].append(obj) + def append_custom_properties(self): + self.tests[-1].attr.__dict__.update(self.custom_properties) + self.custom_properties.clear() + def append_pass(self, report): self.passed += 1 self._write_captured_output(report) @@ -170,18 +198,19 @@ def append_skipped(self, report): self._write_captured_output(report) def 
pytest_runtest_logreport(self, report): + if report.when == "setup": + self._opentestcase(report) + self.tests[-1].attr.time += getattr(report, 'duration', 0) + self.append_custom_properties() if report.passed: if report.when == "call": # ignore setup/teardown - self._opentestcase(report) self.append_pass(report) elif report.failed: - self._opentestcase(report) if report.when != "call": self.append_error(report) else: self.append_failure(report) elif report.skipped: - self._opentestcase(report) self.append_skipped(report) def pytest_collectreport(self, report): diff --git a/_pytest/main.py b/_pytest/main.py index 6af4dc1cabf..6b2ccb6b7b6 100644 --- a/_pytest/main.py +++ b/_pytest/main.py @@ -19,12 +19,15 @@ EXIT_INTERRUPTED = 2 EXIT_INTERNALERROR = 3 EXIT_USAGEERROR = 4 +EXIT_NOTESTSCOLLECTED = 5 name_re = re.compile("^[a-zA-Z_]\w*$") def pytest_addoption(parser): parser.addini("norecursedirs", "directory patterns to avoid for recursion", type="args", default=['.*', 'CVS', '_darcs', '{arch}', '*.egg']) + parser.addini("testpaths", "directories to search for tests when no files or directories are given in the command line.", + type="args", default=[]) #parser.addini("dirpatterns", # "patterns specifying possible locations of test files", # type="linelist", default=["**/test_*.txt", @@ -54,6 +57,9 @@ def pytest_addoption(parser): group.addoption('--confcutdir', dest="confcutdir", default=None, metavar="dir", help="only load conftest.py's relative to specified dir.") + group.addoption('--noconftest', action="store_true", + dest="noconftest", default=False, + help="Don't load any conftest.py files.") group = parser.getgroup("debugconfig", "test session debugging and configuration") @@ -77,16 +83,13 @@ def wrap_session(config, doit): initstate = 0 try: try: - config.do_configure() + config._do_configure() initstate = 1 config.hook.pytest_sessionstart(session=session) initstate = 2 doit(config, session) except pytest.UsageError: - args = sys.exc_info()[1].args - for msg 
in args: - sys.stderr.write("ERROR: %s\n" %(msg,)) - session.exitstatus = EXIT_USAGEERROR + raise except KeyboardInterrupt: excinfo = py.code.ExceptionInfo() config.hook.pytest_keyboard_interrupt(excinfo=excinfo) @@ -98,8 +101,10 @@ def wrap_session(config, doit): if excinfo.errisinstance(SystemExit): sys.stderr.write("mainloop: caught Spurious SystemExit!\n") else: - if session._testsfailed: + if session.testsfailed: session.exitstatus = EXIT_TESTSFAILED + elif session.testscollected == 0: + session.exitstatus = EXIT_NOTESTSCOLLECTED finally: excinfo = None # Explicitly break reference cycle. session.startdir.chdir() @@ -107,9 +112,7 @@ def wrap_session(config, doit): config.hook.pytest_sessionfinish( session=session, exitstatus=session.exitstatus) - if initstate >= 1: - config.do_unconfigure() - config.pluginmanager.ensure_shutdown() + config._ensure_unconfigure() return session.exitstatus def pytest_cmdline_main(config): @@ -153,18 +156,17 @@ def pytest_ignore_collect(path, config): ignore_paths.extend([py.path.local(x) for x in excludeopt]) return path in ignore_paths -class FSHookProxy(object): - def __init__(self, fspath, config): +class FSHookProxy: + def __init__(self, fspath, pm, remove_mods): self.fspath = fspath - self.config = config + self.pm = pm + self.remove_mods = remove_mods def __getattr__(self, name): - plugins = self.config._getmatchingplugins(self.fspath) - x = self.config.hook._getcaller(name, plugins) + x = self.pm.subset_hook_caller(name, remove_plugins=self.remove_mods) self.__dict__[name] = x return x - def compatproperty(name): def fget(self): # deprecated - use pytest.name @@ -277,9 +279,9 @@ def warn(self, code, message): else: fslocation = "%s:%s" % fslocation[:2] - self.ihook.pytest_logwarning(code=code, message=message, - nodeid=self.nodeid, - fslocation=fslocation) + self.ihook.pytest_logwarning.call_historic(kwargs=dict( + code=code, message=message, + nodeid=self.nodeid, fslocation=fslocation)) # methods for ordering nodes 
@property @@ -364,9 +366,6 @@ def listextrakeywords(self): def listnames(self): return [x.name for x in self.listchain()] - def getplugins(self): - return self.config._getmatchingplugins(self.fspath) - def addfinalizer(self, fin): """ register a function to be called when this node is finalized. @@ -512,28 +511,31 @@ class Session(FSCollector): def __init__(self, config): FSCollector.__init__(self, config.rootdir, parent=None, config=config, session=self) - self.config.pluginmanager.register(self, name="session", prepend=True) - self._testsfailed = 0 + self._fs2hookproxy = {} + self.testsfailed = 0 + self.testscollected = 0 self.shouldstop = False self.trace = config.trace.root.get("collection") self._norecursepatterns = config.getini("norecursedirs") self.startdir = py.path.local() - self._fs2hookproxy = {} + self.config.pluginmanager.register(self, name="session") def _makeid(self): return "" + @pytest.hookimpl(tryfirst=True) def pytest_collectstart(self): if self.shouldstop: raise self.Interrupted(self.shouldstop) + @pytest.hookimpl(tryfirst=True) def pytest_runtest_logreport(self, report): if report.failed and not hasattr(report, 'wasxfail'): - self._testsfailed += 1 + self.testsfailed += 1 maxfail = self.config.getvalue("maxfail") - if maxfail and self._testsfailed >= maxfail: + if maxfail and self.testsfailed >= maxfail: self.shouldstop = "stopping after %d failures" % ( - self._testsfailed) + self.testsfailed) pytest_collectreport = pytest_runtest_logreport def isinitpath(self, path): @@ -543,8 +545,20 @@ def gethookproxy(self, fspath): try: return self._fs2hookproxy[fspath] except KeyError: - self._fs2hookproxy[fspath] = x = FSHookProxy(fspath, self.config) - return x + # check if we have the common case of running + # hooks with all conftest.py filesall conftest.py + pm = self.config.pluginmanager + my_conftestmodules = pm._getconftestmodules(fspath) + remove_mods = pm._conftest_plugins.difference(my_conftestmodules) + if remove_mods: + # one or more 
conftests are not in use at this fspath + proxy = FSHookProxy(fspath, pm, remove_mods) + else: + # all plugis are active for this fspath + proxy = self.config.hook + + self._fs2hookproxy[fspath] = proxy + return proxy def perform_collect(self, args=None, genitems=True): hook = self.config.hook @@ -554,6 +568,7 @@ def perform_collect(self, args=None, genitems=True): config=self.config, items=items) finally: hook.pytest_collection_finish(session=self) + self.testscollected = len(items) return items def _perform_collect(self, args, genitems): @@ -727,5 +742,3 @@ def genitems(self, node): for x in self.genitems(subnode): yield x node.ihook.pytest_collectreport(report=rep) - - diff --git a/_pytest/mark.py b/_pytest/mark.py index 5f63723974c..50581e0a890 100644 --- a/_pytest/mark.py +++ b/_pytest/mark.py @@ -45,14 +45,14 @@ def pytest_addoption(parser): def pytest_cmdline_main(config): import _pytest.config if config.option.markers: - config.do_configure() + config._do_configure() tw = _pytest.config.create_terminal_writer(config) for line in config.getini("markers"): name, rest = line.split(":", 1) tw.write("@pytest.mark.%s:" % name, bold=True) tw.line(rest) tw.line() - config.do_unconfigure() + config._ensure_unconfigure() return 0 pytest_cmdline_main.tryfirst = True @@ -291,7 +291,7 @@ def __init__(self, name, args, kwargs): #: positional argument list, empty if none specified self.args = args #: keyword argument dictionary, empty if nothing specified - self.kwargs = kwargs + self.kwargs = kwargs.copy() self._arglist = [(args, kwargs.copy())] def __repr__(self): diff --git a/_pytest/monkeypatch.py b/_pytest/monkeypatch.py index 9cb81f64f03..07998650a3c 100644 --- a/_pytest/monkeypatch.py +++ b/_pytest/monkeypatch.py @@ -27,7 +27,7 @@ def pytest_funcarg__monkeypatch(request): -def derive_importpath(import_path): +def derive_importpath(import_path, raising): import pytest if not isinstance(import_path, _basestring) or "." 
not in import_path: raise TypeError("must be absolute import path string, not %r" % @@ -51,7 +51,8 @@ def derive_importpath(import_path): attr = rest.pop() obj = getattr(obj, attr) attr = rest[0] - getattr(obj, attr) + if raising: + getattr(obj, attr) except AttributeError: __tracebackhide__ = True pytest.fail("object %r has no attribute %r" % (obj, attr)) @@ -95,7 +96,7 @@ def setattr(self, target, name, value=notset, raising=True): "setattr(target, value) with target being a dotted " "import string") value = name - name, target = derive_importpath(target) + name, target = derive_importpath(target, raising) oldval = getattr(target, name, notset) if raising and oldval is notset: @@ -124,7 +125,7 @@ def delattr(self, target, name=notset, raising=True): raise TypeError("use delattr(target, name) or " "delattr(target) with target being a dotted " "import string") - name, target = derive_importpath(target) + name, target = derive_importpath(target, raising) if not hasattr(target, name): if raising: @@ -177,8 +178,8 @@ def syspath_prepend(self, path): sys.path.insert(0, str(path)) def chdir(self, path): - """ Change the current working directory to the specified path - path can be a string or a py.path.local object + """ Change the current working directory to the specified path. + Path can be a string or a py.path.local object. """ if self._cwd is None: self._cwd = os.getcwd() @@ -190,7 +191,17 @@ def chdir(self, path): def undo(self): """ Undo previous changes. This call consumes the undo stack. Calling it a second time has no effect unless - you do more monkeypatching after the undo call.""" + you do more monkeypatching after the undo call. + + There is generally no need to call `undo()`, since it is + called automatically during tear-down. + + Note that the same `monkeypatch` fixture is used across a + single test function invocation. 
If `monkeypatch` is used both by + the test function itself and one of the test fixtures, + calling `undo()` will undo all of the changes made in + both functions. + """ for obj, name, value in self._setattr: if value is not notset: setattr(obj, name, value) diff --git a/_pytest/nose.py b/_pytest/nose.py index 089807b6655..03874686860 100644 --- a/_pytest/nose.py +++ b/_pytest/nose.py @@ -24,7 +24,7 @@ def pytest_runtest_makereport(item, call): call.excinfo = call2.excinfo -@pytest.mark.trylast +@pytest.hookimpl(trylast=True) def pytest_runtest_setup(item): if is_potential_nosetest(item): if isinstance(item.parent, pytest.Generator): diff --git a/_pytest/pastebin.py b/_pytest/pastebin.py index 9f615e4aa42..f5e47915851 100644 --- a/_pytest/pastebin.py +++ b/_pytest/pastebin.py @@ -11,7 +11,7 @@ def pytest_addoption(parser): choices=['failed', 'all'], help="send failed|all info to bpaste.net pastebin service.") -@pytest.mark.trylast +@pytest.hookimpl(trylast=True) def pytest_configure(config): if config.option.pastebin == "all": tr = config.pluginmanager.getplugin('terminalreporter') diff --git a/_pytest/pytester.py b/_pytest/pytester.py index 0a85d3d6036..65ead149f78 100644 --- a/_pytest/pytester.py +++ b/_pytest/pytester.py @@ -1,5 +1,7 @@ """ (disabled by default) support for testing pytest and pytest plugins. 
""" +import gc import sys +import traceback import os import codecs import re @@ -11,10 +13,142 @@ import py import pytest from py.builtin import print_ -from _pytest.core import HookCaller, add_method_wrapper from _pytest.main import Session, EXIT_OK + +def pytest_addoption(parser): + # group = parser.getgroup("pytester", "pytester (self-tests) options") + parser.addoption('--lsof', + action="store_true", dest="lsof", default=False, + help=("run FD checks if lsof is available")) + + parser.addoption('--runpytest', default="inprocess", dest="runpytest", + choices=("inprocess", "subprocess", ), + help=("run pytest sub runs in tests using an 'inprocess' " + "or 'subprocess' (python -m main) method")) + + +def pytest_configure(config): + # This might be called multiple times. Only take the first. + global _pytest_fullpath + try: + _pytest_fullpath + except NameError: + _pytest_fullpath = os.path.abspath(pytest.__file__.rstrip("oc")) + _pytest_fullpath = _pytest_fullpath.replace("$py.class", ".py") + + if config.getvalue("lsof"): + checker = LsofFdLeakChecker() + if checker.matching_platform(): + config.pluginmanager.register(checker) + + +class LsofFdLeakChecker(object): + def get_open_files(self): + out = self._exec_lsof() + open_files = self._parse_lsof_output(out) + return open_files + + def _exec_lsof(self): + pid = os.getpid() + return py.process.cmdexec("lsof -Ffn0 -p %d" % pid) + + def _parse_lsof_output(self, out): + def isopen(line): + return line.startswith('f') and ("deleted" not in line and + 'mem' not in line and "txt" not in line and 'cwd' not in line) + + open_files = [] + + for line in out.split("\n"): + if isopen(line): + fields = line.split('\0') + fd = fields[0][1:] + filename = fields[1][1:] + if filename.startswith('/'): + open_files.append((fd, filename)) + + return open_files + + def matching_platform(self): + try: + py.process.cmdexec("lsof -v") + except (py.process.cmdexec.Error, UnicodeDecodeError): + # cmdexec may raise UnicodeDecodeError on 
Windows systems + # with locale other than english: + # https://bitbucket.org/pytest-dev/py/issues/66 + return False + else: + return True + + @pytest.hookimpl(hookwrapper=True, tryfirst=True) + def pytest_runtest_item(self, item): + lines1 = self.get_open_files() + yield + if hasattr(sys, "pypy_version_info"): + gc.collect() + lines2 = self.get_open_files() + + new_fds = set([t[0] for t in lines2]) - set([t[0] for t in lines1]) + leaked_files = [t for t in lines2 if t[0] in new_fds] + if leaked_files: + error = [] + error.append("***** %s FD leakage detected" % len(leaked_files)) + error.extend([str(f) for f in leaked_files]) + error.append("*** Before:") + error.extend([str(f) for f in lines1]) + error.append("*** After:") + error.extend([str(f) for f in lines2]) + error.append(error[0]) + error.append("*** function %s:%s: %s " % item.location) + pytest.fail("\n".join(error), pytrace=False) + + +# XXX copied from execnet's conftest.py - needs to be merged +winpymap = { + 'python2.7': r'C:\Python27\python.exe', + 'python2.6': r'C:\Python26\python.exe', + 'python3.1': r'C:\Python31\python.exe', + 'python3.2': r'C:\Python32\python.exe', + 'python3.3': r'C:\Python33\python.exe', + 'python3.4': r'C:\Python34\python.exe', + 'python3.5': r'C:\Python35\python.exe', +} + +def getexecutable(name, cache={}): + try: + return cache[name] + except KeyError: + executable = py.path.local.sysfind(name) + if executable: + if name == "jython": + import subprocess + popen = subprocess.Popen([str(executable), "--version"], + universal_newlines=True, stderr=subprocess.PIPE) + out, err = popen.communicate() + if not err or "2.5" not in err: + executable = None + if "2.5.2" in err: + executable = None # http://bugs.jython.org/issue1790 + cache[name] = executable + return executable + +@pytest.fixture(params=['python2.6', 'python2.7', 'python3.3', "python3.4", + 'pypy', 'pypy3']) +def anypython(request): + name = request.param + executable = getexecutable(name) + if executable is None: + 
if sys.platform == "win32": + executable = winpymap.get(name, None) + if executable: + executable = py.path.local(executable) + if executable.check(): + return executable + pytest.skip("no suitable %s found" % (name,)) + return executable + # used at least by pytest-xdist plugin @pytest.fixture def _pytest(request): @@ -39,23 +173,6 @@ def get_public_names(l): return [x for x in l if x[0] != "_"] -def pytest_addoption(parser): - group = parser.getgroup("pylib") - group.addoption('--no-tools-on-path', - action="store_true", dest="notoolsonpath", default=False, - help=("discover tools on PATH instead of going through py.cmdline.") - ) - -def pytest_configure(config): - # This might be called multiple times. Only take the first. - global _pytest_fullpath - try: - _pytest_fullpath - except NameError: - _pytest_fullpath = os.path.abspath(pytest.__file__.rstrip("oc")) - _pytest_fullpath = _pytest_fullpath.replace("$py.class", ".py") - - class ParsedCall: def __init__(self, name, kwargs): self.__dict__.update(kwargs) @@ -68,15 +185,24 @@ def __repr__(self): class HookRecorder: + """Record all hooks called in a plugin manager. + + This wraps all the hook calls in the plugin manager, recording + each call before propagating the normal calls. 
+ + """ + def __init__(self, pluginmanager): self._pluginmanager = pluginmanager self.calls = [] - def _docall(hookcaller, methods, kwargs): - self.calls.append(ParsedCall(hookcaller.name, kwargs)) - yield - self._undo_wrapping = add_method_wrapper(HookCaller, _docall) - pluginmanager.add_shutdown(self._undo_wrapping) + def before(hook_name, hook_impls, kwargs): + self.calls.append(ParsedCall(hook_name, kwargs)) + + def after(outcome, hook_name, hook_impls, kwargs): + pass + + self._undo_wrapping = pluginmanager.add_hookcall_monitoring(before, after) def finish_recording(self): self._undo_wrapping() @@ -186,18 +312,36 @@ def clear(self): self.calls[:] = [] -def pytest_funcarg__linecomp(request): +@pytest.fixture +def linecomp(request): return LineComp() + def pytest_funcarg__LineMatcher(request): return LineMatcher -def pytest_funcarg__testdir(request): - tmptestdir = TmpTestdir(request) - return tmptestdir -rex_outcome = re.compile("(\d+) (\w+)") +@pytest.fixture +def testdir(request, tmpdir_factory): + return Testdir(request, tmpdir_factory) + + +rex_outcome = re.compile("(\d+) ([\w-]+)") class RunResult: + """The result of running a command. + + Attributes: + + :ret: The return value. + :outlines: List of lines captured from stdout. + :errlines: List of lines captures from stderr. + :stdout: :py:class:`LineMatcher` of stdout, use ``stdout.str()`` to + reconstruct stdout or the commonly used + ``stdout.fnmatch_lines()`` method. + :stderrr: :py:class:`LineMatcher` of stderr. + :duration: Duration in seconds. 
+ + """ def __init__(self, ret, outlines, errlines, duration): self.ret = ret self.outlines = outlines @@ -207,6 +351,8 @@ def __init__(self, ret, outlines, errlines, duration): self.duration = duration def parseoutcomes(self): + """ Return a dictionary of outcomestring->num from parsing + the terminal output that the test process produced.""" for line in reversed(self.outlines): if 'seconds' in line: outcomes = rex_outcome.findall(line) @@ -216,12 +362,41 @@ def parseoutcomes(self): d[cat] = int(num) return d -class TmpTestdir: - def __init__(self, request): + def assert_outcomes(self, passed=0, skipped=0, failed=0): + """ assert that the specified outcomes appear with the respective + numbers (0 means it didn't occur) in the text output from a test run.""" + d = self.parseoutcomes() + assert passed == d.get("passed", 0) + assert skipped == d.get("skipped", 0) + assert failed == d.get("failed", 0) + + + +class Testdir: + """Temporary test directory with tools to test/run py.test itself. + + This is based on the ``tmpdir`` fixture but provides a number of + methods which aid with testing py.test itself. Unless + :py:meth:`chdir` is used all methods will use :py:attr:`tmpdir` as + current working directory. + + Attributes: + + :tmpdir: The :py:class:`py.path.local` instance of the temporary + directory. + + :plugins: A list of plugins to use with :py:meth:`parseconfig` and + :py:meth:`runpytest`. Initially this is an empty list but + plugins can be added to the list. The type of items to add to + the list depend on the method which uses them so refer to them + for details. 
+ + """ + + def __init__(self, request, tmpdir_factory): self.request = request - self.Config = request.config.__class__ # XXX remove duplication with tmpdir plugin - basetmp = request.config._tmpdirhandler.ensuretemp("testdir") + basetmp = tmpdir_factory.ensuretemp("testdir") name = request.function.__name__ for i in range(100): try: @@ -231,32 +406,58 @@ def __init__(self, request): break self.tmpdir = tmpdir self.plugins = [] - self._syspathremove = [] + self._savesyspath = (list(sys.path), list(sys.meta_path)) + self._savemodulekeys = set(sys.modules) self.chdir() # always chdir self.request.addfinalizer(self.finalize) + method = self.request.config.getoption("--runpytest") + if method == "inprocess": + self._runpytest_method = self.runpytest_inprocess + elif method == "subprocess": + self._runpytest_method = self.runpytest_subprocess def __repr__(self): - return "" % (self.tmpdir,) + return "" % (self.tmpdir,) def finalize(self): - for p in self._syspathremove: - sys.path.remove(p) + """Clean up global state artifacts. + + Some methods modify the global interpreter state and this + tries to clean this up. It does not remove the temporary + directory however so it can be looked at after the test run + has finished. + + """ + sys.path[:], sys.meta_path[:] = self._savesyspath if hasattr(self, '_olddir'): self._olddir.chdir() - # delete modules that have been loaded from tmpdir - for name, mod in list(sys.modules.items()): - if mod: - fn = getattr(mod, '__file__', None) - if fn and fn.startswith(str(self.tmpdir)): - del sys.modules[name] + self.delete_loaded_modules() + + def delete_loaded_modules(self): + """Delete modules that have been loaded during a test. + + This allows the interpreter to catch module changes in case + the module is re-imported. 
+ """ + for name in set(sys.modules).difference(self._savemodulekeys): + # it seems zope.interfaces is keeping some state + # (used by twisted related tests) + if name != "zope.interface": + del sys.modules[name] def make_hook_recorder(self, pluginmanager): + """Create a new :py:class:`HookRecorder` for a PluginManager.""" assert not hasattr(pluginmanager, "reprec") pluginmanager.reprec = reprec = HookRecorder(pluginmanager) self.request.addfinalizer(reprec.finish_recording) return reprec def chdir(self): + """Cd into the temporary directory. + + This is done automatically upon instantiation. + + """ old = self.tmpdir.chdir() if not hasattr(self, '_olddir'): self._olddir = old @@ -285,42 +486,81 @@ def my_totext(s, encoding="utf-8"): ret = p return ret - def makefile(self, ext, *args, **kwargs): + """Create a new file in the testdir. + + ext: The extension the file should use, including the dot. + E.g. ".py". + + args: All args will be treated as strings and joined using + newlines. The result will be written as contents to the + file. The name of the file will be based on the test + function requesting this fixture. + E.g. "testdir.makefile('.txt', 'line1', 'line2')" + + kwargs: Each keyword is the name of a file, while the value of + it will be written as contents of the file. + E.g. 
"testdir.makefile('.ini', pytest='[pytest]\naddopts=-rs\n')" + + """ return self._makefile(ext, args, kwargs) def makeconftest(self, source): + """Write a contest.py file with 'source' as contents.""" return self.makepyfile(conftest=source) def makeini(self, source): + """Write a tox.ini file with 'source' as contents.""" return self.makefile('.ini', tox=source) def getinicfg(self, source): + """Return the pytest section from the tox.ini config file.""" p = self.makeini(source) return py.iniconfig.IniConfig(p)['pytest'] def makepyfile(self, *args, **kwargs): + """Shortcut for .makefile() with a .py extension.""" return self._makefile('.py', args, kwargs) def maketxtfile(self, *args, **kwargs): + """Shortcut for .makefile() with a .txt extension.""" return self._makefile('.txt', args, kwargs) def syspathinsert(self, path=None): + """Prepend a directory to sys.path, defaults to :py:attr:`tmpdir`. + + This is undone automatically after the test. + """ if path is None: path = self.tmpdir sys.path.insert(0, str(path)) - self._syspathremove.append(str(path)) def mkdir(self, name): + """Create a new (sub)directory.""" return self.tmpdir.mkdir(name) def mkpydir(self, name): + """Create a new python package. + + This creates a (sub)direcotry with an empty ``__init__.py`` + file so that is recognised as a python package. + + """ p = self.mkdir(name) p.ensure("__init__.py") return p Session = Session def getnode(self, config, arg): + """Return the collection node of a file. + + :param config: :py:class:`_pytest.config.Config` instance, see + :py:meth:`parseconfig` and :py:meth:`parseconfigure` to + create the configuration. + + :param arg: A :py:class:`py.path.local` instance of the file. + + """ session = Session(config) assert '::' not in str(arg) p = py.path.local(arg) @@ -330,6 +570,15 @@ def getnode(self, config, arg): return res def getpathnode(self, path): + """Return the collection node of a file. 
+ + This is like :py:meth:`getnode` but uses + :py:meth:`parseconfigure` to create the (configured) py.test + Config instance. + + :param path: A :py:class:`py.path.local` instance of the file. + + """ config = self.parseconfigure(path) session = Session(config) x = session.fspath.bestrelpath(path) @@ -339,6 +588,12 @@ def getpathnode(self, path): return res def genitems(self, colitems): + """Generate all test items from a collection node. + + This recurses into the collection node and returns a list of + all the test items contained within. + + """ session = colitems[0].session result = [] for colitem in colitems: @@ -346,6 +601,14 @@ def genitems(self, colitems): return result def runitem(self, source): + """Run the "test_func" Item. + + The calling test instance (the class which contains the test + method) must provide a ``.getrunner()`` method which should + return a runner which can run the test protocol for a single + item, like e.g. :py:func:`_pytest.runner.runtestprotocol`. + + """ # used from runner functional tests item = self.getitem(source) # the test class where we are called from wants to provide the runner @@ -354,68 +617,176 @@ def runitem(self, source): return runner(item) def inline_runsource(self, source, *cmdlineargs): + """Run a test module in process using ``pytest.main()``. + + This run writes "source" into a temporary file and runs + ``pytest.main()`` on it, returning a :py:class:`HookRecorder` + instance for the result. + + :param source: The source code of the test module. + + :param cmdlineargs: Any extra command line arguments to use. + + :return: :py:class:`HookRecorder` instance of the result. 
+ + """ p = self.makepyfile(source) l = list(cmdlineargs) + [p] return self.inline_run(*l) - def inline_runsource1(self, *args): - args = list(args) - source = args.pop() - p = self.makepyfile(source) - l = list(args) + [p] - reprec = self.inline_run(*l) - reports = reprec.getreports("pytest_runtest_logreport") - assert len(reports) == 3, reports # setup/call/teardown - return reports[1] - def inline_genitems(self, *args): - return self.inprocess_run(list(args) + ['--collectonly']) + """Run ``pytest.main(['--collectonly'])`` in-process. + + Retuns a tuple of the collected items and a + :py:class:`HookRecorder` instance. - def inprocess_run(self, args, plugins=()): - rec = self.inline_run(*args, plugins=plugins) + This runs the :py:func:`pytest.main` function to run all of + py.test inside the test process itself like + :py:meth:`inline_run`. However the return value is a tuple of + the collection items and a :py:class:`HookRecorder` instance. + + """ + rec = self.inline_run("--collect-only", *args) items = [x.item for x in rec.getcalls("pytest_itemcollected")] return items, rec def inline_run(self, *args, **kwargs): + """Run ``pytest.main()`` in-process, returning a HookRecorder. + + This runs the :py:func:`pytest.main` function to run all of + py.test inside the test process itself. This means it can + return a :py:class:`HookRecorder` instance which gives more + detailed results from then run then can be done by matching + stdout/stderr from :py:meth:`runpytest`. + + :param args: Any command line arguments to pass to + :py:func:`pytest.main`. + + :param plugin: (keyword-only) Extra plugin instances the + ``pytest.main()`` instance should use. + + :return: A :py:class:`HookRecorder` instance. 
+ + """ rec = [] class Collect: def pytest_configure(x, config): rec.append(self.make_hook_recorder(config.pluginmanager)) + plugins = kwargs.get("plugins") or [] plugins.append(Collect()) ret = pytest.main(list(args), plugins=plugins) - assert len(rec) == 1 - reprec = rec[0] + self.delete_loaded_modules() + if len(rec) == 1: + reprec = rec.pop() + else: + class reprec: + pass reprec.ret = ret + + # typically we reraise keyboard interrupts from the child run + # because it's our user requesting interruption of the testing + if ret == 2 and not kwargs.get("no_reraise_ctrlc"): + calls = reprec.getcalls("pytest_keyboard_interrupt") + if calls and calls[-1].excinfo.type == KeyboardInterrupt: + raise KeyboardInterrupt() return reprec - def parseconfig(self, *args): + def runpytest_inprocess(self, *args, **kwargs): + """ Return result of running pytest in-process, providing a similar + interface to what self.runpytest() provides. """ + if kwargs.get("syspathinsert"): + self.syspathinsert() + now = time.time() + capture = py.io.StdCapture() + try: + try: + reprec = self.inline_run(*args, **kwargs) + except SystemExit as e: + class reprec: + ret = e.args[0] + except Exception: + traceback.print_exc() + class reprec: + ret = 3 + finally: + out, err = capture.reset() + sys.stdout.write(out) + sys.stderr.write(err) + + res = RunResult(reprec.ret, + out.split("\n"), err.split("\n"), + time.time()-now) + res.reprec = reprec + return res + + def runpytest(self, *args, **kwargs): + """ Run pytest inline or in a subprocess, depending on the command line + option "--runpytest" and return a :py:class:`RunResult`. 
+ + """ + args = self._ensure_basetemp(args) + return self._runpytest_method(*args, **kwargs) + + def _ensure_basetemp(self, args): args = [str(x) for x in args] for x in args: if str(x).startswith('--basetemp'): + #print ("basedtemp exists: %s" %(args,)) break else: args.append("--basetemp=%s" % self.tmpdir.dirpath('basetemp')) + #print ("added basetemp: %s" %(args,)) + return args + + def parseconfig(self, *args): + """Return a new py.test Config instance from given commandline args. + + This invokes the py.test bootstrapping code in _pytest.config + to create a new :py:class:`_pytest.core.PluginManager` and + call the pytest_cmdline_parse hook to create new + :py:class:`_pytest.config.Config` instance. + + If :py:attr:`plugins` has been populated they should be plugin + modules which will be registered with the PluginManager. + + """ + args = self._ensure_basetemp(args) + import _pytest.config config = _pytest.config._prepareconfig(args, self.plugins) # we don't know what the test will do with this half-setup config # object and thus we make sure it gets unconfigured properly in any # case (otherwise capturing could still be active, for example) - def ensure_unconfigure(): - if hasattr(config.pluginmanager, "_config"): - config.pluginmanager.do_unconfigure(config) - config.pluginmanager.ensure_shutdown() - - self.request.addfinalizer(ensure_unconfigure) + self.request.addfinalizer(config._ensure_unconfigure) return config def parseconfigure(self, *args): + """Return a new py.test configured Config instance. + + This returns a new :py:class:`_pytest.config.Config` instance + like :py:meth:`parseconfig`, but also calls the + pytest_configure hook. + + """ config = self.parseconfig(*args) - config.do_configure() - self.request.addfinalizer(config.do_unconfigure) + config._do_configure() + self.request.addfinalizer(config._ensure_unconfigure) return config def getitem(self, source, funcname="test_func"): + """Return the test item for a test function. 
+ + This writes the source to a python file and runs py.test's + collection on the resulting module, returning the test item + for the requested function name. + + :param source: The module source. + + :param funcname: The name of the test function for which the + Item must be returned. + + """ items = self.getitems(source) for item in items: if item.name == funcname: @@ -424,10 +795,32 @@ def getitem(self, source, funcname="test_func"): funcname, source, items) def getitems(self, source): + """Return all test items collected from the module. + + This writes the source to a python file and runs py.test's + collection on the resulting module, returning all test items + contained within. + + """ modcol = self.getmodulecol(source) return self.genitems([modcol]) def getmodulecol(self, source, configargs=(), withinit=False): + """Return the module collection node for ``source``. + + This writes ``source`` to a file using :py:meth:`makepyfile` + and then runs the py.test collection on it, returning the + collection node for the test module. + + :param source: The source code of the module to collect. + + :param configargs: Any extra arguments to pass to + :py:meth:`parseconfigure`. + + :param withinit: Whether to also write a ``__init__.py`` file + to the temporarly directory to ensure it is a package. + + """ kw = {self.request.function.__name__: py.code.Source(source).strip()} path = self.makepyfile(**kw) if withinit: @@ -437,11 +830,30 @@ def getmodulecol(self, source, configargs=(), withinit=False): return node def collect_by_name(self, modcol, name): + """Return the collection node for name from the module collection. + + This will search a module collection node for a collection + node matching the given name. + + :param modcol: A module collection node, see + :py:meth:`getmodulecol`. + + :param name: The name of the node to return. 
+ + """ for colitem in modcol._memocollect(): if colitem.name == name: return colitem def popen(self, cmdargs, stdout, stderr, **kw): + """Invoke subprocess.Popen. + + This calls subprocess.Popen making sure the current working + directory is the PYTHONPATH. + + You probably want to use :py:meth:`run` instead. + + """ env = os.environ.copy() env['PYTHONPATH'] = os.pathsep.join(filter(None, [ str(os.getcwd()), env.get('PYTHONPATH', '')])) @@ -450,13 +862,22 @@ def popen(self, cmdargs, stdout, stderr, **kw): stdout=stdout, stderr=stderr, **kw) def run(self, *cmdargs): + """Run a command with arguments. + + Run a process using subprocess.Popen saving the stdout and + stderr. + + Returns a :py:class:`RunResult`. + + """ return self._run(*cmdargs) def _run(self, *cmdargs): cmdargs = [str(x) for x in cmdargs] p1 = self.tmpdir.join("stdout") p2 = self.tmpdir.join("stderr") - print_("running", cmdargs, "curdir=", py.path.local()) + print_("running:", ' '.join(cmdargs)) + print_(" in:", str(py.path.local())) f1 = codecs.open(str(p1), "w", encoding="utf8") f2 = codecs.open(str(p2), "w", encoding="utf8") try: @@ -486,38 +907,35 @@ def _dump_lines(self, lines, fp): except UnicodeEncodeError: print("couldn't print to %s because of encoding" % (fp,)) - def runpybin(self, scriptname, *args): - fullargs = self._getpybinargs(scriptname) + args - return self.run(*fullargs) + def _getpytestargs(self): + # we cannot use "(sys.executable,script)" + # because on windows the script is e.g. a py.test.exe + return (sys.executable, _pytest_fullpath,) # noqa - def _getpybinargs(self, scriptname): - if not self.request.config.getvalue("notoolsonpath"): - # XXX we rely on script referring to the correct environment - # we cannot use "(sys.executable,script)" - # because on windows the script is e.g. 
a py.test.exe - return (sys.executable, _pytest_fullpath,) # noqa - else: - pytest.skip("cannot run %r with --no-tools-on-path" % scriptname) + def runpython(self, script): + """Run a python script using sys.executable as interpreter. - def runpython(self, script, prepend=True): - if prepend: - s = self._getsysprepend() - if s: - script.write(s + "\n" + script.read()) + Returns a :py:class:`RunResult`. + """ return self.run(sys.executable, script) - def _getsysprepend(self): - if self.request.config.getvalue("notoolsonpath"): - s = "import sys;sys.path.insert(0,%r);" % str(py._pydir.dirpath()) - else: - s = "" - return s - def runpython_c(self, command): - command = self._getsysprepend() + command + """Run python -c "command", return a :py:class:`RunResult`.""" return self.run(sys.executable, "-c", command) - def runpytest(self, *args): + def runpytest_subprocess(self, *args, **kwargs): + """Run py.test as a subprocess with given arguments. + + Any plugins added to the :py:attr:`plugins` list will added + using the ``-p`` command line option. Addtionally + ``--basetemp`` is used put any temporary files and directories + in a numbered directory prefixed with "runpytest-" so they do + not conflict with the normal numberd pytest location for + temporary files and directories. + + Returns a :py:class:`RunResult`. + + """ p = py.path.local.make_numbered_dir(prefix="runpytest-", keep=None, rootdir=self.tmpdir) args = ('--basetemp=%s' % p, ) + args @@ -530,17 +948,28 @@ def runpytest(self, *args): plugins = [x for x in self.plugins if isinstance(x, str)] if plugins: args = ('-p', plugins[0]) + args - return self.runpybin("py.test", *args) + args = self._getpytestargs() + args + return self.run(*args) def spawn_pytest(self, string, expect_timeout=10.0): - if self.request.config.getvalue("notoolsonpath"): - pytest.skip("--no-tools-on-path prevents running pexpect-spawn tests") + """Run py.test using pexpect. 
+ + This makes sure to use the right py.test and sets up the + temporary directory locations. + + The pexpect child is returned. + + """ basetemp = self.tmpdir.mkdir("pexpect") - invoke = " ".join(map(str, self._getpybinargs("py.test"))) + invoke = " ".join(map(str, self._getpytestargs())) cmd = "%s --basetemp=%s %s" % (invoke, basetemp, string) return self.spawn(cmd, expect_timeout=expect_timeout) def spawn(self, cmd, expect_timeout=10.0): + """Run a command using pexpect. + + The pexpect child is returned. + """ pexpect = pytest.importorskip("pexpect", "3.0") if hasattr(sys, 'pypy_version_info') and '64' in platform.machine(): pytest.skip("pypy-64 bit not supported") @@ -577,11 +1006,23 @@ def assert_contains_lines(self, lines2): lines1 = val.split("\n") return LineMatcher(lines1).fnmatch_lines(lines2) + class LineMatcher: + """Flexible matching of text. + + This is a convenience class to test large texts like the output of + commands. + + The constructor takes a list of lines without their trailing + newlines, i.e. ``text.splitlines()``. + + """ + def __init__(self, lines): self.lines = lines def str(self): + """Return the entire original text.""" return "\n".join(self.lines) def _getlines(self, lines2): @@ -592,6 +1033,12 @@ def _getlines(self, lines2): return lines2 def fnmatch_lines_random(self, lines2): + """Check lines exist in the output. + + The argument is a list of lines which have to occur in the + output, in any order. Each line can contain glob wildcards. + + """ lines2 = self._getlines(lines2) for line in lines2: for x in self.lines: @@ -602,12 +1049,24 @@ def fnmatch_lines_random(self, lines2): raise ValueError("line %r not found in output" % line) def get_lines_after(self, fnline): + """Return all lines following the given line in the text. + + The given line can contain glob wildcards. 
+ """ for i, line in enumerate(self.lines): if fnline == line or fnmatch(line, fnline): return self.lines[i+1:] raise ValueError("line %r not found in output" % fnline) def fnmatch_lines(self, lines2): + """Search the text for matching lines. + + The argument is a list of lines which have to match and can + use glob wildcards. If they do not match a pytest.fail() is + called. The matches and non-matches are also printed on + stdout. + + """ def show(arg1, arg2): py.builtin.print_(arg1, arg2, file=sys.stderr) lines2 = self._getlines(lines2) diff --git a/_pytest/python.py b/_pytest/python.py index a66dd90316a..9b04e7861f0 100644 --- a/_pytest/python.py +++ b/_pytest/python.py @@ -1,4 +1,5 @@ """ Python test discovery, setup and run of test functions. """ +import re import fnmatch import functools import py @@ -8,8 +9,18 @@ from _pytest.mark import MarkDecorator, MarkerError from py._code.code import TerminalRepr +try: + import enum +except ImportError: # pragma: no cover + # Only available in Python 3.4+ or as a backport + enum = None + import _pytest -cutdir = py.path.local(_pytest.__file__).dirpath() +import pluggy + +cutdir2 = py.path.local(_pytest.__file__).dirpath() +cutdir1 = py.path.local(pluggy.__file__.rstrip("oc")) + NoneType = type(None) NOTSET = object() @@ -18,9 +29,15 @@ callable = py.builtin.callable # used to work around a python2 exception info leak exc_clear = getattr(sys, 'exc_clear', lambda: None) +# The type of re.compile objects is not exposed in Python. +REGEX_TYPE = type(re.compile('')) + +def filter_traceback(entry): + return entry.path != cutdir1 and not entry.path.relto(cutdir2) + def get_real_func(obj): - """gets the real function object of the (possibly) wrapped object by + """ gets the real function object of the (possibly) wrapped object by functools.wraps or functools.partial. 
""" while hasattr(obj, "__wrapped__"): @@ -47,6 +64,17 @@ def getimfunc(func): except AttributeError: return func +def safe_getattr(object, name, default): + """ Like getattr but return default upon any Exception. + + Attribute access can potentially fail for 'evil' Python objects. + See issue214 + """ + try: + return getattr(object, name, default) + except Exception: + return default + class FixtureFunctionMarker: def __init__(self, scope, params, @@ -152,10 +180,13 @@ def pytest_cmdline_main(config): def pytest_generate_tests(metafunc): - # this misspelling is common - raise a specific error to alert the user - if hasattr(metafunc.function, 'parameterize'): - msg = "{0} has 'parameterize', spelling should be 'parametrize'" - raise MarkerError(msg.format(metafunc.function.__name__)) + # those alternative spellings are common - raise a specific error to alert + # the user + alt_spellings = ['parameterize', 'parametrise', 'parameterise'] + for attr in alt_spellings: + if hasattr(metafunc.function, attr): + msg = "{0} has '{1}', spelling should be 'parametrize'" + raise MarkerError(msg.format(metafunc.function.__name__, attr)) try: markers = metafunc.function.parametrize except AttributeError: @@ -182,7 +213,7 @@ def pytest_configure(config): def pytest_sessionstart(session): session._fixturemanager = FixtureManager(session) -@pytest.mark.trylast +@pytest.hookimpl(trylast=True) def pytest_namespace(): raises.Exception = pytest.fail.Exception return { @@ -201,7 +232,7 @@ def pytestconfig(request): return request.config -@pytest.mark.trylast +@pytest.hookimpl(trylast=True) def pytest_pyfunc_call(pyfuncitem): testfunction = pyfuncitem.obj if pyfuncitem._isyieldedfunction(): @@ -229,7 +260,7 @@ def pytest_collect_file(path, parent): def pytest_pycollect_makemodule(path, parent): return Module(path, parent) -@pytest.mark.hookwrapper +@pytest.hookimpl(hookwrapper=True) def pytest_pycollect_makeitem(collector, name, obj): outcome = yield res = outcome.get_result() @@ 
-237,11 +268,10 @@ def pytest_pycollect_makeitem(collector, name, obj): raise StopIteration # nothing was collected elsewhere, let's do it here if isclass(obj): - if collector.classnamefilter(name): + if collector.istestclass(obj, name): Class = collector._getcustomclass("Class") outcome.force_result(Class(name, parent=collector)) - elif collector.funcnamefilter(name) and hasattr(obj, "__call__") and\ - getfixturemarker(obj) is None: + elif collector.istestfunction(obj, name): # mock seems to store unbound methods (issue473), normalize it obj = getattr(obj, "__func__", obj) if not isfunction(obj): @@ -327,9 +357,24 @@ class PyCollector(PyobjMixin, pytest.Collector): def funcnamefilter(self, name): return self._matches_prefix_or_glob_option('python_functions', name) + def isnosetest(self, obj): + """ Look for the __test__ attribute, which is applied by the + @nose.tools.istest decorator + """ + return safe_getattr(obj, '__test__', False) + def classnamefilter(self, name): return self._matches_prefix_or_glob_option('python_classes', name) + def istestfunction(self, obj, name): + return ( + (self.funcnamefilter(name) or self.isnosetest(obj)) + and safe_getattr(obj, "__call__", False) and getfixturemarker(obj) is None + ) + + def istestclass(self, obj, name): + return self.classnamefilter(name) or self.isnosetest(obj) + def _matches_prefix_or_glob_option(self, option_name, name): """ checks if the given name matches the prefix or glob-pattern defined @@ -385,13 +430,16 @@ def _genfunctions(self, name, funcobj): fixtureinfo = fm.getfixtureinfo(self, funcobj, cls) metafunc = Metafunc(funcobj, fixtureinfo, self.config, cls=cls, module=module) - try: - methods = [module.pytest_generate_tests] - except AttributeError: - methods = [] + methods = [] + if hasattr(module, "pytest_generate_tests"): + methods.append(module.pytest_generate_tests) if hasattr(cls, "pytest_generate_tests"): methods.append(cls().pytest_generate_tests) - 
self.ihook.pytest_generate_tests.callextra(methods, metafunc=metafunc) + if methods: + self.ihook.pytest_generate_tests.call_extra(methods, + dict(metafunc=metafunc)) + else: + self.ihook.pytest_generate_tests(metafunc=metafunc) Function = self._getcustomclass("Function") if not metafunc._calls: @@ -469,6 +517,19 @@ def __init__(self, argnames, names_closure, name2fixturedefs): self.names_closure = names_closure self.name2fixturedefs = name2fixturedefs + +def _marked(func, mark): + """ Returns True if :func: is already marked with :mark:, False otherwise. + This can happen if marker is applied to class and the test file is + invoked more than once. + """ + try: + func_mark = getattr(func, mark.name) + except AttributeError: + return False + return mark.args == func_mark.args and mark.kwargs == func_mark.kwargs + + def transfer_markers(funcobj, cls, mod): # XXX this should rather be code in the mark plugin or the mark # plugin should merge with the python plugin. @@ -479,9 +540,11 @@ def transfer_markers(funcobj, cls, mod): continue if isinstance(pytestmark, list): for mark in pytestmark: - mark(funcobj) + if not _marked(funcobj, mark): + mark(funcobj) else: - pytestmark(funcobj) + if not _marked(funcobj, pytestmark): + pytestmark(funcobj) class Module(pytest.File, PyCollector): """ Collector for test classes and functions. 
""" @@ -495,7 +558,7 @@ def collect(self): def _importtestmodule(self): # we assume we are only called once per module try: - mod = self.fspath.pyimport(ensuresyspath=True) + mod = self.fspath.pyimport(ensuresyspath="append") except SyntaxError: raise self.CollectError( py.code.ExceptionInfo().getrepr(style="short")) @@ -611,7 +674,11 @@ def _prunetraceback(self, excinfo): if ntraceback == traceback: ntraceback = ntraceback.cut(path=path) if ntraceback == traceback: - ntraceback = ntraceback.cut(excludepath=cutdir) + #ntraceback = ntraceback.cut(excludepath=cutdir2) + ntraceback = ntraceback.filter(filter_traceback) + if not ntraceback: + ntraceback = traceback + excinfo.traceback = ntraceback.filter() # issue364: mark all but first and last frames to # only show a single-line message for each frame @@ -746,11 +813,12 @@ def getparam(self, name): def id(self): return "-".join(map(str, filter(None, self._idlist))) - def setmulti(self, valtype, argnames, valset, id, keywords, scopenum, + def setmulti(self, valtypes, argnames, valset, id, keywords, scopenum, param_index): for arg,val in zip(argnames, valset): self._checkargnotcontained(arg) - getattr(self, valtype)[arg] = val + valtype_for_arg = valtypes[arg] + getattr(self, valtype_for_arg)[arg] = val self.indices[arg] = param_index self._arg2scopenum[arg] = scopenum if val is _notexists: @@ -781,6 +849,27 @@ def funcargnames(self): return self.fixturenames class Metafunc(FuncargnamesCompatAttr): + """ + Metafunc objects are passed to the ``pytest_generate_tests`` hook. + They help to inspect a test function and to generate tests according to + test configuration or values specified in the class or module where a + test function is defined. + + :ivar fixturenames: set of fixture names required by the test function + + :ivar function: underlying python test function + + :ivar cls: class object where the test function is defined in or ``None``. + + :ivar module: the module object where the test function is defined in. 
+ + :ivar config: access to the :class:`_pytest.config.Config` object for the + test session. + + :ivar funcargnames: + .. deprecated:: 2.3 + Use ``fixturenames`` instead. + """ def __init__(self, function, fixtureinfo, config, cls=None, module=None): self.config = config self.module = module @@ -796,7 +885,7 @@ def parametrize(self, argnames, argvalues, indirect=False, ids=None, """ Add new invocations to the underlying test function using the list of argvalues for the given argnames. Parametrization is performed during the collection phase. If you need to setup expensive resources - see about setting indirect=True to do it rather at test setup time. + see about setting indirect to do it rather at test setup time. :arg argnames: a comma-separated string denoting one or more argument names, or a list/tuple of argument strings. @@ -808,7 +897,9 @@ def parametrize(self, argnames, argvalues, indirect=False, ids=None, where each tuple-element specifies a value for its respective argname. - :arg indirect: if True each argvalue corresponding to an argname will + :arg indirect: The list of argnames or boolean. A list of arguments' + names (subset of argnames). If True the list contains all names from + the argnames. Each argvalue corresponding to an argname in this list will be passed as request.param to its respective argname fixture function so that it can perform more expensive setups during the setup phase of a test rather than at collection time. @@ -853,13 +944,22 @@ def parametrize(self, argnames, argvalues, indirect=False, ids=None, if scope is None: scope = "function" scopenum = scopes.index(scope) - if not indirect: - #XXX should we also check for the opposite case? 
- for arg in argnames: - if arg not in self.fixturenames: - raise ValueError("%r uses no fixture %r" %( + valtypes = {} + for arg in argnames: + if arg not in self.fixturenames: + raise ValueError("%r uses no fixture %r" %(self.function, arg)) + + if indirect is True: + valtypes = dict.fromkeys(argnames, "params") + elif indirect is False: + valtypes = dict.fromkeys(argnames, "funcargs") + elif isinstance(indirect, (tuple, list)): + valtypes = dict.fromkeys(argnames, "funcargs") + for arg in indirect: + if arg not in argnames: + raise ValueError("indirect given to %r: fixture %r doesn't exist" %( self.function, arg)) - valtype = indirect and "params" or "funcargs" + valtypes[arg] = "params" idfn = None if callable(ids): idfn = ids @@ -874,7 +974,7 @@ def parametrize(self, argnames, argvalues, indirect=False, ids=None, for param_index, valset in enumerate(argvalues): assert len(valset) == len(argnames) newcallspec = callspec.copy(self) - newcallspec.setmulti(valtype, argnames, valset, ids[param_index], + newcallspec.setmulti(valtypes, argnames, valset, ids[param_index], newkeywords.get(param_index, {}), scopenum, param_index) newcalls.append(newcallspec) @@ -925,8 +1025,15 @@ def _idval(val, argname, idx, idfn): return s except Exception: pass + if isinstance(val, (float, int, str, bool, NoneType)): return str(val) + elif isinstance(val, REGEX_TYPE): + return val.pattern + elif enum is not None and isinstance(val, enum.Enum): + return str(val) + elif isclass(val) and hasattr(val, '__name__'): + return val.__name__ return str(argname)+str(idx) def _idvalset(idx, valset, argnames, idfn): @@ -1001,8 +1108,8 @@ def getlocation(function, curdir): # builtin pytest.raises helper -def raises(ExpectedException, *args, **kwargs): - """ assert that a code block/function call raises @ExpectedException +def raises(expected_exception, *args, **kwargs): + """ assert that a code block/function call raises @expected_exception and raise a failure exception otherwise. 
This helper produces a ``py.code.ExceptionInfo()`` object. @@ -1050,23 +1157,23 @@ def raises(ExpectedException, *args, **kwargs): """ __tracebackhide__ = True - if ExpectedException is AssertionError: + if expected_exception is AssertionError: # we want to catch a AssertionError # replace our subclass with the builtin one - # see https://bitbucket.org/pytest-dev/pytest/issue/176/pytestraises + # see https://github.com/pytest-dev/pytest/issues/176 from _pytest.assertion.util import BuiltinAssertionError \ - as ExpectedException + as expected_exception msg = ("exceptions must be old-style classes or" " derived from BaseException, not %s") - if isinstance(ExpectedException, tuple): - for exc in ExpectedException: - if not inspect.isclass(exc): + if isinstance(expected_exception, tuple): + for exc in expected_exception: + if not isclass(exc): raise TypeError(msg % type(exc)) - elif not inspect.isclass(ExpectedException): - raise TypeError(msg % type(ExpectedException)) + elif not isclass(expected_exception): + raise TypeError(msg % type(expected_exception)) if not args: - return RaisesContext(ExpectedException) + return RaisesContext(expected_exception) elif isinstance(args[0], str): code, = args assert isinstance(code, str) @@ -1079,19 +1186,19 @@ def raises(ExpectedException, *args, **kwargs): py.builtin.exec_(code, frame.f_globals, loc) # XXX didn'T mean f_globals == f_locals something special? # this is destroyed here ... 
- except ExpectedException: + except expected_exception: return py.code.ExceptionInfo() else: func = args[0] try: func(*args[1:], **kwargs) - except ExpectedException: + except expected_exception: return py.code.ExceptionInfo() pytest.fail("DID NOT RAISE") class RaisesContext(object): - def __init__(self, ExpectedException): - self.ExpectedException = ExpectedException + def __init__(self, expected_exception): + self.expected_exception = expected_exception self.excinfo = None def __enter__(self): @@ -1110,7 +1217,7 @@ def __exit__(self, *tp): exc_type, value, traceback = tp tp = exc_type, exc_type(value), traceback self.excinfo.__init__(tp) - return issubclass(self.excinfo.type, self.ExpectedException) + return issubclass(self.excinfo.type, self.expected_exception) # # the basic pytest Function item @@ -1309,7 +1416,7 @@ def session(self): return self._pyfuncitem.session def addfinalizer(self, finalizer): - """add finalizer/teardown function to be called after the + """ add finalizer/teardown function to be called after the last test within the requesting test context finished execution. 
""" # XXX usually this method is shadowed by fixturedef specific ones @@ -1631,7 +1738,6 @@ def __init__(self, session): self.session = session self.config = session.config self._arg2fixturedefs = {} - self._seenplugins = set() self._holderobjseen = set() self._arg2finish = {} self._nodeid_and_autousenames = [("", self.config.getini("usefixtures"))] @@ -1656,11 +1762,7 @@ def getfixtureinfo(self, node, func, cls, funcargs=True): node) return FuncFixtureInfo(argnames, names_closure, arg2fixturedefs) - ### XXX this hook should be called for historic events like pytest_configure - ### so that we don't have to do the below pytest_configure hook def pytest_plugin_registered(self, plugin): - if plugin in self._seenplugins: - return nodeid = None try: p = py.path.local(plugin.__file__) @@ -1675,13 +1777,6 @@ def pytest_plugin_registered(self, plugin): if p.sep != "/": nodeid = nodeid.replace(p.sep, "/") self.parsefactories(plugin, nodeid) - self._seenplugins.add(plugin) - - @pytest.mark.tryfirst - def pytest_configure(self, config): - plugins = config.pluginmanager.getplugins() - for plugin in plugins: - self.pytest_plugin_registered(plugin) def _getautousenames(self, nodeid): """ return a tuple of fixture names to be used. 
""" @@ -1735,7 +1830,10 @@ def pytest_generate_tests(self, metafunc): if fixturedef.params is not None: func_params = getattr(getattr(metafunc.function, 'parametrize', None), 'args', [[None]]) # skip directly parametrized arguments - if argname not in func_params and argname not in func_params[0]: + argnames = func_params[0] + if not isinstance(argnames, (tuple, list)): + argnames = [x.strip() for x in argnames.split(",") if x.strip()] + if argname not in func_params and argname not in argnames: metafunc.parametrize(argname, fixturedef.params, indirect=True, scope=fixturedef.scope, ids=fixturedef.ids) diff --git a/_pytest/recwarn.py b/_pytest/recwarn.py index 875cb510e75..abefdfac129 100644 --- a/_pytest/recwarn.py +++ b/_pytest/recwarn.py @@ -1,10 +1,14 @@ """ recording warnings during test function execution. """ +import inspect +import py import sys import warnings +import pytest -def pytest_funcarg__recwarn(request): +@pytest.yield_fixture +def recwarn(request): """Return a WarningsRecorder instance that provides these methods: * ``pop(category=None)``: return last warning matching the category. @@ -13,83 +17,173 @@ def pytest_funcarg__recwarn(request): See http://docs.python.org/library/warnings.html for information on warning categories. """ - if sys.version_info >= (2,7): - oldfilters = warnings.filters[:] - warnings.simplefilter('default') - def reset_filters(): - warnings.filters[:] = oldfilters - request.addfinalizer(reset_filters) wrec = WarningsRecorder() - request.addfinalizer(wrec.finalize) - return wrec + with wrec: + warnings.simplefilter('default') + yield wrec + def pytest_namespace(): - return {'deprecated_call': deprecated_call} + return {'deprecated_call': deprecated_call, + 'warns': warns} + def deprecated_call(func, *args, **kwargs): - """ assert that calling ``func(*args, **kwargs)`` - triggers a DeprecationWarning. + """Assert that ``func(*args, **kwargs)`` triggers a DeprecationWarning. 
""" - l = [] - oldwarn_explicit = getattr(warnings, 'warn_explicit') - def warn_explicit(*args, **kwargs): - l.append(args) - oldwarn_explicit(*args, **kwargs) - oldwarn = getattr(warnings, 'warn') - def warn(*args, **kwargs): - l.append(args) - oldwarn(*args, **kwargs) - - warnings.warn_explicit = warn_explicit - warnings.warn = warn - try: + wrec = WarningsRecorder() + with wrec: + warnings.simplefilter('always') # ensure all warnings are triggered ret = func(*args, **kwargs) - finally: - warnings.warn_explicit = oldwarn_explicit - warnings.warn = oldwarn - if not l: + + if not any(r.category is DeprecationWarning for r in wrec): __tracebackhide__ = True - raise AssertionError("%r did not produce DeprecationWarning" %(func,)) + raise AssertionError("%r did not produce DeprecationWarning" % (func,)) + return ret -class RecordedWarning: - def __init__(self, message, category, filename, lineno, line): +def warns(expected_warning, *args, **kwargs): + """Assert that code raises a particular class of warning. + + Specifically, the input @expected_warning can be a warning class or + tuple of warning classes, and the code must return that warning + (if a single class) or one of those warnings (if a tuple). + + This helper produces a list of ``warnings.WarningMessage`` objects, + one for each warning raised. + + This function can be used as a context manager, or any of the other ways + ``pytest.raises`` can be used:: + + >>> with warns(RuntimeWarning): + ... 
warnings.warn("my warning", RuntimeWarning) + """ + wcheck = WarningsChecker(expected_warning) + if not args: + return wcheck + elif isinstance(args[0], str): + code, = args + assert isinstance(code, str) + frame = sys._getframe(1) + loc = frame.f_locals.copy() + loc.update(kwargs) + + with wcheck: + code = py.code.Source(code).compile() + py.builtin.exec_(code, frame.f_globals, loc) + else: + func = args[0] + with wcheck: + return func(*args[1:], **kwargs) + + +class RecordedWarning(object): + def __init__(self, message, category, filename, lineno, file, line): self.message = message self.category = category self.filename = filename self.lineno = lineno + self.file = file self.line = line -class WarningsRecorder: - def __init__(self): - self.list = [] - def showwarning(message, category, filename, lineno, line=0): - self.list.append(RecordedWarning( - message, category, filename, lineno, line)) - try: - self.old_showwarning(message, category, - filename, lineno, line=line) - except TypeError: - # < python2.6 - self.old_showwarning(message, category, filename, lineno) - self.old_showwarning = warnings.showwarning - warnings.showwarning = showwarning + +class WarningsRecorder(object): + """A context manager to record raised warnings. + + Adapted from `warnings.catch_warnings`. 
+ """ + + def __init__(self, module=None): + self._module = sys.modules['warnings'] if module is None else module + self._entered = False + self._list = [] + + @property + def list(self): + """The list of recorded warnings.""" + return self._list + + def __getitem__(self, i): + """Get a recorded warning by index.""" + return self._list[i] + + def __iter__(self): + """Iterate through the recorded warnings.""" + return iter(self._list) + + def __len__(self): + """The number of recorded warnings.""" + return len(self._list) def pop(self, cls=Warning): - """ pop the first recorded warning, raise exception if not exists.""" - for i, w in enumerate(self.list): + """Pop the first recorded warning, raise exception if not exists.""" + for i, w in enumerate(self._list): if issubclass(w.category, cls): - return self.list.pop(i) + return self._list.pop(i) __tracebackhide__ = True - assert 0, "%r not found in %r" %(cls, self.list) - - #def resetregistry(self): - # warnings.onceregistry.clear() - # warnings.__warningregistry__.clear() + raise AssertionError("%r not found in warning list" % cls) def clear(self): - self.list[:] = [] + """Clear the list of recorded warnings.""" + self._list[:] = [] + + def __enter__(self): + if self._entered: + __tracebackhide__ = True + raise RuntimeError("Cannot enter %r twice" % self) + self._entered = True + self._filters = self._module.filters + self._module.filters = self._filters[:] + self._showwarning = self._module.showwarning + + def showwarning(message, category, filename, lineno, + file=None, line=None): + self._list.append(RecordedWarning( + message, category, filename, lineno, file, line)) + + # still perform old showwarning functionality + self._showwarning( + message, category, filename, lineno, file=file, line=line) + + self._module.showwarning = showwarning + + # allow the same warning to be raised more than once + self._module.simplefilter('always', append=True) + + return self + + def __exit__(self, *exc_info): + if not 
self._entered: + __tracebackhide__ = True + raise RuntimeError("Cannot exit %r without entering first" % self) + self._module.filters = self._filters + self._module.showwarning = self._showwarning + + +class WarningsChecker(WarningsRecorder): + def __init__(self, expected_warning=None, module=None): + super(WarningsChecker, self).__init__(module=module) + + msg = ("exceptions must be old-style classes or " + "derived from Warning, not %s") + if isinstance(expected_warning, tuple): + for exc in expected_warning: + if not inspect.isclass(exc): + raise TypeError(msg % type(exc)) + elif inspect.isclass(expected_warning): + expected_warning = (expected_warning,) + elif expected_warning is not None: + raise TypeError(msg % type(expected_warning)) + + self.expected_warning = expected_warning + + def __exit__(self, *exc_info): + super(WarningsChecker, self).__exit__(*exc_info) - def finalize(self): - warnings.showwarning = self.old_showwarning + # only check if we're not currently handling an exception + if all(a is None for a in exc_info): + if self.expected_warning is not None: + if not any(r.category in self.expected_warning for r in self): + __tracebackhide__ = True + pytest.fail("DID NOT WARN") diff --git a/_pytest/skipping.py b/_pytest/skipping.py index 2f931d87967..36e54d7d8ae 100644 --- a/_pytest/skipping.py +++ b/_pytest/skipping.py @@ -145,7 +145,7 @@ def getexplanation(self): return expl -@pytest.mark.tryfirst +@pytest.hookimpl(tryfirst=True) def pytest_runtest_setup(item): evalskip = MarkEvaluator(item, 'skipif') if evalskip.istrue(): @@ -164,7 +164,7 @@ def check_xfail_no_run(item): if not evalxfail.get('run', True): pytest.xfail("[NOTRUN] " + evalxfail.getexplanation()) -@pytest.mark.hookwrapper +@pytest.hookimpl(hookwrapper=True) def pytest_runtest_makereport(item, call): outcome = yield rep = outcome.get_result() diff --git a/_pytest/standalonetemplate.py b/_pytest/standalonetemplate.py index 46d5e41f41a..484d5d1b25f 100755 --- 
a/_pytest/standalonetemplate.py +++ b/_pytest/standalonetemplate.py @@ -68,6 +68,11 @@ def get_source(self, name): return res if __name__ == "__main__": + try: + import pkg_resources # noqa + except ImportError: + sys.stderr.write("ERROR: setuptools not installed\n") + sys.exit(2) if sys.version_info >= (3, 0): exec("def do_exec(co, loc): exec(co, loc)\n") import pickle @@ -80,6 +85,5 @@ def get_source(self, name): importer = DictImporter(sources) sys.meta_path.insert(0, importer) - entry = "@ENTRY@" do_exec(entry, locals()) # noqa diff --git a/_pytest/terminal.py b/_pytest/terminal.py index deff75b1c51..b69cc329fa6 100644 --- a/_pytest/terminal.py +++ b/_pytest/terminal.py @@ -2,10 +2,14 @@ This is a good source for looking at the various reporting hooks. """ +from _pytest.main import EXIT_OK, EXIT_TESTSFAILED, EXIT_INTERRUPTED, \ + EXIT_USAGEERROR, EXIT_NOTESTSCOLLECTED import pytest +import pluggy import py import sys import time +import platform def pytest_addoption(parser): @@ -17,7 +21,7 @@ def pytest_addoption(parser): group._addoption('-r', action="store", dest="reportchars", default=None, metavar="chars", help="show extra test summary info as specified by chars (f)ailed, " - "(E)error, (s)skipped, (x)failed, (X)passed (w)warnings.") + "(E)error, (s)skipped, (x)failed, (X)passed (w)warnings (a)all.") group._addoption('-l', '--showlocals', action="store_true", dest="showlocals", default=False, help="show locals in tracebacks (disabled by default).") @@ -62,8 +66,10 @@ def getreportopt(config): reportchars = config.option.reportchars if reportchars: for char in reportchars: - if char not in reportopts: + if char not in reportopts and char != 'a': reportopts += char + elif char == 'a': + reportopts = 'fEsxXw' return reportopts def pytest_report_teststatus(report): @@ -162,6 +168,8 @@ def pytest_internalerror(self, excrepr): def pytest_logwarning(self, code, fslocation, message, nodeid): warnings = self.stats.setdefault("warnings", []) + if 
isinstance(fslocation, tuple): + fslocation = "%s:%d" % fslocation warning = WarningReport(code=code, fslocation=fslocation, message=message, nodeid=nodeid) warnings.append(warning) @@ -263,18 +271,19 @@ def report_collect(self, final=False): def pytest_collection_modifyitems(self): self.report_collect(True) - @pytest.mark.trylast + @pytest.hookimpl(trylast=True) def pytest_sessionstart(self, session): self._sessionstarttime = time.time() if not self.showheader: return self.write_sep("=", "test session starts", bold=True) - verinfo = ".".join(map(str, sys.version_info[:3])) + verinfo = platform.python_version() msg = "platform %s -- Python %s" % (sys.platform, verinfo) if hasattr(sys, 'pypy_version_info'): verinfo = ".".join(map(str, sys.pypy_version_info[:3])) msg += "[pypy-%s-%s]" % (verinfo, sys.pypy_version_info[3]) - msg += " -- py-%s -- pytest-%s" % (py.__version__, pytest.__version__) + msg += ", pytest-%s, py-%s, pluggy-%s" % ( + pytest.__version__, py.__version__, pluggy.__version__) if self.verbosity > 0 or self.config.option.debug or \ getattr(self.config.option, 'pastebin', None): msg += " -- " + str(sys.executable) @@ -290,15 +299,12 @@ def pytest_report_header(self, config): if config.inifile: inifile = config.rootdir.bestrelpath(config.inifile) lines = ["rootdir: %s, inifile: %s" %(config.rootdir, inifile)] - plugininfo = config.pluginmanager._plugin_distinfo + + plugininfo = config.pluginmanager.list_plugin_distinfo() if plugininfo: - l = [] - for dist, plugin in plugininfo: - name = dist.project_name - if name.startswith("pytest-"): - name = name[7:] - l.append(name) - lines.append("plugins: %s" % ", ".join(l)) + + lines.append( + "plugins: %s" % ", ".join(_plugin_nameversions(plugininfo))) return lines def pytest_collection_finish(self, session): @@ -348,17 +354,20 @@ def _printcollecteditems(self, items): indent = (len(stack) - 1) * " " self._tw.line("%s%s" % (indent, col)) - @pytest.mark.hookwrapper + @pytest.hookimpl(hookwrapper=True) def 
pytest_sessionfinish(self, exitstatus): outcome = yield outcome.get_result() self._tw.line("") - if exitstatus in (0, 1, 2, 4): + summary_exit_codes = ( + EXIT_OK, EXIT_TESTSFAILED, EXIT_INTERRUPTED, EXIT_USAGEERROR, + EXIT_NOTESTSCOLLECTED) + if exitstatus in summary_exit_codes: self.summary_errors() self.summary_failures() self.summary_warnings() self.config.hook.pytest_terminal_summary(terminalreporter=self) - if exitstatus == 2: + if exitstatus == EXIT_INTERRUPTED: self._report_keyboardinterrupt() del self._keyboardinterrupt_memo self.summary_deselected() @@ -431,7 +440,7 @@ def summary_warnings(self): warnings = self.stats.get("warnings") if not warnings: return - self.write_sep("=", "warning summary") + self.write_sep("=", "pytest-warning summary") for w in warnings: self._tw.line("W%s %s %s" % (w.code, w.fslocation, w.message)) @@ -479,26 +488,9 @@ def _outrep_summary(self, rep): def summary_stats(self): session_duration = time.time() - self._sessionstarttime - - keys = ("failed passed skipped deselected " - "xfailed xpassed warnings").split() - for key in self.stats.keys(): - if key not in keys: - keys.append(key) - parts = [] - for key in keys: - if key: # setup/teardown reports have an empty key, ignore them - val = self.stats.get(key, None) - if val: - parts.append("%d %s" % (len(val), key)) - line = ", ".join(parts) + (line, color) = build_summary_stats_line(self.stats) msg = "%s in %.2f seconds" % (line, session_duration) - - markup = {'bold': True} - if 'failed' in self.stats or 'error' in self.stats: - markup = {'red': True, 'bold': True} - else: - markup = {'green': True, 'bold': True} + markup = {color: True, 'bold': True} if self.verbosity >= 0: self.write_sep("=", msg, **markup) @@ -534,3 +526,46 @@ def flatten(l): else: yield x +def build_summary_stats_line(stats): + keys = ("failed passed skipped deselected " + "xfailed xpassed warnings error").split() + key_translation = {'warnings': 'pytest-warnings'} + unknown_key_seen = False + for key in 
stats.keys(): + if key not in keys: + if key: # setup/teardown reports have an empty key, ignore them + keys.append(key) + unknown_key_seen = True + parts = [] + for key in keys: + val = stats.get(key, None) + if val: + key_name = key_translation.get(key, key) + parts.append("%d %s" % (len(val), key_name)) + line = ", ".join(parts) + + if 'failed' in stats or 'error' in stats: + color = 'red' + elif 'warnings' in stats or unknown_key_seen: + color = 'yellow' + elif 'passed' in stats: + color = 'green' + else: + color = 'yellow' + + return (line, color) + + +def _plugin_nameversions(plugininfo): + l = [] + for plugin, dist in plugininfo: + # gets us name and version! + name = '{dist.project_name}-{dist.version}'.format(dist=dist) + # questionable convenience, but it keeps things short + if name.startswith("pytest-"): + name = name[7:] + # we decided to print python package names + # they can have more than one plugin + if name not in l: + l.append(name) + return l diff --git a/_pytest/tmpdir.py b/_pytest/tmpdir.py index 5e83ec9310e..44e980e2e26 100644 --- a/_pytest/tmpdir.py +++ b/_pytest/tmpdir.py @@ -6,7 +6,12 @@ from _pytest.monkeypatch import monkeypatch -class TempdirHandler: +class TempdirFactory: + """Factory for temporary directories under the common base temp directory. + + The base directory can be configured using the ``--basetemp`` option. + """ + def __init__(self, config): self.config = config self.trace = config.trace.get("tmpdir") @@ -22,6 +27,10 @@ def ensuretemp(self, string, dir=1): return self.getbasetemp().ensure(string, dir=dir) def mktemp(self, basename, numbered=True): + """Create a subdirectory of the base temporary directory and return it. + If ``numbered``, ensure the directory is unique by adding a number + prefix greater than any existing one. 
+ """ basetemp = self.getbasetemp() if not numbered: p = basetemp.mkdir(basename) @@ -47,7 +56,7 @@ def getbasetemp(self): # make_numbered_dir() call import getpass temproot = py.path.local.get_temproot() - rootdir = temproot.join('pytest-%s' % getpass.getuser()) + rootdir = temproot.join('pytest-of-%s' % getpass.getuser()) rootdir.ensure(dir=1) basetemp = py.path.local.make_numbered_dir(prefix='pytest-', rootdir=rootdir) @@ -58,15 +67,33 @@ def getbasetemp(self): def finish(self): self.trace("finish") +# backward compatibility +TempdirHandler = TempdirFactory + + def pytest_configure(config): + """Create a TempdirFactory and attach it to the config object. + + This is to comply with existing plugins which expect the handler to be + available at pytest_configure time, but ideally should be moved entirely + to the tmpdir_factory session fixture. + """ mp = monkeypatch() - t = TempdirHandler(config) + t = TempdirFactory(config) config._cleanup.extend([mp.undo, t.finish]) mp.setattr(config, '_tmpdirhandler', t, raising=False) mp.setattr(pytest, 'ensuretemp', t.ensuretemp, raising=False) + +@pytest.fixture(scope='session') +def tmpdir_factory(request): + """Return a TempdirFactory instance for the test session. 
+ """ + return request.config._tmpdirhandler + + @pytest.fixture -def tmpdir(request): +def tmpdir(request, tmpdir_factory): """return a temporary directory path object which is unique to each test function invocation, created as a sub directory of the base temporary @@ -78,5 +105,5 @@ def tmpdir(request): MAXVAL = 30 if len(name) > MAXVAL: name = name[:MAXVAL] - x = request.config._tmpdirhandler.mktemp(name, numbered=True) + x = tmpdir_factory.mktemp(name, numbered=True) return x diff --git a/_pytest/unittest.py b/_pytest/unittest.py index 0666fba8776..3b08c9f90bf 100644 --- a/_pytest/unittest.py +++ b/_pytest/unittest.py @@ -143,7 +143,7 @@ def _prunetraceback(self, excinfo): if traceback: excinfo.traceback = traceback -@pytest.mark.tryfirst +@pytest.hookimpl(tryfirst=True) def pytest_runtest_makereport(item, call): if isinstance(item, TestCaseFunction): if item._excinfo: @@ -155,7 +155,7 @@ def pytest_runtest_makereport(item, call): # twisted trial support -@pytest.mark.hookwrapper +@pytest.hookimpl(hookwrapper=True) def pytest_runtest_protocol(item): if isinstance(item, TestCaseFunction) and \ 'twisted.trial.unittest' in sys.modules: diff --git a/doc/en/Makefile b/doc/en/Makefile index ce62f0ce6a8..e54b6b09a6a 100644 --- a/doc/en/Makefile +++ b/doc/en/Makefile @@ -46,7 +46,7 @@ installall: clean install installpdf @echo "done" regen: - PYTHONDONTWRITEBYTECODE=1 COLUMNS=76 regendoc --update *.txt */*.txt + PYTHONDONTWRITEBYTECODE=1 COLUMNS=76 regendoc --update *.rst */*.rst html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html diff --git a/doc/en/_getdoctarget.py b/doc/en/_getdoctarget.py index 70427f7452e..20e487bb738 100755 --- a/doc/en/_getdoctarget.py +++ b/doc/en/_getdoctarget.py @@ -6,7 +6,7 @@ def get_version_string(): fn = py.path.local(__file__).join("..", "..", "..", "_pytest", "__init__.py") for line in fn.readlines(): - if "version" in line: + if "version" in line and not line.strip().startswith('#'): return eval(line.split("=")[-1]) def 
get_minor_version_string(): diff --git a/doc/en/adopt.txt b/doc/en/adopt.rst similarity index 97% rename from doc/en/adopt.txt rename to doc/en/adopt.rst index 868f16b5d65..aead96e7f3d 100644 --- a/doc/en/adopt.txt +++ b/doc/en/adopt.rst @@ -9,7 +9,7 @@ We will pair experienced pytest users with open source projects, for a month's e In 2015 we are trying this for the first time. In February and March 2015 we will gather volunteers on both sides, in April we will do the work, and in May we will evaluate how it went. This effort is being coordinated by Brianna Laugher. If you have any questions or comments, you can raise them on the `@pytestdotorg twitter account `_ the `issue tracker`_ or the `pytest-dev mailing list`_. -.. _`issue tracker`: https://bitbucket.org/pytest-dev/pytest/issue/676/adopt-pytest-month-2015 +.. _`issue tracker`: https://github.com/pytest-dev/pytest/issues/676 .. _`pytest-dev mailing list`: https://mail.python.org/mailman/listinfo/pytest-dev diff --git a/doc/en/announce/index.txt b/doc/en/announce/index.rst similarity index 97% rename from doc/en/announce/index.txt rename to doc/en/announce/index.rst index 884308a7f32..b5c0228d7c2 100644 --- a/doc/en/announce/index.txt +++ b/doc/en/announce/index.rst @@ -5,6 +5,7 @@ Release announcements .. 
toctree:: :maxdepth: 2 + release-2.7.2 release-2.7.1 release-2.7.0 release-2.6.3 diff --git a/doc/en/announce/release-2.0.0.txt b/doc/en/announce/release-2.0.0.rst similarity index 100% rename from doc/en/announce/release-2.0.0.txt rename to doc/en/announce/release-2.0.0.rst diff --git a/doc/en/announce/release-2.0.1.txt b/doc/en/announce/release-2.0.1.rst similarity index 100% rename from doc/en/announce/release-2.0.1.txt rename to doc/en/announce/release-2.0.1.rst diff --git a/doc/en/announce/release-2.0.2.txt b/doc/en/announce/release-2.0.2.rst similarity index 100% rename from doc/en/announce/release-2.0.2.txt rename to doc/en/announce/release-2.0.2.rst diff --git a/doc/en/announce/release-2.0.3.txt b/doc/en/announce/release-2.0.3.rst similarity index 100% rename from doc/en/announce/release-2.0.3.txt rename to doc/en/announce/release-2.0.3.rst diff --git a/doc/en/announce/release-2.1.0.txt b/doc/en/announce/release-2.1.0.rst similarity index 100% rename from doc/en/announce/release-2.1.0.txt rename to doc/en/announce/release-2.1.0.rst diff --git a/doc/en/announce/release-2.1.1.txt b/doc/en/announce/release-2.1.1.rst similarity index 100% rename from doc/en/announce/release-2.1.1.txt rename to doc/en/announce/release-2.1.1.rst diff --git a/doc/en/announce/release-2.1.2.txt b/doc/en/announce/release-2.1.2.rst similarity index 100% rename from doc/en/announce/release-2.1.2.txt rename to doc/en/announce/release-2.1.2.rst diff --git a/doc/en/announce/release-2.1.3.txt b/doc/en/announce/release-2.1.3.rst similarity index 100% rename from doc/en/announce/release-2.1.3.txt rename to doc/en/announce/release-2.1.3.rst diff --git a/doc/en/announce/release-2.2.0.txt b/doc/en/announce/release-2.2.0.rst similarity index 100% rename from doc/en/announce/release-2.2.0.txt rename to doc/en/announce/release-2.2.0.rst diff --git a/doc/en/announce/release-2.2.1.txt b/doc/en/announce/release-2.2.1.rst similarity index 100% rename from doc/en/announce/release-2.2.1.txt rename to 
doc/en/announce/release-2.2.1.rst diff --git a/doc/en/announce/release-2.2.2.txt b/doc/en/announce/release-2.2.2.rst similarity index 100% rename from doc/en/announce/release-2.2.2.txt rename to doc/en/announce/release-2.2.2.rst diff --git a/doc/en/announce/release-2.2.4.txt b/doc/en/announce/release-2.2.4.rst similarity index 100% rename from doc/en/announce/release-2.2.4.txt rename to doc/en/announce/release-2.2.4.rst diff --git a/doc/en/announce/release-2.3.0.txt b/doc/en/announce/release-2.3.0.rst similarity index 100% rename from doc/en/announce/release-2.3.0.txt rename to doc/en/announce/release-2.3.0.rst diff --git a/doc/en/announce/release-2.3.1.txt b/doc/en/announce/release-2.3.1.rst similarity index 100% rename from doc/en/announce/release-2.3.1.txt rename to doc/en/announce/release-2.3.1.rst diff --git a/doc/en/announce/release-2.3.2.txt b/doc/en/announce/release-2.3.2.rst similarity index 100% rename from doc/en/announce/release-2.3.2.txt rename to doc/en/announce/release-2.3.2.rst diff --git a/doc/en/announce/release-2.3.3.txt b/doc/en/announce/release-2.3.3.rst similarity index 100% rename from doc/en/announce/release-2.3.3.txt rename to doc/en/announce/release-2.3.3.rst diff --git a/doc/en/announce/release-2.3.4.txt b/doc/en/announce/release-2.3.4.rst similarity index 100% rename from doc/en/announce/release-2.3.4.txt rename to doc/en/announce/release-2.3.4.rst diff --git a/doc/en/announce/release-2.3.5.txt b/doc/en/announce/release-2.3.5.rst similarity index 100% rename from doc/en/announce/release-2.3.5.txt rename to doc/en/announce/release-2.3.5.rst diff --git a/doc/en/announce/release-2.4.0.txt b/doc/en/announce/release-2.4.0.rst similarity index 100% rename from doc/en/announce/release-2.4.0.txt rename to doc/en/announce/release-2.4.0.rst diff --git a/doc/en/announce/release-2.4.1.txt b/doc/en/announce/release-2.4.1.rst similarity index 100% rename from doc/en/announce/release-2.4.1.txt rename to doc/en/announce/release-2.4.1.rst diff --git 
a/doc/en/announce/release-2.4.2.txt b/doc/en/announce/release-2.4.2.rst similarity index 100% rename from doc/en/announce/release-2.4.2.txt rename to doc/en/announce/release-2.4.2.rst diff --git a/doc/en/announce/release-2.5.0.txt b/doc/en/announce/release-2.5.0.rst similarity index 100% rename from doc/en/announce/release-2.5.0.txt rename to doc/en/announce/release-2.5.0.rst diff --git a/doc/en/announce/release-2.5.1.txt b/doc/en/announce/release-2.5.1.rst similarity index 100% rename from doc/en/announce/release-2.5.1.txt rename to doc/en/announce/release-2.5.1.rst diff --git a/doc/en/announce/release-2.5.2.txt b/doc/en/announce/release-2.5.2.rst similarity index 100% rename from doc/en/announce/release-2.5.2.txt rename to doc/en/announce/release-2.5.2.rst diff --git a/doc/en/announce/release-2.6.0.txt b/doc/en/announce/release-2.6.0.rst similarity index 100% rename from doc/en/announce/release-2.6.0.txt rename to doc/en/announce/release-2.6.0.rst diff --git a/doc/en/announce/release-2.6.1.txt b/doc/en/announce/release-2.6.1.rst similarity index 100% rename from doc/en/announce/release-2.6.1.txt rename to doc/en/announce/release-2.6.1.rst diff --git a/doc/en/announce/release-2.6.2.txt b/doc/en/announce/release-2.6.2.rst similarity index 100% rename from doc/en/announce/release-2.6.2.txt rename to doc/en/announce/release-2.6.2.rst diff --git a/doc/en/announce/release-2.6.3.txt b/doc/en/announce/release-2.6.3.rst similarity index 100% rename from doc/en/announce/release-2.6.3.txt rename to doc/en/announce/release-2.6.3.rst diff --git a/doc/en/announce/release-2.7.0.txt b/doc/en/announce/release-2.7.0.rst similarity index 100% rename from doc/en/announce/release-2.7.0.txt rename to doc/en/announce/release-2.7.0.rst diff --git a/doc/en/announce/release-2.7.1.txt b/doc/en/announce/release-2.7.1.rst similarity index 100% rename from doc/en/announce/release-2.7.1.txt rename to doc/en/announce/release-2.7.1.rst diff --git a/doc/en/announce/release-2.7.2.txt 
b/doc/en/announce/release-2.7.2.rst similarity index 100% rename from doc/en/announce/release-2.7.2.txt rename to doc/en/announce/release-2.7.2.rst diff --git a/doc/en/apiref.rst b/doc/en/apiref.rst new file mode 100644 index 00000000000..6b9a6a5e357 --- /dev/null +++ b/doc/en/apiref.rst @@ -0,0 +1,27 @@ + +.. _apiref: + +pytest reference documentation +================================================ + +.. toctree:: + :maxdepth: 2 + + builtin + customize + assert + fixture + yieldfixture + parametrize + xunit_setup + capture + monkeypatch + xdist + tmpdir + mark + skipping + recwarn + unittest + nose + doctest + diff --git a/doc/en/apiref.txt b/doc/en/apiref.txt deleted file mode 100644 index 5cf1c0d9875..00000000000 --- a/doc/en/apiref.txt +++ /dev/null @@ -1,27 +0,0 @@ - -.. _apiref: - -pytest reference documentation -================================================ - -.. toctree:: - :maxdepth: 2 - - builtin.txt - customize.txt - assert.txt - fixture.txt - yieldfixture.txt - parametrize.txt - xunit_setup.txt - capture.txt - monkeypatch.txt - xdist.txt - tmpdir.txt - mark.txt - skipping.txt - recwarn.txt - unittest.txt - nose.txt - doctest.txt - diff --git a/doc/en/assert.txt b/doc/en/assert.rst similarity index 87% rename from doc/en/assert.txt rename to doc/en/assert.rst index 0c07de0c143..c0576ec5d3a 100644 --- a/doc/en/assert.txt +++ b/doc/en/assert.rst @@ -25,15 +25,15 @@ to assert that your function returns a certain value. 
If this assertion fails you will see the return value of the function call:: $ py.test test_assert1.py - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-87, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 1 items test_assert1.py F - ================================= FAILURES ================================= - ______________________________ test_function _______________________________ + ======= FAILURES ======== + _______ test_function ________ def test_function(): > assert f() == 4 @@ -41,7 +41,7 @@ you will see the return value of the function call:: E + where 3 = f() test_assert1.py:5: AssertionError - ========================= 1 failed in 0.01 seconds ========================= + ======= 1 failed in 0.12 seconds ======== ``pytest`` has support for showing the values of the most common subexpressions including calls, attributes, comparisons, and binary and unary @@ -114,6 +114,16 @@ like documenting unfixed bugs (where the test describes what "should" happen) or bugs in dependencies. +.. _`assertwarns`: + +Assertions about expected warnings +----------------------------------------- + +.. versionadded:: 2.8 + +You can check that code raises a particular warning using +:ref:`pytest.warns `. + .. _newreport: @@ -135,15 +145,15 @@ when it encounters comparisons. 
For example:: if you run this module:: $ py.test test_assert2.py - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-87, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 1 items test_assert2.py F - ================================= FAILURES ================================= - ___________________________ test_set_comparison ____________________________ + ======= FAILURES ======== + _______ test_set_comparison ________ def test_set_comparison(): set1 = set("1308") @@ -157,7 +167,7 @@ if you run this module:: E Use -v to get the full diff test_assert2.py:5: AssertionError - ========================= 1 failed in 0.01 seconds ========================= + ======= 1 failed in 0.12 seconds ======== Special comparisons are done for a number of cases: @@ -202,8 +212,8 @@ the conftest file:: $ py.test -q test_foocompare.py F - ================================= FAILURES ================================= - _______________________________ test_compare _______________________________ + ======= FAILURES ======== + _______ test_compare ________ def test_compare(): f1 = Foo(1) @@ -213,7 +223,7 @@ the conftest file:: E vals: 1 != 2 test_foocompare.py:8: AssertionError - 1 failed in 0.01 seconds + 1 failed in 0.12 seconds .. _assert-details: .. _`assert introspection`: @@ -228,9 +238,7 @@ Reporting details about a failing assertion is achieved either by rewriting assert statements before they are run or re-evaluating the assert expression and recording the intermediate values. Which technique is used depends on the location of the assert, ``pytest`` configuration, and Python version being used -to run ``pytest``. Note that for assert statements with a manually provided -message, i.e. 
``assert expr, message``, no assertion introspection takes place -and the manually provided message will be rendered in tracebacks. +to run ``pytest``. By default, if the Python version is greater than or equal to 2.6, ``pytest`` rewrites assert statements in test modules. Rewritten assert statements put diff --git a/doc/en/bash-completion.txt b/doc/en/bash-completion.rst similarity index 100% rename from doc/en/bash-completion.txt rename to doc/en/bash-completion.rst diff --git a/doc/en/builtin.txt b/doc/en/builtin.rst similarity index 99% rename from doc/en/builtin.txt rename to doc/en/builtin.rst index d3cfc8b0c66..713e625db1e 100644 --- a/doc/en/builtin.txt +++ b/doc/en/builtin.rst @@ -115,4 +115,4 @@ You can ask for available builtin or project-custom directory. The returned object is a `py.path.local`_ path object. - in 0.00 seconds + in 0.12 seconds diff --git a/doc/en/capture.txt b/doc/en/capture.rst similarity index 85% rename from doc/en/capture.txt rename to doc/en/capture.rst index a8c6e6c7d4c..c21ebf54b37 100644 --- a/doc/en/capture.txt +++ b/doc/en/capture.rst @@ -63,31 +63,33 @@ and running this module will show you precisely the output of the failing function and hide the other one:: $ py.test - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-90, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items test_module.py .F - ================================= FAILURES ================================= - ________________________________ test_func2 ________________________________ + ======= FAILURES ======== + _______ test_func2 ________ def test_func2(): > assert False E assert False test_module.py:9: AssertionError - -------------------------- Captured stdout setup --------------------------- - setting up - 
==================== 1 failed, 1 passed in 0.01 seconds ==================== + ---------------------------- Captured stdout setup ----------------------------- + setting up + ======= 1 failed, 1 passed in 0.12 seconds ======== Accessing captured output from a test function --------------------------------------------------- The ``capsys`` and ``capfd`` fixtures allow to access stdout/stderr output created during test execution. Here is an example test function -that performs some output related checks:: +that performs some output related checks: + +.. code-block:: python def test_myoutput(capsys): # or use "capfd" for fd-level print ("hello") diff --git a/doc/en/changelog.txt b/doc/en/changelog.rst similarity index 100% rename from doc/en/changelog.txt rename to doc/en/changelog.rst diff --git a/doc/en/conf.py b/doc/en/conf.py index 3df505434e4..aca0442c5d6 100644 --- a/doc/en/conf.py +++ b/doc/en/conf.py @@ -47,7 +47,7 @@ templates_path = ['_templates'] # The suffix of source filenames. -source_suffix = '.txt' +source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' @@ -73,13 +73,13 @@ # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. -exclude_patterns = ['links.inc', '_build', 'naming20.txt', 'test/*', +exclude_patterns = ['links.inc', '_build', 'naming20.rst', 'test/*', "old_*", '*attic*', '*/attic*', - 'funcargs.txt', - 'setup.txt', - 'example/remoteinterp.txt', + 'funcargs.rst', + 'setup.rst', + 'example/remoteinterp.rst', ] diff --git a/doc/en/contact.txt b/doc/en/contact.rst similarity index 95% rename from doc/en/contact.txt rename to doc/en/contact.rst index 6799ae0f7c1..d4a1a03dee3 100644 --- a/doc/en/contact.txt +++ b/doc/en/contact.rst @@ -29,7 +29,7 @@ Contact channels - `merlinux.eu`_ offers pytest and tox-related professional teaching and consulting. -.. _`pytest issue tracker`: http://bitbucket.org/pytest-dev/pytest/issues/ +.. 
_`pytest issue tracker`: https://github.com/pytest-dev/pytest/issues .. _`old issue tracker`: http://bitbucket.org/hpk42/py-trunk/issues/ .. _`merlinux.eu`: http://merlinux.eu diff --git a/doc/en/contents.txt b/doc/en/contents.rst similarity index 90% rename from doc/en/contents.txt rename to doc/en/contents.rst index 4a85d9cc00f..dabfcbecb8e 100644 --- a/doc/en/contents.txt +++ b/doc/en/contents.rst @@ -17,11 +17,11 @@ Full pytest documentation example/index talks contributing - funcarg_compare.txt + funcarg_compare announce/index .. toctree:: :hidden: - changelog.txt + changelog diff --git a/doc/en/contributing.txt b/doc/en/contributing.rst similarity index 100% rename from doc/en/contributing.txt rename to doc/en/contributing.rst diff --git a/doc/en/customize.txt b/doc/en/customize.rst similarity index 78% rename from doc/en/customize.txt rename to doc/en/customize.rst index c2d4c261770..1c16556971a 100644 --- a/doc/en/customize.txt +++ b/doc/en/customize.rst @@ -89,7 +89,9 @@ How to change command line options defaults It can be tedious to type the same series of command line options every time you use ``pytest``. For example, if you always want to see detailed info on skipped and xfailed tests, as well as have terser "dot" -progress output, you can write it into a configuration file:: +progress output, you can write it into a configuration file: + +.. code-block:: ini # content of pytest.ini # (or tox.ini or setup.cfg) @@ -117,14 +119,16 @@ Builtin configuration file options .. confval:: addopts Add the specified ``OPTS`` to the set of command line arguments as if they - had been specified by the user. Example: if you have this ini file content:: + had been specified by the user. Example: if you have this ini file content: + + .. 
code-block:: ini - [pytest] - addopts = --maxfail=2 -rf # exit after 2 failures, report fail info + [pytest] + addopts = --maxfail=2 -rf # exit after 2 failures, report fail info issuing ``py.test test_hello.py`` actually means:: - py.test --maxfail=2 -rf test_hello.py + py.test --maxfail=2 -rf test_hello.py Default is to add no options. @@ -142,15 +146,36 @@ Builtin configuration file options Default patterns are ``'.*', 'CVS', '_darcs', '{arch}', '*.egg'``. Setting a ``norecursedirs`` replaces the default. Here is an example of - how to avoid certain directories:: + how to avoid certain directories: - # content of setup.cfg - [pytest] - norecursedirs = .svn _build tmp* + .. code-block:: ini + + # content of setup.cfg + [pytest] + norecursedirs = .svn _build tmp* This would tell ``pytest`` to not look into typical subversion or sphinx-build directories or into any ``tmp`` prefixed directory. +.. confval:: testpaths + + .. versionadded:: 2.8 + + Sets list of directories that should be searched for tests when + no specific directories or files are given in the command line when + executing pytest from the :ref:`rootdir ` directory. + Useful when all project tests are in a known location to speed up + test collection and to avoid picking up undesired tests by accident. + + .. code-block:: ini + + # content of pytest.ini + [pytest] + testpaths = testing doc + + This tells pytest to only look for tests in ``testing`` and ``doc`` + directories when executing from the root directory. + .. confval:: python_files One or more Glob-style file patterns determining which python files @@ -160,11 +185,13 @@ Builtin configuration file options One or more name prefixes or glob-style patterns determining which classes are considered for test collection. Here is an example of how to collect - tests from classes that end in ``Suite``:: + tests from classes that end in ``Suite``: - # content of pytest.ini - [pytest] - python_classes = *Suite + .. 
code-block:: ini + + # content of pytest.ini + [pytest] + python_classes = *Suite Note that ``unittest.TestCase`` derived classes are always collected regardless of this option, as ``unittest``'s own collection framework is used @@ -174,11 +201,13 @@ Builtin configuration file options One or more name prefixes or glob-patterns determining which test functions and methods are considered tests. Here is an example of how - to collect test functions and methods that end in ``_test``:: + to collect test functions and methods that end in ``_test``: - # content of pytest.ini - [pytest] - python_functions = *_test + .. code-block:: ini + + # content of pytest.ini + [pytest] + python_functions = *_test Note that this has no effect on methods that live on a ``unittest .TestCase`` derived class, as ``unittest``'s own collection framework is used @@ -190,3 +219,10 @@ Builtin configuration file options One or more doctest flag names from the standard ``doctest`` module. :doc:`See how py.test handles doctests `. + +.. confval:: confcutdir + + Sets a directory where search upwards for ``conftest.py`` files stops. + By default, pytest will stop searching for ``conftest.py`` files upwards + from ``pytest.ini``/``tox.ini``/``setup.cfg`` of the project if any, + or up to the file-system root. diff --git a/doc/en/doctest.txt b/doc/en/doctest.rst similarity index 68% rename from doc/en/doctest.txt rename to doc/en/doctest.rst index 1dbc8c3d476..a456488e33a 100644 --- a/doc/en/doctest.txt +++ b/doc/en/doctest.rst @@ -15,7 +15,9 @@ python test modules):: py.test --doctest-modules You can make these changes permanent in your project by -putting them into a pytest.ini file like this:: +putting them into a pytest.ini file like this: + +.. 
code-block:: ini # content of pytest.ini [pytest] @@ -43,14 +45,14 @@ and another like this:: then you can just invoke ``py.test`` without command line options:: $ py.test - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-96, inifile: pytest.ini + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini collected 1 items mymodule.py . - ========================= 1 passed in 0.06 seconds ========================= + ======= 1 passed in 0.12 seconds ======== It is possible to use fixtures using the ``getfixture`` helper:: @@ -70,3 +72,18 @@ ignore lengthy exception stack traces you can just write:: # content of pytest.ini [pytest] doctest_optionflags= NORMALIZE_WHITESPACE IGNORE_EXCEPTION_DETAIL + + +py.test also introduces a new ``ALLOW_UNICODE`` option flag: when enabled, the +``u`` prefix is stripped from unicode strings in expected doctest output. This +allows doctests which use unicode to run in Python 2 and 3 unchanged. 
+ +As with any other option flag, this flag can be enabled in ``pytest.ini`` using +the ``doctest_optionflags`` ini option or by an inline comment in the doc test +itself:: + + # content of example.rst + >>> get_unicode_greeting() # doctest: +ALLOW_UNICODE + 'Hello' + + diff --git a/doc/en/example/assertion/test_failures.py b/doc/en/example/assertion/test_failures.py index f1720f2d4c2..2e5cd20b194 100644 --- a/doc/en/example/assertion/test_failures.py +++ b/doc/en/example/assertion/test_failures.py @@ -7,7 +7,7 @@ def test_failure_demo_fails_properly(testdir): target = testdir.tmpdir.join(failure_demo.basename) failure_demo.copy(target) failure_demo.copy(testdir.tmpdir.join(failure_demo.basename)) - result = testdir.runpytest(target) + result = testdir.runpytest(target, syspathinsert=True) result.stdout.fnmatch_lines([ "*42 failed*" ]) diff --git a/doc/en/example/attic.txt b/doc/en/example/attic.rst similarity index 100% rename from doc/en/example/attic.txt rename to doc/en/example/attic.rst diff --git a/doc/en/example/index.txt b/doc/en/example/index.rst similarity index 87% rename from doc/en/example/index.txt rename to doc/en/example/index.rst index eb02e6c69ed..363de5ab714 100644 --- a/doc/en/example/index.txt +++ b/doc/en/example/index.rst @@ -25,10 +25,10 @@ The following examples aim at various use cases you might encounter. .. 
toctree:: :maxdepth: 2 - reportingdemo.txt - simple.txt - parametrize.txt - markers.txt - special.txt - pythoncollection.txt - nonpython.txt + reportingdemo + simple + parametrize + markers + special + pythoncollection + nonpython diff --git a/doc/en/example/markers.txt b/doc/en/example/markers.rst similarity index 75% rename from doc/en/example/markers.txt rename to doc/en/example/markers.rst index b694e8ee949..f001965aec7 100644 --- a/doc/en/example/markers.txt +++ b/doc/en/example/markers.rst @@ -30,30 +30,30 @@ You can "mark" a test function with custom metadata like this:: You can then restrict a test run to only run tests marked with ``webtest``:: $ py.test -v -m webtest - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 -- /tmp/sandbox/pytest/.tox/regen/bin/python3.4 - rootdir: /tmp/doc-exec-157, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 -- $PWD/.env/bin/python2.7 + rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 4 items test_server.py::test_send_http PASSED - =================== 3 tests deselected by "-m 'webtest'" =================== - ================== 1 passed, 3 deselected in 0.01 seconds ================== + ======= 3 tests deselected by "-m 'webtest'" ======== + ======= 1 passed, 3 deselected in 0.12 seconds ======== Or the inverse, running all tests except the webtest ones:: $ py.test -v -m "not webtest" - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 -- /tmp/sandbox/pytest/.tox/regen/bin/python3.4 - rootdir: /tmp/doc-exec-157, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 -- $PWD/.env/bin/python2.7 + rootdir: $REGENDOC_TMPDIR, inifile: collecting ... 
collected 4 items test_server.py::test_something_quick PASSED test_server.py::test_another PASSED test_server.py::TestClass::test_method PASSED - ================= 1 tests deselected by "-m 'not webtest'" ================= - ================== 3 passed, 1 deselected in 0.01 seconds ================== + ======= 1 tests deselected by "-m 'not webtest'" ======== + ======= 3 passed, 1 deselected in 0.12 seconds ======== Selecting tests based on their node ID -------------------------------------- @@ -63,39 +63,39 @@ arguments to select only specified tests. This makes it easy to select tests based on their module, class, method, or function name:: $ py.test -v test_server.py::TestClass::test_method - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 -- /tmp/sandbox/pytest/.tox/regen/bin/python3.4 - rootdir: /tmp/doc-exec-157, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 -- $PWD/.env/bin/python2.7 + rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 5 items test_server.py::TestClass::test_method PASSED - ========================= 1 passed in 0.01 seconds ========================= + ======= 1 passed in 0.12 seconds ======== You can also select on the class:: $ py.test -v test_server.py::TestClass - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 -- /tmp/sandbox/pytest/.tox/regen/bin/python3.4 - rootdir: /tmp/doc-exec-157, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 -- $PWD/.env/bin/python2.7 + rootdir: $REGENDOC_TMPDIR, inifile: collecting ... 
collected 4 items test_server.py::TestClass::test_method PASSED - ========================= 1 passed in 0.01 seconds ========================= + ======= 1 passed in 0.12 seconds ======== Or select multiple nodes:: $ py.test -v test_server.py::TestClass test_server.py::test_send_http - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 -- /tmp/sandbox/pytest/.tox/regen/bin/python3.4 - rootdir: /tmp/doc-exec-157, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 -- $PWD/.env/bin/python2.7 + rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 8 items test_server.py::TestClass::test_method PASSED test_server.py::test_send_http PASSED - ========================= 2 passed in 0.01 seconds ========================= + ======= 2 passed in 0.12 seconds ======== .. _node-id: @@ -124,44 +124,44 @@ exact match on markers that ``-m`` provides. This makes it easy to select tests based on their names:: $ py.test -v -k http # running with the above defined example module - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 -- /tmp/sandbox/pytest/.tox/regen/bin/python3.4 - rootdir: /tmp/doc-exec-157, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 -- $PWD/.env/bin/python2.7 + rootdir: $REGENDOC_TMPDIR, inifile: collecting ... 
collected 4 items test_server.py::test_send_http PASSED - ====================== 3 tests deselected by '-khttp' ====================== - ================== 1 passed, 3 deselected in 0.01 seconds ================== + ======= 3 tests deselected by '-khttp' ======== + ======= 1 passed, 3 deselected in 0.12 seconds ======== And you can also run all tests except the ones that match the keyword:: $ py.test -k "not send_http" -v - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 -- /tmp/sandbox/pytest/.tox/regen/bin/python3.4 - rootdir: /tmp/doc-exec-157, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 -- $PWD/.env/bin/python2.7 + rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 4 items test_server.py::test_something_quick PASSED test_server.py::test_another PASSED test_server.py::TestClass::test_method PASSED - ================= 1 tests deselected by '-knot send_http' ================== - ================== 3 passed, 1 deselected in 0.01 seconds ================== + ======= 1 tests deselected by '-knot send_http' ======== + ======= 3 passed, 1 deselected in 0.12 seconds ======== Or to select "http" and "quick" tests:: $ py.test -k "http or quick" -v - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 -- /tmp/sandbox/pytest/.tox/regen/bin/python3.4 - rootdir: /tmp/doc-exec-157, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 -- $PWD/.env/bin/python2.7 + rootdir: $REGENDOC_TMPDIR, inifile: collecting ... 
collected 4 items test_server.py::test_send_http PASSED test_server.py::test_something_quick PASSED - ================= 2 tests deselected by '-khttp or quick' ================== - ================== 2 passed, 2 deselected in 0.01 seconds ================== + ======= 2 tests deselected by '-khttp or quick' ======== + ======= 2 passed, 2 deselected in 0.12 seconds ======== .. note:: @@ -341,26 +341,26 @@ and an example invocations specifying a different environment than what the test needs:: $ py.test -E stage2 - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-157, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 1 items test_someenv.py s - ======================== 1 skipped in 0.01 seconds ========================= + ======= 1 skipped in 0.12 seconds ======== and here is one that specifies exactly the environment needed:: $ py.test -E stage1 - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-157, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 1 items test_someenv.py . - ========================= 1 passed in 0.01 seconds ========================= + ======= 1 passed in 0.12 seconds ======== The ``--markers`` option always gives you a list of available markers:: @@ -420,7 +420,7 @@ Let's run this without capturing output and see what we get:: glob args=('class',) kwargs={'x': 2} glob args=('module',) kwargs={'x': 1} . 
- 1 passed in 0.01 seconds + 1 passed in 0.12 seconds marking platform specific tests with pytest -------------------------------------------------------------- @@ -472,29 +472,29 @@ Let's do a little test file to show how this looks like:: then you will see two test skipped and two executed tests as expected:: $ py.test -rs # this option reports skip reasons - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-157, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items - test_plat.py sss. - ========================= short test summary info ========================== - SKIP [3] /tmp/doc-exec-157/conftest.py:12: cannot run on platform linux + test_plat.py s.s. + ======= short test summary info ======== + SKIP [2] $REGENDOC_TMPDIR/conftest.py:12: cannot run on platform linux2 - =================== 1 passed, 3 skipped in 0.01 seconds ==================== + ======= 2 passed, 2 skipped in 0.12 seconds ======== Note that if you specify a platform via the marker-command line option like this:: $ py.test -m linux2 - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-157, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items - test_plat.py s + test_plat.py . - =================== 3 tests deselected by "-m 'linux2'" ==================== - ================= 1 skipped, 3 deselected in 0.01 seconds ================== + ======= 3 tests deselected by "-m 'linux2'" ======== + ======= 1 passed, 3 deselected in 0.12 seconds ======== then the unmarked-tests will not be run. 
It is thus a way to restrict the run to the specific tests. @@ -538,47 +538,47 @@ We want to dynamically define two markers and can do it in a We can now use the ``-m option`` to select one set:: $ py.test -m interface --tb=short - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-157, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items test_module.py FF - ================================= FAILURES ================================= - __________________________ test_interface_simple ___________________________ + ======= FAILURES ======== + _______ test_interface_simple ________ test_module.py:3: in test_interface_simple assert 0 E assert 0 - __________________________ test_interface_complex __________________________ + _______ test_interface_complex ________ test_module.py:6: in test_interface_complex assert 0 E assert 0 - ================== 2 tests deselected by "-m 'interface'" ================== - ================== 2 failed, 2 deselected in 0.02 seconds ================== + ======= 2 tests deselected by "-m 'interface'" ======== + ======= 2 failed, 2 deselected in 0.12 seconds ======== or to select both "event" and "interface" tests:: $ py.test -m "interface or event" --tb=short - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-157, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items test_module.py FFF - ================================= FAILURES ================================= - __________________________ test_interface_simple ___________________________ + ======= FAILURES 
======== + _______ test_interface_simple ________ test_module.py:3: in test_interface_simple assert 0 E assert 0 - __________________________ test_interface_complex __________________________ + _______ test_interface_complex ________ test_module.py:6: in test_interface_complex assert 0 E assert 0 - ____________________________ test_event_simple _____________________________ + _______ test_event_simple ________ test_module.py:9: in test_event_simple assert 0 E assert 0 - ============= 1 tests deselected by "-m 'interface or event'" ============== - ================== 3 failed, 1 deselected in 0.02 seconds ================== + ======= 1 tests deselected by "-m 'interface or event'" ======== + ======= 3 failed, 1 deselected in 0.12 seconds ======== diff --git a/doc/en/example/nonpython.txt b/doc/en/example/nonpython.rst similarity index 64% rename from doc/en/example/nonpython.txt rename to doc/en/example/nonpython.rst index dd344dfbf68..49bc2c30b33 100644 --- a/doc/en/example/nonpython.txt +++ b/doc/en/example/nonpython.rst @@ -26,19 +26,19 @@ and if you installed `PyYAML`_ or a compatible YAML-parser you can now execute the test specification:: nonpython $ py.test test_simple.yml - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/sandbox/pytest/doc/en, inifile: pytest.ini + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $PWD/doc/en, inifile: pytest.ini collected 2 items test_simple.yml .F - ================================= FAILURES ================================= - ______________________________ usecase: hello ______________________________ + ======= FAILURES ======== + _______ usecase: hello ________ usecase execution failed spec failed: 'some': 'other' no further details known at this point. 
- ==================== 1 failed, 1 passed in 0.19 seconds ==================== + ======= 1 failed, 1 passed in 0.12 seconds ======== You get one dot for the passing ``sub1: sub1`` check and one failure. Obviously in the above ``conftest.py`` you'll want to implement a more @@ -56,31 +56,31 @@ your own domain specific testing language this way. consulted when reporting in ``verbose`` mode:: nonpython $ py.test -v - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 -- /tmp/sandbox/pytest/.tox/regen/bin/python3.4 - rootdir: /tmp/sandbox/pytest/doc/en, inifile: pytest.ini + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 -- $PWD/.env/bin/python2.7 + rootdir: $PWD/doc/en, inifile: pytest.ini collecting ... collected 2 items test_simple.yml::ok PASSED test_simple.yml::hello FAILED - ================================= FAILURES ================================= - ______________________________ usecase: hello ______________________________ + ======= FAILURES ======== + _______ usecase: hello ________ usecase execution failed spec failed: 'some': 'other' no further details known at this point. 
- ==================== 1 failed, 1 passed in 0.05 seconds ==================== + ======= 1 failed, 1 passed in 0.12 seconds ======== While developing your custom test collection and execution it's also interesting to just look at the collection tree:: nonpython $ py.test --collect-only - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/sandbox/pytest/doc/en, inifile: pytest.ini + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $PWD/doc/en, inifile: pytest.ini collected 2 items - ============================= in 0.04 seconds ============================= + ======= in 0.12 seconds ======== diff --git a/doc/en/example/parametrize.txt b/doc/en/example/parametrize.rst similarity index 77% rename from doc/en/example/parametrize.txt rename to doc/en/example/parametrize.rst index 623ef2192bf..3d0c778f5a2 100644 --- a/doc/en/example/parametrize.txt +++ b/doc/en/example/parametrize.rst @@ -46,15 +46,15 @@ This means that we only run 2 tests if we do not pass ``--all``:: $ py.test -q test_compute.py .. - 2 passed in 0.01 seconds + 2 passed in 0.12 seconds We run only two computations, so we see two dots. let's run the full monty:: $ py.test -q --all ....F - ================================= FAILURES ================================= - _____________________________ test_compute[4] ______________________________ + ======= FAILURES ======== + _______ test_compute[4] ________ param1 = 4 @@ -63,7 +63,7 @@ let's run the full monty:: E assert 4 < 4 test_compute.py:3: AssertionError - 1 failed, 4 passed in 0.02 seconds + 1 failed, 4 passed in 0.12 seconds As expected when running the full range of ``param1`` values we'll get an error on the last one. @@ -81,7 +81,7 @@ Numbers, strings, booleans and None will have their usual string representation used in the test ID. 
For other objects, pytest will make a string based on the argument name:: - # contents of test_time.py + # content of test_time.py from datetime import datetime, timedelta @@ -126,11 +126,11 @@ objects, they are still using the default pytest representation:: $ py.test test_time.py --collect-only - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-159, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: - ============================= in 0.00 seconds ============================= + ======= in 0.12 seconds ======== ERROR: file not found: test_time.py A quick port of "testscenarios" @@ -170,22 +170,22 @@ only have to work a bit to construct the correct arguments for pytest's this is a fully self-contained example which you can run with:: $ py.test test_scenarios.py - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-159, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items test_scenarios.py .... 
- ========================= 4 passed in 0.02 seconds ========================= + ======= 4 passed in 0.12 seconds ======== If you just collect tests you'll also nicely see 'advanced' and 'basic' as variants for the test function:: $ py.test --collect-only test_scenarios.py - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-159, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items @@ -195,7 +195,7 @@ If you just collect tests you'll also nicely see 'advanced' and 'basic' as varia - ============================= in 0.01 seconds ============================= + ======= in 0.12 seconds ======== Note that we told ``metafunc.parametrize()`` that your scenario values should be considered class-scoped. With pytest-2.3 this leads to a @@ -248,24 +248,24 @@ creates a database object for the actual test invocations:: Let's first see how it looks like at collection time:: $ py.test test_backends.py --collect-only - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-159, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items - ============================= in 0.01 seconds ============================= + ======= in 0.12 seconds ======== And then when we run the test:: $ py.test -q test_backends.py .F - ================================= FAILURES ================================= - _________________________ test_db_initialized[d2] __________________________ + ======= FAILURES ======== + _______ test_db_initialized[d2] ________ - db = + db = def test_db_initialized(db): # a dummy test @@ -274,12 +274,52 @@ 
And then when we run the test:: E Failed: deliberately failing for demo purposes test_backends.py:6: Failed - 1 failed, 1 passed in 0.01 seconds + 1 failed, 1 passed in 0.12 seconds The first invocation with ``db == "DB1"`` passed while the second with ``db == "DB2"`` failed. Our ``db`` fixture function has instantiated each of the DB values during the setup phase while the ``pytest_generate_tests`` generated two according calls to the ``test_db_initialized`` during the collection phase. .. regendoc:wipe +Apply indirect on particular arguments +--------------------------------------------------- + +Very often parametrization uses more than one argument name. There is opportunity to apply ``indirect`` +parameter on particular arguments. It can be done by passing list or tuple of +arguments' names to ``indirect``. In the example below there is a function ``test_indirect`` which uses +two fixtures: ``x`` and ``y``. Here we give to indirect the list, which contains the name of the +fixture ``x``. The indirect parameter will be applied to this argument only, and the value ``a`` +will be passed to respective fixture function. + + # content of test_indirect_list.py + + import pytest + @pytest.fixture(scope='function') + def x(request): + return request.param * 3 + + @pytest.fixture(scope='function') + def y(request): + return request.param * 2 + + @pytest.mark.parametrize('x, y', [('a', 'b')], indirect=['x']) + def test_indirect(x,y): + assert x == 'aaa' + assert y == 'b' + +The result of this test will be successful: + + $ py.test test_indirect_list.py --collect-only + ============================= test session starts ============================== + platform linux2 -- Python 2.7.3, pytest-2.8.0.dev4, py-1.4.30, pluggy-0.3.0 + rootdir: /home/elizabeth/work/pytest, inifile: tox.ini + collected 1 items + + + + =============================== in 0.02 seconds =============================== + +.. 
regendoc:wipe + Parametrizing test methods through per-class configuration -------------------------------------------------------------- @@ -318,17 +358,17 @@ argument sets to use for each test function. Let's run it:: $ py.test -q F.. - ================================= FAILURES ================================= - ________________________ TestClass.test_equals[2-1] ________________________ + ======= FAILURES ======== + _______ TestClass.test_equals[1-2] ________ - self = , a = 1, b = 2 + self = , a = 1, b = 2 def test_equals(self, a, b): > assert a == b E assert 1 == 2 test_parametrize.py:18: AssertionError - 1 failed, 2 passed in 0.02 seconds + 1 failed, 2 passed in 0.12 seconds Indirect parametrization with multiple fixtures -------------------------------------------------------------- @@ -347,8 +387,11 @@ is to be run with different sets of arguments for its three arguments: Running it results in some skips if we don't have all the python interpreters installed and otherwise runs all combinations (5 interpreters times 5 interpreters times 3 objects to serialize/deserialize):: . $ py.test -rs -q multipython.py - ........................... 
- 27 passed in 4.14 seconds + ssssssssssss...ssssssssssss + ======= short test summary info ======== + SKIP [12] $PWD/doc/en/example/multipython.py:22: 'python3.3' not found + SKIP [12] $PWD/doc/en/example/multipython.py:22: 'python2.6' not found + 3 passed, 24 skipped in 0.12 seconds Indirect parametrization of optional implementations/imports -------------------------------------------------------------------- @@ -394,16 +437,16 @@ And finally a little test module:: If you run this with reporting for skips enabled:: $ py.test -rs test_module.py - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-159, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items test_module.py .s - ========================= short test summary info ========================== - SKIP [1] /tmp/doc-exec-159/conftest.py:10: could not import 'opt2' + ======= short test summary info ======== + SKIP [1] $REGENDOC_TMPDIR/conftest.py:10: could not import 'opt2' - =================== 1 passed, 1 skipped in 0.01 seconds ==================== + ======= 1 passed, 1 skipped in 0.12 seconds ======== You'll see that we don't have a ``opt2`` module and thus the second test run of our ``test_func1`` was skipped. A few notes: diff --git a/doc/en/example/pythoncollection.txt b/doc/en/example/pythoncollection.rst similarity index 83% rename from doc/en/example/pythoncollection.txt rename to doc/en/example/pythoncollection.rst index b8abdb2624e..c35d7e99214 100644 --- a/doc/en/example/pythoncollection.txt +++ b/doc/en/example/pythoncollection.rst @@ -42,9 +42,9 @@ that match ``*_check``. 
For example, if we have:: then the test collection looks like this:: $ py.test --collect-only - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-160, inifile: setup.cfg + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: setup.cfg collected 2 items @@ -52,7 +52,7 @@ then the test collection looks like this:: - ============================= in 0.01 seconds ============================= + ======= in 0.12 seconds ======== .. note:: @@ -88,9 +88,9 @@ Finding out what is collected You can always peek at the collection tree without running tests like this:: . $ py.test --collect-only pythoncollection.py - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/sandbox/pytest/doc/en, inifile: pytest.ini + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $PWD/doc/en, inifile: pytest.ini collected 3 items @@ -99,7 +99,7 @@ You can always peek at the collection tree without running tests like this:: - ============================= in 0.01 seconds ============================= + ======= in 0.12 seconds ======== customizing test collection to find all .py files --------------------------------------------------------- @@ -142,12 +142,14 @@ then a pytest run on python2 will find the one test when run with a python2 interpreters and will leave out the setup.py file:: $ py.test --collect-only - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-160, inifile: pytest.ini - collected 0 items + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, 
py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini + collected 1 items + + - ============================= in 0.01 seconds ============================= + ======= in 0.12 seconds ======== If you run with a Python3 interpreter the moduled added through the conftest.py file will not be considered for test collection. diff --git a/doc/en/example/reportingdemo.txt b/doc/en/example/reportingdemo.rst similarity index 65% rename from doc/en/example/reportingdemo.txt rename to doc/en/example/reportingdemo.rst index 6d62c4211fa..31c8738a5ef 100644 --- a/doc/en/example/reportingdemo.txt +++ b/doc/en/example/reportingdemo.rst @@ -12,15 +12,15 @@ get on the terminal - we are working on that): .. code-block:: python assertion $ py.test failure_demo.py - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/sandbox/pytest/doc/en, inifile: pytest.ini + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $PWD/doc/en, inifile: pytest.ini collected 42 items failure_demo.py FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF - ================================= FAILURES ================================= - ____________________________ test_generative[0] ____________________________ + ======= FAILURES ======== + _______ test_generative[0] ________ param1 = 3, param2 = 6 @@ -29,9 +29,9 @@ get on the terminal - we are working on that): E assert (3 * 2) < 6 failure_demo.py:15: AssertionError - _________________________ TestFailing.test_simple __________________________ + _______ TestFailing.test_simple ________ - self = + self = def test_simple(self): def f(): @@ -41,13 +41,13 @@ get on the terminal - we are working on that): > assert f() == g() E assert 42 == 43 - E + where 42 = .f at 0x7f65f2315510>() - E + and 43 = .g at 0x7f65f2323510>() + E + where 42 = () + E + and 43 = () failure_demo.py:28: 
AssertionError - ____________________ TestFailing.test_simple_multiline _____________________ + _______ TestFailing.test_simple_multiline ________ - self = + self = def test_simple_multiline(self): otherfunc_multi( @@ -55,7 +55,7 @@ get on the terminal - we are working on that): > 6*9) failure_demo.py:33: - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ a = 42, b = 54 @@ -65,21 +65,21 @@ get on the terminal - we are working on that): E assert 42 == 54 failure_demo.py:11: AssertionError - ___________________________ TestFailing.test_not ___________________________ + _______ TestFailing.test_not ________ - self = + self = def test_not(self): def f(): return 42 > assert not f() E assert not 42 - E + where 42 = .f at 0x7f65f2323598>() + E + where 42 = () failure_demo.py:38: AssertionError - _________________ TestSpecialisedExplanations.test_eq_text _________________ + _______ TestSpecialisedExplanations.test_eq_text ________ - self = + self = def test_eq_text(self): > assert 'spam' == 'eggs' @@ -88,9 +88,9 @@ get on the terminal - we are working on that): E + eggs failure_demo.py:42: AssertionError - _____________ TestSpecialisedExplanations.test_eq_similar_text _____________ + _______ TestSpecialisedExplanations.test_eq_similar_text ________ - self = + self = def test_eq_similar_text(self): > assert 'foo 1 bar' == 'foo 2 bar' @@ -101,9 +101,9 @@ get on the terminal - we are working on that): E ? 
^ failure_demo.py:45: AssertionError - ____________ TestSpecialisedExplanations.test_eq_multiline_text ____________ + _______ TestSpecialisedExplanations.test_eq_multiline_text ________ - self = + self = def test_eq_multiline_text(self): > assert 'foo\nspam\nbar' == 'foo\neggs\nbar' @@ -114,9 +114,9 @@ get on the terminal - we are working on that): E bar failure_demo.py:48: AssertionError - ______________ TestSpecialisedExplanations.test_eq_long_text _______________ + _______ TestSpecialisedExplanations.test_eq_long_text ________ - self = + self = def test_eq_long_text(self): a = '1'*100 + 'a' + '2'*100 @@ -131,9 +131,9 @@ get on the terminal - we are working on that): E ? ^ failure_demo.py:53: AssertionError - _________ TestSpecialisedExplanations.test_eq_long_text_multiline __________ + _______ TestSpecialisedExplanations.test_eq_long_text_multiline ________ - self = + self = def test_eq_long_text_multiline(self): a = '1\n'*100 + 'a' + '2\n'*100 @@ -155,9 +155,9 @@ get on the terminal - we are working on that): E 2 failure_demo.py:58: AssertionError - _________________ TestSpecialisedExplanations.test_eq_list _________________ + _______ TestSpecialisedExplanations.test_eq_list ________ - self = + self = def test_eq_list(self): > assert [0, 1, 2] == [0, 1, 3] @@ -166,9 +166,9 @@ get on the terminal - we are working on that): E Use -v to get the full diff failure_demo.py:61: AssertionError - ______________ TestSpecialisedExplanations.test_eq_list_long _______________ + _______ TestSpecialisedExplanations.test_eq_list_long ________ - self = + self = def test_eq_list_long(self): a = [0]*100 + [1] + [3]*100 @@ -179,9 +179,9 @@ get on the terminal - we are working on that): E Use -v to get the full diff failure_demo.py:66: AssertionError - _________________ TestSpecialisedExplanations.test_eq_dict _________________ + _______ TestSpecialisedExplanations.test_eq_dict ________ - self = + self = def test_eq_dict(self): > assert {'a': 0, 'b': 1, 'c': 0} == {'a': 0, 'b': 2, 
'd': 0} @@ -196,9 +196,9 @@ get on the terminal - we are working on that): E Use -v to get the full diff failure_demo.py:69: AssertionError - _________________ TestSpecialisedExplanations.test_eq_set __________________ + _______ TestSpecialisedExplanations.test_eq_set ________ - self = + self = def test_eq_set(self): > assert set([0, 10, 11, 12]) == set([0, 20, 21]) @@ -213,9 +213,9 @@ get on the terminal - we are working on that): E Use -v to get the full diff failure_demo.py:72: AssertionError - _____________ TestSpecialisedExplanations.test_eq_longer_list ______________ + _______ TestSpecialisedExplanations.test_eq_longer_list ________ - self = + self = def test_eq_longer_list(self): > assert [1,2] == [1,2,3] @@ -224,18 +224,18 @@ get on the terminal - we are working on that): E Use -v to get the full diff failure_demo.py:75: AssertionError - _________________ TestSpecialisedExplanations.test_in_list _________________ + _______ TestSpecialisedExplanations.test_in_list ________ - self = + self = def test_in_list(self): > assert 1 in [0, 2, 3, 4, 5] E assert 1 in [0, 2, 3, 4, 5] failure_demo.py:78: AssertionError - __________ TestSpecialisedExplanations.test_not_in_text_multiline __________ + _______ TestSpecialisedExplanations.test_not_in_text_multiline ________ - self = + self = def test_not_in_text_multiline(self): text = 'some multiline\ntext\nwhich\nincludes foo\nand a\ntail' @@ -251,9 +251,9 @@ get on the terminal - we are working on that): E tail failure_demo.py:82: AssertionError - ___________ TestSpecialisedExplanations.test_not_in_text_single ____________ + _______ TestSpecialisedExplanations.test_not_in_text_single ________ - self = + self = def test_not_in_text_single(self): text = 'single foo line' @@ -264,9 +264,9 @@ get on the terminal - we are working on that): E ? 
+++ failure_demo.py:86: AssertionError - _________ TestSpecialisedExplanations.test_not_in_text_single_long _________ + _______ TestSpecialisedExplanations.test_not_in_text_single_long ________ - self = + self = def test_not_in_text_single_long(self): text = 'head ' * 50 + 'foo ' + 'tail ' * 20 @@ -277,9 +277,9 @@ get on the terminal - we are working on that): E ? +++ failure_demo.py:90: AssertionError - ______ TestSpecialisedExplanations.test_not_in_text_single_long_term _______ + _______ TestSpecialisedExplanations.test_not_in_text_single_long_term ________ - self = + self = def test_not_in_text_single_long_term(self): text = 'head ' * 50 + 'f'*70 + 'tail ' * 20 @@ -290,7 +290,7 @@ get on the terminal - we are working on that): E ? ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ failure_demo.py:94: AssertionError - ______________________________ test_attribute ______________________________ + _______ test_attribute ________ def test_attribute(): class Foo(object): @@ -298,21 +298,21 @@ get on the terminal - we are working on that): i = Foo() > assert i.b == 2 E assert 1 == 2 - E + where 1 = .Foo object at 0x7f65f1c814e0>.b + E + where 1 = .b failure_demo.py:101: AssertionError - _________________________ test_attribute_instance __________________________ + _______ test_attribute_instance ________ def test_attribute_instance(): class Foo(object): b = 1 > assert Foo().b == 2 E assert 1 == 2 - E + where 1 = .Foo object at 0x7f65f1c7f7f0>.b - E + where .Foo object at 0x7f65f1c7f7f0> = .Foo'>() + E + where 1 = .b + E + where = () failure_demo.py:107: AssertionError - __________________________ test_attribute_failure __________________________ + _______ test_attribute_failure ________ def test_attribute_failure(): class Foo(object): @@ -323,16 +323,16 @@ get on the terminal - we are working on that): > assert i.b == 2 failure_demo.py:116: - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ - self = .Foo object at 0x7f65f1c97dd8> + self = def _get_b(self): > raise Exception('Failed to get attrib') E Exception: Failed to get attrib failure_demo.py:113: Exception - _________________________ test_attribute_multiple __________________________ + _______ test_attribute_multiple ________ def test_attribute_multiple(): class Foo(object): @@ -341,57 +341,57 @@ get on the terminal - we are working on that): b = 2 > assert Foo().b == Bar().b E assert 1 == 2 - E + where 1 = .Foo object at 0x7f65f1c9b630>.b - E + where .Foo object at 0x7f65f1c9b630> = .Foo'>() - E + and 2 = .Bar object at 0x7f65f1c9b2b0>.b - E + where .Bar object at 0x7f65f1c9b2b0> = .Bar'>() + E + where 1 = .b + E + where = () + E + and 2 = .b + E + where = () failure_demo.py:124: AssertionError - __________________________ TestRaises.test_raises __________________________ + _______ TestRaises.test_raises ________ - self = + self = def test_raises(self): s = 'qwe' > raises(TypeError, "int(s)") failure_demo.py:133: - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ > int(s) E ValueError: invalid literal for int() with base 10: 'qwe' - <0-codegen /tmp/sandbox/pytest/.tox/regen/lib/python3.4/site-packages/_pytest/python.py:1075>:1: ValueError - ______________________ TestRaises.test_raises_doesnt _______________________ + <0-codegen $PWD/_pytest/python.py:1091>:1: ValueError + _______ TestRaises.test_raises_doesnt ________ - self = + self = def test_raises_doesnt(self): > raises(IOError, "int('3')") E Failed: DID NOT RAISE failure_demo.py:136: Failed - __________________________ TestRaises.test_raise ___________________________ + _______ TestRaises.test_raise ________ - self = + self = def test_raise(self): > raise ValueError("demo error") E ValueError: demo error failure_demo.py:139: ValueError - ________________________ 
TestRaises.test_tupleerror ________________________ + _______ TestRaises.test_tupleerror ________ - self = + self = def test_tupleerror(self): > a,b = [1] E ValueError: need more than 1 value to unpack failure_demo.py:142: ValueError - ______ TestRaises.test_reinterpret_fails_with_print_for_the_fun_of_it ______ + _______ TestRaises.test_reinterpret_fails_with_print_for_the_fun_of_it ________ - self = + self = def test_reinterpret_fails_with_print_for_the_fun_of_it(self): l = [1,2,3] @@ -400,18 +400,18 @@ get on the terminal - we are working on that): E TypeError: 'int' object is not iterable failure_demo.py:147: TypeError - --------------------------- Captured stdout call --------------------------- + ----------------------------- Captured stdout call ----------------------------- l is [1, 2, 3] - ________________________ TestRaises.test_some_error ________________________ + _______ TestRaises.test_some_error ________ - self = + self = def test_some_error(self): > if namenotexi: - E NameError: name 'namenotexi' is not defined + E NameError: global name 'namenotexi' is not defined failure_demo.py:150: NameError - ____________________ test_dynamic_compile_shows_nicely _____________________ + _______ test_dynamic_compile_shows_nicely ________ def test_dynamic_compile_shows_nicely(): src = 'def foo():\n assert 1 == 0\n' @@ -423,16 +423,16 @@ get on the terminal - we are working on that): > module.foo() failure_demo.py:165: - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ def foo(): > assert 1 == 0 E assert 1 == 0 - <2-codegen 'abc-123' /tmp/sandbox/pytest/doc/en/example/assertion/failure_demo.py:162>:2: AssertionError - ____________________ TestMoreErrors.test_complex_error _____________________ + <2-codegen 'abc-123' $PWD/doc/en/example/assertion/failure_demo.py:162>:2: AssertionError + _______ TestMoreErrors.test_complex_error ________ - self = + self = def 
test_complex_error(self): def f(): @@ -442,10 +442,10 @@ get on the terminal - we are working on that): > somefunc(f(), g()) failure_demo.py:175: - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ failure_demo.py:8: in somefunc otherfunc(x,y) - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ a = 44, b = 43 @@ -454,9 +454,9 @@ get on the terminal - we are working on that): E assert 44 == 43 failure_demo.py:5: AssertionError - ___________________ TestMoreErrors.test_z1_unpack_error ____________________ + _______ TestMoreErrors.test_z1_unpack_error ________ - self = + self = def test_z1_unpack_error(self): l = [] @@ -464,9 +464,9 @@ get on the terminal - we are working on that): E ValueError: need more than 0 values to unpack failure_demo.py:179: ValueError - ____________________ TestMoreErrors.test_z2_type_error _____________________ + _______ TestMoreErrors.test_z2_type_error ________ - self = + self = def test_z2_type_error(self): l = 3 @@ -474,21 +474,21 @@ get on the terminal - we are working on that): E TypeError: 'int' object is not iterable failure_demo.py:183: TypeError - ______________________ TestMoreErrors.test_startswith ______________________ + _______ TestMoreErrors.test_startswith ________ - self = + self = def test_startswith(self): s = "123" g = "456" > assert s.startswith(g) - E assert ('456') - E + where = '123'.startswith + E assert ('456') + E + where = '123'.startswith failure_demo.py:188: AssertionError - __________________ TestMoreErrors.test_startswith_nested ___________________ + _______ TestMoreErrors.test_startswith_nested ________ - self = + self = def test_startswith_nested(self): def f(): @@ -496,15 +496,15 @@ get on the terminal - we are working on that): def g(): return "456" > assert f().startswith(g()) - E assert 
('456') - E + where = '123'.startswith - E + where '123' = .f at 0x7f65f1c32950>() - E + and '456' = .g at 0x7f65f1c32ea0>() + E assert ('456') + E + where = '123'.startswith + E + where '123' = () + E + and '456' = () failure_demo.py:195: AssertionError - _____________________ TestMoreErrors.test_global_func ______________________ + _______ TestMoreErrors.test_global_func ________ - self = + self = def test_global_func(self): > assert isinstance(globf(42), float) @@ -512,20 +512,20 @@ get on the terminal - we are working on that): E + where 43 = globf(42) failure_demo.py:198: AssertionError - _______________________ TestMoreErrors.test_instance _______________________ + _______ TestMoreErrors.test_instance ________ - self = + self = def test_instance(self): self.x = 6*7 > assert self.x != 42 E assert 42 != 42 - E + where 42 = .x + E + where 42 = .x failure_demo.py:202: AssertionError - _______________________ TestMoreErrors.test_compare ________________________ + _______ TestMoreErrors.test_compare ________ - self = + self = def test_compare(self): > assert globf(10) < 5 @@ -533,9 +533,9 @@ get on the terminal - we are working on that): E + where 11 = globf(10) failure_demo.py:205: AssertionError - _____________________ TestMoreErrors.test_try_finally ______________________ + _______ TestMoreErrors.test_try_finally ________ - self = + self = def test_try_finally(self): x = 1 @@ -544,9 +544,9 @@ get on the terminal - we are working on that): E assert 1 == 0 failure_demo.py:210: AssertionError - ___________________ TestCustomAssertMsg.test_single_line ___________________ + _______ TestCustomAssertMsg.test_single_line ________ - self = + self = def test_single_line(self): class A: @@ -555,12 +555,12 @@ get on the terminal - we are working on that): > assert A.a == b, "A.a appears not to be b" E AssertionError: A.a appears not to be b E assert 1 == 2 - E + where 1 = .A'>.a + E + where 1 = .a failure_demo.py:221: AssertionError - ____________________ 
TestCustomAssertMsg.test_multiline ____________________ + _______ TestCustomAssertMsg.test_multiline ________ - self = + self = def test_multiline(self): class A: @@ -572,12 +572,12 @@ get on the terminal - we are working on that): E or does not appear to be b E one of those E assert 1 == 2 - E + where 1 = .A'>.a + E + where 1 = .a failure_demo.py:227: AssertionError - ___________________ TestCustomAssertMsg.test_custom_repr ___________________ + _______ TestCustomAssertMsg.test_custom_repr ________ - self = + self = def test_custom_repr(self): class JSON: @@ -595,4 +595,4 @@ get on the terminal - we are working on that): E + where 1 = This is JSON\n{\n 'foo': 'bar'\n}.a failure_demo.py:237: AssertionError - ======================== 42 failed in 0.35 seconds ========================= + ======= 42 failed in 0.12 seconds ======== diff --git a/doc/en/example/simple.txt b/doc/en/example/simple.rst similarity index 71% rename from doc/en/example/simple.txt rename to doc/en/example/simple.rst index 6d49036544c..3c770eed98b 100644 --- a/doc/en/example/simple.txt +++ b/doc/en/example/simple.rst @@ -39,11 +39,11 @@ Let's run this without supplying our new option:: $ py.test -q test_sample.py F - ================================= FAILURES ================================= - _______________________________ test_answer ________________________________ - + ======= FAILURES ======== + _______ test_answer ________ + cmdopt = 'type1' - + def test_answer(cmdopt): if cmdopt == "type1": print ("first") @@ -51,21 +51,21 @@ Let's run this without supplying our new option:: print ("second") > assert 0 # to see what was printed E assert 0 - + test_sample.py:6: AssertionError - --------------------------- Captured stdout call --------------------------- + ----------------------------- Captured stdout call ----------------------------- first - 1 failed in 0.01 seconds + 1 failed in 0.12 seconds And now with supplying a command line option:: $ py.test -q --cmdopt=type2 F - 
================================= FAILURES ================================= - _______________________________ test_answer ________________________________ - + ======= FAILURES ======== + _______ test_answer ________ + cmdopt = 'type2' - + def test_answer(cmdopt): if cmdopt == "type1": print ("first") @@ -73,11 +73,11 @@ And now with supplying a command line option:: print ("second") > assert 0 # to see what was printed E assert 0 - + test_sample.py:6: AssertionError - --------------------------- Captured stdout call --------------------------- + ----------------------------- Captured stdout call ----------------------------- second - 1 failed in 0.01 seconds + 1 failed in 0.12 seconds You can see that the command line option arrived in our test. This completes the basic pattern. However, one often rather wants to process @@ -107,12 +107,12 @@ of subprocesses close to your CPU. Running in an empty directory with the above conftest.py:: $ py.test - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-162, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 0 items - - ============================= in 0.00 seconds ============================= + + ======= in 0.12 seconds ======== .. 
_`excontrolskip`: @@ -152,28 +152,28 @@ We can now write a test module like this:: and when running it will see a skipped "slow" test:: $ py.test -rs # "-rs" means report details on the little 's' - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-162, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items - + test_module.py .s - ========================= short test summary info ========================== - SKIP [1] /tmp/doc-exec-162/conftest.py:9: need --runslow option to run - - =================== 1 passed, 1 skipped in 0.01 seconds ==================== + ======= short test summary info ======== + SKIP [1] $REGENDOC_TMPDIR/conftest.py:9: need --runslow option to run + + ======= 1 passed, 1 skipped in 0.12 seconds ======== Or run it including the ``slow`` marked test:: $ py.test --runslow - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-162, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items - + test_module.py .. 
- - ========================= 2 passed in 0.01 seconds ========================= + + ======= 2 passed in 0.12 seconds ======== Writing well integrated assertion helpers -------------------------------------------------- @@ -203,15 +203,15 @@ Let's run our little function:: $ py.test -q test_checkconfig.py F - ================================= FAILURES ================================= - ______________________________ test_something ______________________________ - + ======= FAILURES ======== + _______ test_something ________ + def test_something(): > checkconfig(42) E Failed: not configured: 42 - + test_checkconfig.py:8: Failed - 1 failed in 0.02 seconds + 1 failed in 0.12 seconds Detect if running from within a pytest run -------------------------------------------------------------- @@ -258,13 +258,13 @@ It's easy to present extra information in a ``pytest`` run:: which will add the string to the test header accordingly:: $ py.test - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-162, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 project deps: mylib-1.1 + rootdir: $REGENDOC_TMPDIR, inifile: collected 0 items - - ============================= in 0.00 seconds ============================= + + ======= in 0.12 seconds ======== .. regendoc:wipe @@ -282,24 +282,24 @@ you present more information appropriately:: which will add info only when run with "--v":: $ py.test -v - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 -- /tmp/sandbox/pytest/.tox/regen/bin/python3.4 - rootdir: /tmp/doc-exec-162, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 -- $PWD/.env/bin/python2.7 info1: did you know that ... did you? 
+ rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 0 items - - ============================= in 0.00 seconds ============================= + + ======= in 0.12 seconds ======== and nothing when run plainly:: $ py.test - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-162, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 0 items - - ============================= in 0.00 seconds ============================= + + ======= in 0.12 seconds ======== profiling test duration -------------------------- @@ -327,18 +327,18 @@ out which tests are the slowest. Let's make an artifical test suite:: Now we can profile which test functions execute the slowest:: $ py.test --durations=3 - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-162, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 3 items - + test_some_are_slow.py ... - - ========================= slowest 3 test durations ========================= + + ======= slowest 3 test durations ======== 0.20s call test_some_are_slow.py::test_funcslow2 0.10s call test_some_are_slow.py::test_funcslow1 - 0.00s setup test_some_are_slow.py::test_funcslow2 - ========================= 3 passed in 0.31 seconds ========================= + 0.00s setup test_some_are_slow.py::test_funcfast + ======= 3 passed in 0.12 seconds ======== incremental testing - test steps --------------------------------------------------- @@ -389,27 +389,27 @@ tests in a class. 
Here is a test module example:: If we run this:: $ py.test -rx - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-162, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items - + test_step.py .Fx. - - ================================= FAILURES ================================= - ____________________ TestUserHandling.test_modification ____________________ - - self = - + + ======= FAILURES ======== + _______ TestUserHandling.test_modification ________ + + self = + def test_modification(self): > assert 0 E assert 0 - + test_step.py:9: AssertionError - ========================= short test summary info ========================== + ======= short test summary info ======== XFAIL test_step.py::TestUserHandling::()::test_deletion reason: previous test failed (test_modification) - ============== 1 failed, 2 passed, 1 xfailed in 0.02 seconds =============== + ======= 1 failed, 2 passed, 1 xfailed in 0.12 seconds ======== We'll see that ``test_deletion`` was not executed because ``test_modification`` failed. It is reported as an "expected failure". @@ -460,56 +460,56 @@ the ``db`` fixture:: We can run this:: $ py.test - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-162, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 7 items - + test_step.py .Fx. 
a/test_db.py F a/test_db2.py F b/test_error.py E - - ================================== ERRORS ================================== - _______________________ ERROR at setup of test_root ________________________ - file /tmp/doc-exec-162/b/test_error.py, line 1 + + ======= ERRORS ======== + _______ ERROR at setup of test_root ________ + file $REGENDOC_TMPDIR/b/test_error.py, line 1 def test_root(db): # no db here, will error out fixture 'db' not found - available fixtures: pytestconfig, capsys, recwarn, monkeypatch, tmpdir, capfd + available fixtures: pytestconfig, recwarn, monkeypatch, capfd, capsys, tmpdir use 'py.test --fixtures [testpath]' for help on them. - - /tmp/doc-exec-162/b/test_error.py:1 - ================================= FAILURES ================================= - ____________________ TestUserHandling.test_modification ____________________ - - self = - + + $REGENDOC_TMPDIR/b/test_error.py:1 + ======= FAILURES ======== + _______ TestUserHandling.test_modification ________ + + self = + def test_modification(self): > assert 0 E assert 0 - + test_step.py:9: AssertionError - _________________________________ test_a1 __________________________________ - - db = - + _______ test_a1 ________ + + db = + def test_a1(db): > assert 0, db # to show value - E AssertionError: + E AssertionError: E assert 0 - + a/test_db.py:2: AssertionError - _________________________________ test_a2 __________________________________ - - db = - + _______ test_a2 ________ + + db = + def test_a2(db): > assert 0, db # to show value - E AssertionError: + E AssertionError: E assert 0 - + a/test_db2.py:2: AssertionError - ========== 3 failed, 2 passed, 1 xfailed, 1 error in 0.05 seconds ========== + ======= 3 failed, 2 passed, 1 xfailed, 1 error in 0.12 seconds ======== The two test modules in the ``a`` directory see the same ``db`` fixture instance while the one test in the sister-directory ``b`` doesn't see it. 
We could of course @@ -534,23 +534,24 @@ case we just write some informations out to a ``failures`` file:: import pytest import os.path - @pytest.mark.tryfirst - def pytest_runtest_makereport(item, call, __multicall__): + @pytest.hookimpl(tryfirst=True, hookwrapper=True) + def pytest_runtest_makereport(item, call): # execute all other hooks to obtain the report object - rep = __multicall__.execute() + outcome = yield + rep = outcome.get_result() # we only look at actual failing test calls, not setup/teardown if rep.when == "call" and rep.failed: mode = "a" if os.path.exists("failures") else "w" with open("failures", mode) as f: # let's also access a fixture for the fun of it - if "tmpdir" in item.funcargs: + if "tmpdir" in item.fixturenames: extra = " (%s)" % item.funcargs["tmpdir"] else: extra = "" f.write(rep.nodeid + extra + "\n") - return rep + if you then have failing tests:: @@ -563,37 +564,36 @@ if you then have failing tests:: and run them:: $ py.test test_module.py - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-162, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items - + test_module.py FF - - ================================= FAILURES ================================= - ________________________________ test_fail1 ________________________________ - - tmpdir = local('/tmp/pytest-22/test_fail10') - + + ======= FAILURES ======== + _______ test_fail1 ________ + + tmpdir = local('/tmp/pytest-NaN/test_fail10') + def test_fail1(tmpdir): > assert 0 E assert 0 - + test_module.py:2: AssertionError - ________________________________ test_fail2 ________________________________ - + _______ test_fail2 ________ + def test_fail2(): > assert 0 E assert 0 - + test_module.py:4: AssertionError - ========================= 2 failed in 
0.02 seconds ========================= + ======= 2 failed in 0.12 seconds ======== you will have a "failures" file which contains the failing test ids:: $ cat failures - test_module.py::test_fail1 (/tmp/pytest-22/test_fail10) - test_module.py::test_fail2 + cat: failures: No such file or directory Making test result information available in fixtures ----------------------------------------------------------- @@ -607,16 +607,16 @@ here is a little example implemented via a local plugin:: import pytest - @pytest.mark.tryfirst - def pytest_runtest_makereport(item, call, __multicall__): + @pytest.hookimpl(tryfirst=True, hookwrapper=True) + def pytest_runtest_makereport(item, call): # execute all other hooks to obtain the report object - rep = __multicall__.execute() + outcome = yield + rep = outcome.get_result() # set an report attribute for each phase of a call, which can # be "setup", "call", "teardown" setattr(item, "rep_" + rep.when, rep) - return rep @pytest.fixture @@ -654,42 +654,42 @@ if you then have failing tests:: and run it:: $ py.test -s test_module.py - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-162, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 3 items - - test_module.py Esetting up a test failed! 
test_module.py::test_setup_fails - Fexecuting test failed test_module.py::test_call_fails + + test_module.py E('setting up a test failed!', 'test_module.py::test_setup_fails') + F('executing test failed', 'test_module.py::test_call_fails') F - - ================================== ERRORS ================================== - ____________________ ERROR at setup of test_setup_fails ____________________ - + + ======= ERRORS ======== + _______ ERROR at setup of test_setup_fails ________ + @pytest.fixture def other(): > assert 0 E assert 0 - + test_module.py:6: AssertionError - ================================= FAILURES ================================= - _____________________________ test_call_fails ______________________________ - + ======= FAILURES ======== + _______ test_call_fails ________ + something = None - + def test_call_fails(something): > assert 0 E assert 0 - + test_module.py:12: AssertionError - ________________________________ test_fail2 ________________________________ - + _______ test_fail2 ________ + def test_fail2(): > assert 0 E assert 0 - + test_module.py:15: AssertionError - ==================== 2 failed, 1 error in 0.02 seconds ===================== + ======= 2 failed, 1 warnings, 1 error in 0.12 seconds ======== You'll see that the fixture finalizers could use the precise reporting information. @@ -743,5 +743,4 @@ over to ``pytest`` instead. 
For example:: This makes it convenient to execute your tests from within your frozen application, using standard ``py.test`` command-line options:: - $ ./app_main --pytest --verbose --tb=long --junit-xml=results.xml test-suite/ - /bin/sh: 1: ./app_main: not found + ./app_main --pytest --verbose --tb=long --junit-xml=results.xml test-suite/ diff --git a/doc/en/example/special.txt b/doc/en/example/special.rst similarity index 98% rename from doc/en/example/special.txt rename to doc/en/example/special.rst index ba58a1cd7df..58e66d44e34 100644 --- a/doc/en/example/special.txt +++ b/doc/en/example/special.rst @@ -69,4 +69,4 @@ If you run this without output capturing:: .test other .test_unit1 method called . - 4 passed in 0.03 seconds + 4 passed in 0.12 seconds diff --git a/doc/en/faq.txt b/doc/en/faq.rst similarity index 100% rename from doc/en/faq.txt rename to doc/en/faq.rst diff --git a/doc/en/fixture.txt b/doc/en/fixture.rst similarity index 87% rename from doc/en/fixture.txt rename to doc/en/fixture.rst index 7ed8399de03..55b79b37a24 100644 --- a/doc/en/fixture.txt +++ b/doc/en/fixture.rst @@ -74,17 +74,17 @@ will discover and call the :py:func:`@pytest.fixture <_pytest.python.fixture>` marked ``smtp`` fixture function. 
Running the test looks like this:: $ py.test test_smtpsimple.py - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-98, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 1 items test_smtpsimple.py F - ================================= FAILURES ================================= - ________________________________ test_ehlo _________________________________ + ======= FAILURES ======== + _______ test_ehlo ________ - smtp = + smtp = def test_ehlo(smtp): response, msg = smtp.ehlo() @@ -93,7 +93,7 @@ marked ``smtp`` fixture function. Running the test looks like this:: E assert 0 test_smtpsimple.py:11: AssertionError - ========================= 1 failed in 1.07 seconds ========================= + ======= 1 failed in 0.12 seconds ======== In the failure traceback we see that the test function was called with a ``smtp`` argument, the ``smtplib.SMTP()`` instance created by the fixture @@ -192,28 +192,29 @@ We deliberately insert failing ``assert 0`` statements in order to inspect what is going on and can now run the tests:: $ py.test test_module.py - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-98, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items test_module.py FF - ================================= FAILURES ================================= - ________________________________ test_ehlo _________________________________ + ======= FAILURES ======== + _______ test_ehlo ________ - smtp = + smtp = def test_ehlo(smtp): response = smtp.ehlo() assert response[0] == 250 - > assert "merlinux" in 
response[1] - E TypeError: Type str doesn't support the buffer API + assert "merlinux" in response[1] + > assert 0 # for demo purposes + E assert 0 - test_module.py:5: TypeError - ________________________________ test_noop _________________________________ + test_module.py:6: AssertionError + _______ test_noop ________ - smtp = + smtp = def test_noop(smtp): response = smtp.noop() @@ -222,7 +223,7 @@ inspect what is going on and can now run the tests:: E assert 0 test_module.py:11: AssertionError - ========================= 2 failed in 0.82 seconds ========================= + ======= 2 failed in 0.12 seconds ======== You see the two ``assert 0`` failing and more importantly you can also see that the same (module-scoped) ``smtp`` object was passed into the two @@ -231,7 +232,9 @@ traceback. As a result, the two test functions using ``smtp`` run as quick as a single one because they reuse the same instance. If you decide that you rather want to have a session-scoped ``smtp`` -instance, you can simply declare it:: +instance, you can simply declare it: + +.. code-block:: python @pytest.fixture(scope="session") def smtp(...): @@ -270,7 +273,7 @@ Let's execute it:: $ py.test -s -q --tb=no FFteardown smtp - 2 failed in 1.44 seconds + 2 failed in 0.12 seconds We see that the ``smtp`` instance is finalized after the two tests finished execution. 
Note that if we decorated our fixture @@ -302,7 +305,7 @@ read an optional server URL from the test module which uses our fixture:: def fin(): print ("finalizing %s (%s)" % (smtp, server)) smtp.close() - + request.addfinalizer(fin) return smtp We use the ``request.module`` attribute to optionally obtain an @@ -310,8 +313,9 @@ We use the ``request.module`` attribute to optionally obtain an again, nothing much has changed:: $ py.test -s -q --tb=no - FF - 2 failed in 0.62 seconds + FFteardown smtp + + 2 failed in 0.12 seconds Let's quickly create another test module that actually sets the server URL in its module namespace:: @@ -327,11 +331,11 @@ Running it:: $ py.test -qq --tb=short test_anothersmtp.py F - ================================= FAILURES ================================= - ______________________________ test_showhelo _______________________________ + ======= FAILURES ======== + _______ test_showhelo ________ test_anothersmtp.py:5: in test_showhelo assert 0, smtp.helo() - E AssertionError: (250, b'mail.python.org') + E AssertionError: (250, 'hq.merlinux.eu') E assert 0 voila! 
The ``smtp`` fixture function picked up our mail server name @@ -376,21 +380,22 @@ So let's just do another run:: $ py.test -q test_module.py FFFF - ================================= FAILURES ================================= - __________________________ test_ehlo[merlinux.eu] __________________________ + ======= FAILURES ======== + _______ test_ehlo[merlinux.eu] ________ - smtp = + smtp = def test_ehlo(smtp): response = smtp.ehlo() assert response[0] == 250 - > assert "merlinux" in response[1] - E TypeError: Type str doesn't support the buffer API + assert "merlinux" in response[1] + > assert 0 # for demo purposes + E assert 0 - test_module.py:5: TypeError - __________________________ test_noop[merlinux.eu] __________________________ + test_module.py:6: AssertionError + _______ test_noop[merlinux.eu] ________ - smtp = + smtp = def test_noop(smtp): response = smtp.noop() @@ -399,22 +404,22 @@ So let's just do another run:: E assert 0 test_module.py:11: AssertionError - ________________________ test_ehlo[mail.python.org] ________________________ + _______ test_ehlo[mail.python.org] ________ - smtp = + smtp = def test_ehlo(smtp): response = smtp.ehlo() assert response[0] == 250 > assert "merlinux" in response[1] - E TypeError: Type str doesn't support the buffer API + E assert 'merlinux' in 'mail.python.org\nSIZE 51200000\nETRN\nSTARTTLS\nENHANCEDSTATUSCODES\n8BITMIME\nDSN\nSMTPUTF8' - test_module.py:5: TypeError - -------------------------- Captured stdout setup --------------------------- - finalizing - ________________________ test_noop[mail.python.org] ________________________ + test_module.py:5: AssertionError + ---------------------------- Captured stdout setup ----------------------------- + finalizing + _______ test_noop[mail.python.org] ________ - smtp = + smtp = def test_noop(smtp): response = smtp.noop() @@ -423,7 +428,7 @@ So let's just do another run:: E assert 0 test_module.py:11: AssertionError - 4 failed in 1.75 seconds + 4 failed in 0.12 seconds We 
see that our two test functions each ran twice, against the different ``smtp`` instances. Note also, that with the ``mail.python.org`` @@ -473,9 +478,9 @@ return ``None`` then pytest's auto-generated ID will be used. Running the above tests results in the following test IDs being used:: $ py.test --collect-only - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-98, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 6 items @@ -486,7 +491,7 @@ Running the above tests results in the following test IDs being used:: - ============================= in 0.02 seconds ============================= + ======= in 0.12 seconds ======== .. _`interdependent fixtures`: @@ -519,15 +524,15 @@ Here we declare an ``app`` fixture which receives the previously defined ``smtp`` fixture and instantiates an ``App`` object with it. Let's run it:: $ py.test -v test_appsetup.py - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 -- /tmp/sandbox/pytest/.tox/regen/bin/python3.4 - rootdir: /tmp/doc-exec-98, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 -- $PWD/.env/bin/python2.7 + rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 2 items test_appsetup.py::test_smtp_exists[merlinux.eu] PASSED test_appsetup.py::test_smtp_exists[mail.python.org] PASSED - ========================= 2 passed in 1.09 seconds ========================= + ======= 2 passed in 0.12 seconds ======== Due to the parametrization of ``smtp`` the test will run twice with two different ``App`` instances and respective smtp servers. 
There is no @@ -584,31 +589,31 @@ to show the setup/teardown flow:: Let's run the tests in verbose mode and with looking at the print-output:: $ py.test -v -s test_module.py - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 -- /tmp/sandbox/pytest/.tox/regen/bin/python3.4 - rootdir: /tmp/doc-exec-98, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 -- $PWD/.env/bin/python2.7 + rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 8 items - test_module.py::test_0[1] test0 1 + test_module.py::test_0[1] (' test0', 1) PASSED - test_module.py::test_0[2] test0 2 + test_module.py::test_0[2] (' test0', 2) PASSED - test_module.py::test_1[mod1] create mod1 - test1 mod1 + test_module.py::test_1[mod1] ('create', 'mod1') + (' test1', 'mod1') PASSED - test_module.py::test_2[1-mod1] test2 1 mod1 + test_module.py::test_2[1-mod1] (' test2', 1, 'mod1') PASSED - test_module.py::test_2[2-mod1] test2 2 mod1 + test_module.py::test_2[2-mod1] (' test2', 2, 'mod1') PASSED - test_module.py::test_1[mod2] create mod2 - test1 mod2 + test_module.py::test_1[mod2] ('create', 'mod2') + (' test1', 'mod2') PASSED - test_module.py::test_2[1-mod2] test2 1 mod2 + test_module.py::test_2[1-mod2] (' test2', 1, 'mod2') PASSED - test_module.py::test_2[2-mod2] test2 2 mod2 + test_module.py::test_2[2-mod2] (' test2', 2, 'mod2') PASSED - ========================= 8 passed in 0.02 seconds ========================= + ======= 8 passed in 0.12 seconds ======== You can see that the parametrized module-scoped ``modarg`` resource caused an ordering of test execution that lead to the fewest possible "active" resources. The finalizer for the ``mod1`` parametrized resource was executed @@ -664,22 +669,27 @@ to verify our fixture is activated and the tests pass:: $ py.test -q .. 
- 2 passed in 0.01 seconds + 2 passed in 0.12 seconds + +You can specify multiple fixtures like this: -You can specify multiple fixtures like this:: +.. code-block:: python @pytest.mark.usefixtures("cleandir", "anotherfixture") and you may specify fixture usage at the test module level, using -a generic feature of the mark mechanism:: +a generic feature of the mark mechanism: + +.. code-block:: python pytestmark = pytest.mark.usefixtures("cleandir") Lastly you can put fixtures required by all tests in your project -into an ini-file:: +into an ini-file: - # content of pytest.ini +.. code-block:: ini + # content of pytest.ini [pytest] usefixtures = cleandir @@ -736,7 +746,7 @@ If we run it, we get two passing tests:: $ py.test -q .. - 2 passed in 0.01 seconds + 2 passed in 0.12 seconds Here is how autouse fixtures work in other scopes: @@ -744,14 +754,14 @@ Here is how autouse fixtures work in other scopes: functions automatically use it. - if an autouse fixture is defined in a conftest.py file then all tests in - all test modules belows its directory will invoke the fixture. + all test modules below its directory will invoke the fixture. - lastly, and **please use that with care**: if you define an autouse fixture in a plugin, it will be invoked for all tests in all projects where the plugin is installed. This can be useful if a fixture only anyway works in the presence of certain settings e. g. in the ini-file. Such a global fixture should always quickly determine if it should do - any work and avoid expensive imports or computation otherwise. + any work and avoid otherwise expensive imports or computation. 
Note that the above ``transact`` fixture may very well be a fixture that you want to make available in your project without having it generally diff --git a/doc/en/funcarg_compare.txt b/doc/en/funcarg_compare.rst similarity index 99% rename from doc/en/funcarg_compare.txt rename to doc/en/funcarg_compare.rst index e951f87cae3..4d233178759 100644 --- a/doc/en/funcarg_compare.txt +++ b/doc/en/funcarg_compare.rst @@ -205,7 +205,7 @@ fixtures: ``request.cached_setup()`` calls and allowed using other funcargs via ``request.getfuncargvalue()`` calls. These intricate APIs made it hard to do proper parametrization and implement resource caching. The - new :py:func:`pytest.fixture`` decorator allows to declare the scope + new :py:func:`pytest.fixture` decorator allows to declare the scope and let pytest figure things out for you. * if you used parametrization and funcarg factories which made use of diff --git a/doc/en/funcargs.txt b/doc/en/funcargs.rst similarity index 100% rename from doc/en/funcargs.txt rename to doc/en/funcargs.rst diff --git a/doc/en/getting-started.txt b/doc/en/getting-started.rst similarity index 84% rename from doc/en/getting-started.txt rename to doc/en/getting-started.rst index 5800bd34084..0bc0fe484c3 100644 --- a/doc/en/getting-started.txt +++ b/doc/en/getting-started.rst @@ -27,7 +27,7 @@ Installation options:: To check your installation has installed the correct version:: $ py.test --version - This is pytest version 2.7.1, imported from /tmp/sandbox/pytest/.tox/regen/lib/python3.4/site-packages/pytest.py + This is pytest version 2.8.0.dev4, imported from $PWD/pytest.pyc If you get an error checkout :ref:`installation issues`. @@ -48,15 +48,15 @@ Let's create a first test file with a simple test function:: That's it. 
You can execute the test function now:: $ py.test - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-101, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 1 items test_sample.py F - ================================= FAILURES ================================= - _______________________________ test_answer ________________________________ + ======= FAILURES ======== + _______ test_answer ________ def test_answer(): > assert func(3) == 5 @@ -64,7 +64,7 @@ That's it. You can execute the test function now:: E + where 4 = func(3) test_sample.py:5: AssertionError - ========================= 1 failed in 0.01 seconds ========================= + ======= 1 failed in 0.12 seconds ======== ``pytest`` found the ``test_answer`` function by following :ref:`standard test discovery rules `, basically detecting the ``test_`` prefixes. We got a failure report because our little ``func(3)`` call did not return ``5``. @@ -98,7 +98,7 @@ Running it with, this time in "quiet" reporting mode:: $ py.test -q test_sysexit.py . - 1 passed in 0.01 seconds + 1 passed in 0.12 seconds .. todo:: For further ways to assert exceptions see the `raises` @@ -125,10 +125,10 @@ run the module by passing its filename:: $ py.test -q test_class.py .F - ================================= FAILURES ================================= - ____________________________ TestClass.test_two ____________________________ + ======= FAILURES ======== + _______ TestClass.test_two ________ - self = + self = def test_two(self): x = "hello" @@ -136,7 +136,7 @@ run the module by passing its filename:: E assert hasattr('hello', 'check') test_class.py:8: AssertionError - 1 failed, 1 passed in 0.01 seconds + 1 failed, 1 passed in 0.12 seconds The first test passed, the second failed. 
Again we can easily see the intermediate values used in the assertion, helping us to @@ -161,10 +161,10 @@ before performing the test function call. Let's just run it:: $ py.test -q test_tmpdir.py F - ================================= FAILURES ================================= - _____________________________ test_needsfiles ______________________________ + ======= FAILURES ======== + _______ test_needsfiles ________ - tmpdir = local('/tmp/pytest-18/test_needsfiles0') + tmpdir = local('/tmp/pytest-NaN/test_needsfiles0') def test_needsfiles(tmpdir): print (tmpdir) @@ -172,9 +172,9 @@ before performing the test function call. Let's just run it:: E assert 0 test_tmpdir.py:3: AssertionError - --------------------------- Captured stdout call --------------------------- - /tmp/pytest-18/test_needsfiles0 - 1 failed in 0.05 seconds + ----------------------------- Captured stdout call ----------------------------- + /tmp/pytest-NaN/test_needsfiles0 + 1 failed in 0.12 seconds Before the test runs, a unique-per-test-invocation temporary directory was created. More info at :ref:`tmpdir handling`. diff --git a/doc/en/goodpractises.txt b/doc/en/goodpractises.rst similarity index 92% rename from doc/en/goodpractises.txt rename to doc/en/goodpractises.rst index ecb938078ee..675ac6a9f7f 100644 --- a/doc/en/goodpractises.txt +++ b/doc/en/goodpractises.rst @@ -154,8 +154,8 @@ to create a JUnitXML file that Jenkins_ can pick up and generate reports. .. _standalone: .. _`genscript method`: -Create a pytest standalone script -------------------------------------------- +(deprecated) Create a pytest standalone script +----------------------------------------------- If you are a maintainer or application developer and want people who don't deal with python much to easily run tests you may generate @@ -169,6 +169,14 @@ You can tell people to download the script and then e.g. run it like this:: python runtests.py +.. 
note:: + + You must have pytest and its dependencies installed as an sdist, not + as wheels because genscript needs the source code for generating a + standalone script. + + + Integrating with distutils / ``python setup.py test`` -------------------------------------------------------- @@ -266,6 +274,11 @@ using the ``--pytest-args`` or ``-a`` command-line option. For example:: is equivalent to running ``py.test --durations=5``. +.. seealso:: + + For a more powerful solution, take a look at the + `pytest-runner `_ plugin. + .. _`test discovery`: .. _`Python test discovery`: @@ -274,8 +287,11 @@ Conventions for Python test discovery ``pytest`` implements the following standard test discovery: -* collection starts from the initial command line arguments - which may be directories, filenames or test ids. +* collection starts from paths specified in :confval:`testpaths` if configured, + otherwise from initial command line arguments which may be directories, + filenames or test ids. If :confval:`testpaths` is not configured and no + directories or files were given in the command line, start collection from + the current directory. * recurse into directories, unless they match :confval:`norecursedirs` * ``test_*.py`` or ``*_test.py`` files, imported by their `test package name`_. * ``Test`` prefixed test classes (without an ``__init__`` method) diff --git a/doc/en/index.txt b/doc/en/index.rst similarity index 97% rename from doc/en/index.txt rename to doc/en/index.rst index c32c62ba39a..37842f4b519 100644 --- a/doc/en/index.txt +++ b/doc/en/index.rst @@ -51,6 +51,7 @@ pytest: helps you write better programs - all collection, reporting, running aspects are delegated to hook functions - customizations can be per-directory, per-project or per PyPI released plugin - it is easy to add command line options or customize existing behaviour + - :ref:`easy to write your own plugins ` .. 
_`easy`: http://bruynooghe.blogspot.com/2009/12/skipping-slow-test-by-default-in-pytest.html diff --git a/doc/en/mark.txt b/doc/en/mark.rst similarity index 89% rename from doc/en/mark.txt rename to doc/en/mark.rst index 62f77726e60..ab9546d317c 100644 --- a/doc/en/mark.txt +++ b/doc/en/mark.rst @@ -20,6 +20,11 @@ It's easy to create custom markers or to apply markers to whole test classes or modules. See :ref:`mark examples` for examples which also serve as documentation. +.. note:: + + Marks can only be applied to tests, having no effect on + :ref:`fixtures `. + API reference for mark related objects ------------------------------------------------ diff --git a/doc/en/monkeypatch.txt b/doc/en/monkeypatch.rst similarity index 99% rename from doc/en/monkeypatch.txt rename to doc/en/monkeypatch.rst index fa2b250df20..4155a3a345b 100644 --- a/doc/en/monkeypatch.txt +++ b/doc/en/monkeypatch.rst @@ -44,7 +44,6 @@ If you want to prevent the "requests" library from performing http requests in all your tests, you can do:: # content of conftest.py - import pytest @pytest.fixture(autouse=True) def no_requests(monkeypatch): diff --git a/doc/en/naming20.txt b/doc/en/naming20.rst similarity index 100% rename from doc/en/naming20.txt rename to doc/en/naming20.rst diff --git a/doc/en/nose.txt b/doc/en/nose.rst similarity index 90% rename from doc/en/nose.txt rename to doc/en/nose.rst index 7acef7900a1..3b92e04cffd 100644 --- a/doc/en/nose.txt +++ b/doc/en/nose.rst @@ -39,13 +39,13 @@ Unsupported idioms / known issues it doesn't seem useful to duplicate the unittest-API like nose does. If you however rather think pytest should support the unittest-spelling on plain classes please post `to this issue - `_. + `_. - nose imports test modules with the same import path (e.g. ``tests.test_mod``) but different file system paths (e.g. ``tests/test_mode.py`` and ``other/tests/test_mode.py``) by extending sys.path/import semantics. 
pytest does not do that - but there is discussion in `issue268 `_ for adding some support. Note that + but there is discussion in `issue268 `_ for adding some support. Note that `nose2 choose to avoid this sys.path/import hackery `_. - nose-style doctests are not collected and executed correctly, diff --git a/doc/en/overview.txt b/doc/en/overview.rst similarity index 61% rename from doc/en/overview.txt rename to doc/en/overview.rst index 321d79d8c34..b0003effbc7 100644 --- a/doc/en/overview.txt +++ b/doc/en/overview.rst @@ -5,10 +5,10 @@ Getting started basics .. toctree:: :maxdepth: 2 - index.txt - getting-started.txt - usage.txt - goodpractises.txt - projects.txt - faq.txt + index + getting-started + usage + goodpractises + projects + faq diff --git a/doc/en/parametrize.txt b/doc/en/parametrize.rst similarity index 72% rename from doc/en/parametrize.txt rename to doc/en/parametrize.rst index b93491abced..05f18b23ba8 100644 --- a/doc/en/parametrize.txt +++ b/doc/en/parametrize.rst @@ -30,7 +30,9 @@ pytest supports test parametrization in several well-integrated ways: .. regendoc: wipe -.. versionadded:: 2.2, improved in 2.4 +.. versionadded:: 2.2 +.. versionchanged:: 2.4 + Several improvements. The builtin ``pytest.mark.parametrize`` decorator enables parametrization of arguments for a test function. 
Here is a typical example @@ -52,15 +54,15 @@ tuples so that the ``test_eval`` function will run three times using them in turn:: $ py.test - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-109, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 3 items test_expectation.py ..F - ================================= FAILURES ================================= - ____________________________ test_eval[6*9-42] _____________________________ + ======= FAILURES ======== + _______ test_eval[6*9-42] ________ input = '6*9', expected = 42 @@ -75,7 +77,7 @@ them in turn:: E + where 54 = eval('6*9') test_expectation.py:8: AssertionError - ==================== 1 failed, 2 passed in 0.02 seconds ==================== + ======= 1 failed, 2 passed in 0.12 seconds ======== As designed in this example, only one pair of input/output values fails the simple test function. And as usual with test function arguments, @@ -100,18 +102,30 @@ for example with the builtin ``mark.xfail``:: Let's run this:: $ py.test - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-109, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 3 items test_expectation.py ..x - =================== 2 passed, 1 xfailed in 0.02 seconds ==================== + ======= 2 passed, 1 xfailed in 0.12 seconds ======== The one parameter set which caused a failure previously now shows up as an "xfailed (expected to fail)" test. 
+To get all combinations of multiple parametrized arguments you can stack +``parametrize`` decorators:: + + import pytest + @pytest.mark.parametrize("x", [0, 1]) + @pytest.mark.parametrize("y", [2, 3]) + def test_foo(x, y): + pass + +This will run the test with the arguments set to x=0/y=2, x=0/y=3, x=1/y=2 and +x=1/y=3. + .. note:: In versions prior to 2.4 one needed to specify the argument @@ -159,24 +173,24 @@ If we now pass two stringinput values, our test will run twice:: $ py.test -q --stringinput="hello" --stringinput="world" test_strings.py .. - 2 passed in 0.01 seconds + 2 passed in 0.12 seconds Let's also run with a stringinput that will lead to a failing test:: $ py.test -q --stringinput="!" test_strings.py F - ================================= FAILURES ================================= - ___________________________ test_valid_string[!] ___________________________ + ======= FAILURES ======== + _______ test_valid_string[!] ________ stringinput = '!' def test_valid_string(stringinput): > assert stringinput.isalpha() - E assert () - E + where = '!'.isalpha + E assert () + E + where = '!'.isalpha test_strings.py:3: AssertionError - 1 failed in 0.01 seconds + 1 failed in 0.12 seconds As expected our test function fails. @@ -186,9 +200,9 @@ listlist:: $ py.test -q -rs test_strings.py s - ========================= short test summary info ========================== - SKIP [1] /tmp/sandbox/pytest/.tox/regen/lib/python3.4/site-packages/_pytest/python.py:1185: got empty parameter set, function test_valid_string at /tmp/doc-exec-109/test_strings.py:1 - 1 skipped in 0.01 seconds + ======= short test summary info ======== + SKIP [1] $PWD/_pytest/python.py:1201: got empty parameter set, function test_valid_string at $REGENDOC_TMPDIR/test_strings.py:1 + 1 skipped in 0.12 seconds For further examples, you might want to look at :ref:`more parametrization examples `. @@ -199,25 +213,7 @@ The **metafunc** object ------------------------------------------- .. 
currentmodule:: _pytest.python +.. autoclass:: Metafunc -metafunc objects are passed to the ``pytest_generate_tests`` hook. -They help to inspect a testfunction and to generate tests -according to test configuration or values specified -in the class or module where a test function is defined: - -``metafunc.fixturenames``: set of required function arguments for given function - -``metafunc.function``: underlying python test function - -``metafunc.cls``: class object where the test function is defined in or None. - -``metafunc.module``: the module object where the test function is defined in. - -``metafunc.config``: access to command line opts and general config - -``metafunc.funcargnames``: alias for ``fixturenames``, for pre-2.3 compatibility - -.. automethod:: Metafunc.parametrize -.. automethod:: Metafunc.addcall(funcargs=None,id=_notexists,param=_notexists) - - + .. automethod:: Metafunc.parametrize + .. automethod:: Metafunc.addcall(funcargs=None,id=_notexists,param=_notexists) diff --git a/doc/en/plugins.rst b/doc/en/plugins.rst new file mode 100644 index 00000000000..48160dafaa3 --- /dev/null +++ b/doc/en/plugins.rst @@ -0,0 +1,147 @@ +.. _`external plugins`: +.. _`extplugins`: +.. _`using plugins`: + +Installing and Using plugins +============================ + +This section talks about installing and using third party plugins. +For writing your own plugins, please refer to :ref:`writing-plugins`. + +Installing a third party plugin can be easily done with ``pip``:: + + pip install pytest-NAME + pip uninstall pytest-NAME + +If a plugin is installed, ``pytest`` automatically finds and integrates it, +there is no need to activate it. We have a :doc:`page listing +all 3rd party plugins and their status against the latest py.test version +` and here is a little annotated list +for some popular plugins: + +.. _`django`: https://www.djangoproject.com/ + +* `pytest-django `_: write tests + for `django`_ apps, using pytest integration. 
+ +* `pytest-twisted `_: write tests + for `twisted `_ apps, starting a reactor and + processing deferreds from test functions. + +* `pytest-capturelog `_: + to capture and assert about messages from the logging module + +* `pytest-cov `_: + coverage reporting, compatible with distributed testing + +* `pytest-xdist `_: + to distribute tests to CPUs and remote hosts, to run in boxed + mode which allows to survive segmentation faults, to run in + looponfailing mode, automatically re-running failing tests + on file changes, see also :ref:`xdist` + +* `pytest-instafail `_: + to report failures while the test run is happening. + +* `pytest-bdd `_ and + `pytest-konira `_ + to write tests using behaviour-driven testing. + +* `pytest-timeout `_: + to timeout tests based on function marks or global definitions. + +* `pytest-cache `_: + to interactively re-run failing tests and help other plugins to + store test run information across invocations. + +* `pytest-pep8 `_: + a ``--pep8`` option to enable PEP8 compliance checking. + +* `oejskit `_: + a plugin to run javascript unittests in live browsers + +To see a complete list of all plugins with their latest testing +status against different py.test and Python versions, please visit +`plugincompat `_. + +You may also discover more plugins through a `pytest- pypi.python.org search`_. + +.. _`available installable plugins`: +.. _`pytest- pypi.python.org search`: http://pypi.python.org/pypi?%3Aaction=search&term=pytest-&submit=search + + +Requiring/Loading plugins in a test module or conftest file +----------------------------------------------------------- + +You can require plugins in a test module or a conftest file like this:: + + pytest_plugins = "myapp.testsupport.myplugin", + +When the test module or conftest plugin is loaded the specified plugins +will be loaded as well. + + pytest_plugins = "myapp.testsupport.myplugin" + +which will import the specified module as a ``pytest`` plugin. + +.. 
_`findpluginname`: + +Finding out which plugins are active +------------------------------------ + +If you want to find out which plugins are active in your +environment you can type:: + + py.test --traceconfig + +and will get an extended test header which shows activated plugins +and their names. It will also print local plugins aka +:ref:`conftest.py ` files when they are loaded. + +.. _`cmdunregister`: + +Deactivating / unregistering a plugin by name +--------------------------------------------- + +You can prevent plugins from loading or unregister them:: + + py.test -p no:NAME + +This means that any subsequent try to activate/load the named +plugin will not work. See :ref:`findpluginname` for +how to obtain the name of a plugin. + +.. _`builtin plugins`: + +pytest default plugin reference +=============================== + + +You can find the source code for the following plugins +in the `pytest repository `_. + +.. autosummary:: + + _pytest.assertion + _pytest.capture + _pytest.config + _pytest.doctest + _pytest.genscript + _pytest.helpconfig + _pytest.junitxml + _pytest.mark + _pytest.monkeypatch + _pytest.nose + _pytest.pastebin + _pytest.pdb + _pytest.pytester + _pytest.python + _pytest.recwarn + _pytest.resultlog + _pytest.runner + _pytest.main + _pytest.skipping + _pytest.terminal + _pytest.tmpdir + _pytest.unittest + diff --git a/doc/en/plugins_index/index.rst b/doc/en/plugins_index/index.rst new file mode 100644 index 00000000000..9bd74fab828 --- /dev/null +++ b/doc/en/plugins_index/index.rst @@ -0,0 +1,290 @@ +.. _plugins_index: + +List of Third-Party Plugins +=========================== + +The table below contains a listing of plugins found in PyPI and +their status when tested using the latest py.test and python versions. + +A complete listing can also be found at +`plugincompat `_, which contains test +status against other py.test releases. 
+ + +============================================================================================ ===================================================================================================================== ===================================================================================================================== =========================================================================== ============================================================================================================================================= + Name Py27 Py34 Home Summary +============================================================================================ ===================================================================================================================== ===================================================================================================================== =========================================================================== ============================================================================================================================================= + `pytest-allure-adaptor `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-allure-adaptor-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-allure-adaptor-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png Plugin for py.test to generate allure xml reports + :target: http://plugincompat.herokuapp.com/output/pytest-allure-adaptor-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-allure-adaptor-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/allure-framework/allure-python + `pytest-ansible `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-ansible-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-ansible-latest?py=py34&pytest=2.8.0.dev4 .. 
image:: github.png Plugin for py.test to allow running ansible + :target: http://plugincompat.herokuapp.com/output/pytest-ansible-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-ansible-latest?py=py34&pytest=2.8.0.dev4 :target: http://github.com/jlaska/pytest-ansible + `pytest-asyncio `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-asyncio-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-asyncio-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png Pytest support for asyncio. + :target: http://plugincompat.herokuapp.com/output/pytest-asyncio-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-asyncio-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/pytest-dev/pytest-asyncio + `pytest-autochecklog `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-autochecklog-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-autochecklog-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png automatically check condition and log all the checks + :target: http://plugincompat.herokuapp.com/output/pytest-autochecklog-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-autochecklog-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/steven004/python-autochecklog + `pytest-bdd `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-bdd-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-bdd-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png BDD for pytest + :target: http://plugincompat.herokuapp.com/output/pytest-bdd-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-bdd-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/pytest-dev/pytest-bdd + `pytest-beakerlib `_ .. 
image:: http://plugincompat.herokuapp.com/status/pytest-beakerlib-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-beakerlib-latest?py=py34&pytest=2.8.0.dev4 `link `_ A pytest plugin that reports test results to the BeakerLib framework + :target: http://plugincompat.herokuapp.com/output/pytest-beakerlib-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-beakerlib-latest?py=py34&pytest=2.8.0.dev4 + `pytest-beds `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-beds-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-beds-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png Fixtures for testing Google Appengine (GAE) apps + :target: http://plugincompat.herokuapp.com/output/pytest-beds-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-beds-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/kaste/pytest-beds + `pytest-bench `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-bench-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-bench-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png Benchmark utility that plugs into pytest. + :target: http://plugincompat.herokuapp.com/output/pytest-bench-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-bench-latest?py=py34&pytest=2.8.0.dev4 :target: http://github.com/concordusapps/pytest-bench + `pytest-benchmark `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-benchmark-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-benchmark-latest?py=py34&pytest=2.8.0.dev4 .. 
image:: github.png py.test fixture for benchmarking code + :target: http://plugincompat.herokuapp.com/output/pytest-benchmark-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-benchmark-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/ionelmc/pytest-benchmark + `pytest-blockage `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-blockage-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-blockage-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png Disable network requests during a test run. + :target: http://plugincompat.herokuapp.com/output/pytest-blockage-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-blockage-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/rob-b/pytest-blockage + `pytest-bpdb `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-bpdb-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-bpdb-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png A py.test plug-in to enable drop to bpdb debugger on test failure. + :target: http://plugincompat.herokuapp.com/output/pytest-bpdb-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-bpdb-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/slafs/pytest-bpdb + `pytest-browsermob-proxy `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-browsermob-proxy-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-browsermob-proxy-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png BrowserMob proxy plugin for py.test. 
+ :target: http://plugincompat.herokuapp.com/output/pytest-browsermob-proxy-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-browsermob-proxy-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/davehunt/pytest-browsermob-proxy + `pytest-bugzilla `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-bugzilla-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-bugzilla-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png py.test bugzilla integration plugin + :target: http://plugincompat.herokuapp.com/output/pytest-bugzilla-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-bugzilla-latest?py=py34&pytest=2.8.0.dev4 :target: http://github.com/nibrahim/pytest_bugzilla + `pytest-marker-bugzilla `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-marker-bugzilla-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-marker-bugzilla-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png py.test bugzilla integration plugin, using markers + :target: http://plugincompat.herokuapp.com/output/pytest-marker-bugzilla-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-marker-bugzilla-latest?py=py34&pytest=2.8.0.dev4 :target: http://github.com/eanxgeek/pytest_marker_bugzilla + `pytest-remove-stale-bytecode `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-remove-stale-bytecode-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-remove-stale-bytecode-latest?py=py34&pytest=2.8.0.dev4 .. image:: bitbucket.png py.test plugin to remove stale byte code files. 
+ :target: http://plugincompat.herokuapp.com/output/pytest-remove-stale-bytecode-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-remove-stale-bytecode-latest?py=py34&pytest=2.8.0.dev4 :target: https://bitbucket.org/gocept/pytest-remove-stale-bytecode/ + `pytest-cache `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-cache-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-cache-latest?py=py34&pytest=2.8.0.dev4 .. image:: bitbucket.png pytest plugin with mechanisms for caching across test runs + :target: http://plugincompat.herokuapp.com/output/pytest-cache-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-cache-latest?py=py34&pytest=2.8.0.dev4 :target: http://bitbucket.org/hpk42/pytest-cache/ + `pytest-cagoule `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-cagoule-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-cagoule-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png Pytest plugin to only run tests affected by changes + :target: http://plugincompat.herokuapp.com/output/pytest-cagoule-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-cagoule-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/davidszotten/pytest-cagoule + `pytest-capturelog `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-capturelog-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-capturelog-latest?py=py34&pytest=2.8.0.dev4 .. 
image:: bitbucket.png py.test plugin to capture log messages + :target: http://plugincompat.herokuapp.com/output/pytest-capturelog-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-capturelog-latest?py=py34&pytest=2.8.0.dev4 :target: http://bitbucket.org/memedough/pytest-capturelog/overview + `pytest-django-casperjs `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-django-casperjs-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-django-casperjs-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png Integrate CasperJS with your django tests as a pytest fixture. + :target: http://plugincompat.herokuapp.com/output/pytest-django-casperjs-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-django-casperjs-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/EnTeQuAk/pytest-django-casperjs/ + `pytest-catchlog `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-catchlog-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-catchlog-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png py.test plugin to catch log messages. This is a fork of pytest-capturelog. + :target: http://plugincompat.herokuapp.com/output/pytest-catchlog-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-catchlog-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/eisensheng/pytest-catchlog + `pytest-circleci `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-circleci-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-circleci-latest?py=py34&pytest=2.8.0.dev4 .. 
image:: github.png py.test plugin for CircleCI + :target: http://plugincompat.herokuapp.com/output/pytest-circleci-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-circleci-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/micktwomey/pytest-circleci + `pytest-cloud `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-cloud-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-cloud-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png Distributed tests planner plugin for pytest testing framework. + :target: http://plugincompat.herokuapp.com/output/pytest-cloud-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-cloud-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/pytest-dev/pytest-cloud + `pytest-codecheckers `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-codecheckers-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-codecheckers-latest?py=py34&pytest=2.8.0.dev4 .. image:: bitbucket.png pytest plugin to add source code sanity checks (pep8 and friends) + :target: http://plugincompat.herokuapp.com/output/pytest-codecheckers-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-codecheckers-latest?py=py34&pytest=2.8.0.dev4 :target: http://bitbucket.org/RonnyPfannschmidt/pytest-codecheckers/ + `pytest-colordots `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-colordots-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-colordots-latest?py=py34&pytest=2.8.0.dev4 .. 
image:: github.png Colorizes the progress indicators + :target: http://plugincompat.herokuapp.com/output/pytest-colordots-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-colordots-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/svenstaro/pytest-colordots + `pytest-paste-config `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-paste-config-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-paste-config-latest?py=py34&pytest=2.8.0.dev4 ? Allow setting the path to a paste config file + :target: http://plugincompat.herokuapp.com/output/pytest-paste-config-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-paste-config-latest?py=py34&pytest=2.8.0.dev4 + `pytest-config `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-config-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-config-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png Base configurations and utilities for developing your Python project test suite with pytest. + :target: http://plugincompat.herokuapp.com/output/pytest-config-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-config-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/buzzfeed/pytest_config + `pytest-contextfixture `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-contextfixture-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-contextfixture-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png Define pytest fixtures as context managers. 
+ :target: http://plugincompat.herokuapp.com/output/pytest-contextfixture-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-contextfixture-latest?py=py34&pytest=2.8.0.dev4 :target: http://github.com/pelme/pytest-contextfixture/ + `pytest-couchdbkit `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-couchdbkit-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-couchdbkit-latest?py=py34&pytest=2.8.0.dev4 .. image:: bitbucket.png py.test extension for per-test couchdb databases using couchdbkit + :target: http://plugincompat.herokuapp.com/output/pytest-couchdbkit-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-couchdbkit-latest?py=py34&pytest=2.8.0.dev4 :target: http://bitbucket.org/RonnyPfannschmidt/pytest-couchdbkit + `pytest-cov `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-cov-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-cov-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png py.test plugin for coverage reporting with support for both centralised and distributed testing, including subprocesses and multiprocessing + :target: http://plugincompat.herokuapp.com/output/pytest-cov-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-cov-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/schlamar/pytest-cov + `pytest-cover `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-cover-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-cover-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png Pytest plugin for measuring coverage. Forked from `pytest-cov`. 
+ :target: http://plugincompat.herokuapp.com/output/pytest-cover-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-cover-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/ionelmc/pytest-cover + `pytest-coverage `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-coverage-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-coverage-latest?py=py34&pytest=2.8.0.dev4 `link `_ Pytest plugin for measuring coverage. Forked from `pytest-cov`. + :target: http://plugincompat.herokuapp.com/output/pytest-coverage-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-coverage-latest?py=py34&pytest=2.8.0.dev4 + `pytest-cpp `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-cpp-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-cpp-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png Use pytest's runner to discover and execute C++ tests + :target: http://plugincompat.herokuapp.com/output/pytest-cpp-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-cpp-latest?py=py34&pytest=2.8.0.dev4 :target: http://github.com/pytest-dev/pytest-cpp + `pytest-curl-report `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-curl-report-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-curl-report-latest?py=py34&pytest=2.8.0.dev4 .. image:: bitbucket.png pytest plugin to generate curl command line report + :target: http://plugincompat.herokuapp.com/output/pytest-curl-report-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-curl-report-latest?py=py34&pytest=2.8.0.dev4 :target: https://bitbucket.org/pytest-dev/pytest-curl-report + `pytest-dbfixtures `_ .. 
image:: http://plugincompat.herokuapp.com/status/pytest-dbfixtures-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-dbfixtures-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png Databases fixtures plugin for py.test. + :target: http://plugincompat.herokuapp.com/output/pytest-dbfixtures-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-dbfixtures-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/ClearcodeHQ/pytest-dbfixtures + `pytest-dbus-notification `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-dbus-notification-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-dbus-notification-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png D-BUS notifications for pytest results. + :target: http://plugincompat.herokuapp.com/output/pytest-dbus-notification-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-dbus-notification-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/bmathieu33/pytest-dbus-notification + `pytest-describe `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-describe-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-describe-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png Describe-style plugin for pytest + :target: http://plugincompat.herokuapp.com/output/pytest-describe-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-describe-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/ropez/pytest-describe + `pytest-diffeo `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-diffeo-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-diffeo-latest?py=py34&pytest=2.8.0.dev4 .. 
image:: github.png Common py.test support for Diffeo packages + :target: http://plugincompat.herokuapp.com/output/pytest-diffeo-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-diffeo-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/diffeo/pytest-diffeo + `pytest-django-sqlcounts `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-django-sqlcounts-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-django-sqlcounts-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png py.test plugin for reporting the number of SQLs executed per django testcase. + :target: http://plugincompat.herokuapp.com/output/pytest-django-sqlcounts-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-django-sqlcounts-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/stj/pytest-django-sqlcount + `pytest-django-sqlcount `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-django-sqlcount-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-django-sqlcount-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png py.test plugin for reporting the number of SQLs executed per django testcase. + :target: http://plugincompat.herokuapp.com/output/pytest-django-sqlcount-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-django-sqlcount-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/stj/pytest-django-sqlcount + `pytest-django-haystack `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-django-haystack-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-django-haystack-latest?py=py34&pytest=2.8.0.dev4 .. 
image:: github.png Cleanup your Haystack indexes between tests + :target: http://plugincompat.herokuapp.com/output/pytest-django-haystack-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-django-haystack-latest?py=py34&pytest=2.8.0.dev4 :target: http://github.com/rouge8/pytest-django-haystack + `pytest-django-lite `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-django-lite-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-django-lite-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png The bare minimum to integrate py.test with Django. + :target: http://plugincompat.herokuapp.com/output/pytest-django-lite-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-django-lite-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/dcramer/pytest-django-lite + `pytest-django `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-django-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-django-latest?py=py34&pytest=2.8.0.dev4 `link `_ A Django plugin for py.test. + :target: http://plugincompat.herokuapp.com/output/pytest-django-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-django-latest?py=py34&pytest=2.8.0.dev4 + `pytest-doc `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-doc-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-doc-latest?py=py34&pytest=2.8.0.dev4 `link `_ A documentation plugin for py.test. + :target: http://plugincompat.herokuapp.com/output/pytest-doc-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-doc-latest?py=py34&pytest=2.8.0.dev4 + `pytest-dump2json `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-dump2json-latest?py=py27&pytest=2.8.0.dev4 .. 
image:: http://plugincompat.herokuapp.com/status/pytest-dump2json-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png A pytest plugin for dumping test results to json. + :target: http://plugincompat.herokuapp.com/output/pytest-dump2json-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-dump2json-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/d6e/pytest-dump2json + `pytest-echo `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-echo-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-echo-latest?py=py34&pytest=2.8.0.dev4 `link `_ pytest plugin with mechanisms for echoing environment variables, package version and generic attributes + :target: http://plugincompat.herokuapp.com/output/pytest-echo-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-echo-latest?py=py34&pytest=2.8.0.dev4 + `pytest-env `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-env-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-env-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png py.test plugin that allows you to add environment variables. + :target: http://plugincompat.herokuapp.com/output/pytest-env-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-env-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/MobileDynasty/pytest-env + `pytest-eradicate `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-eradicate-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-eradicate-latest?py=py34&pytest=2.8.0.dev4 .. 
image:: github.png pytest plugin to check for commented out code + :target: http://plugincompat.herokuapp.com/output/pytest-eradicate-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-eradicate-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/spil-johan/pytest-eradicate + `pytest-factoryboy `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-factoryboy-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-factoryboy-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png Factory Boy support for pytest. + :target: http://plugincompat.herokuapp.com/output/pytest-factoryboy-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-factoryboy-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/pytest-dev/pytest-factoryboy + `pytest-poo-fail `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-poo-fail-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-poo-fail-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png Visualize your failed tests with poo + :target: http://plugincompat.herokuapp.com/output/pytest-poo-fail-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-poo-fail-latest?py=py34&pytest=2.8.0.dev4 :target: http://github.com/alyssa.barela/pytest-poo-fail + `pytest-faker `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-faker-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-faker-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png Faker integration for pytest framework. 
+ :target: http://plugincompat.herokuapp.com/output/pytest-faker-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-faker-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/pytest-dev/pytest-faker + `pytest-faulthandler `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-faulthandler-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-faulthandler-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png py.test plugin that activates the fault handler module for tests + :target: http://plugincompat.herokuapp.com/output/pytest-faulthandler-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-faulthandler-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/pytest-dev/pytest-faulthandler + `pytest-fauxfactory `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-fauxfactory-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-fauxfactory-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png Integration of fauxfactory into pytest. + :target: http://plugincompat.herokuapp.com/output/pytest-fauxfactory-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-fauxfactory-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/mfalesni/pytest-fauxfactory + `pytest-figleaf `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-figleaf-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-figleaf-latest?py=py34&pytest=2.8.0.dev4 .. 
image:: bitbucket.png py.test figleaf coverage plugin + :target: http://plugincompat.herokuapp.com/output/pytest-figleaf-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-figleaf-latest?py=py34&pytest=2.8.0.dev4 :target: http://bitbucket.org/hpk42/pytest-figleaf + `pytest-fixture-tools `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-fixture-tools-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-fixture-tools-latest?py=py34&pytest=2.8.0.dev4 ? Plugin for pytest which provides tools for fixtures + :target: http://plugincompat.herokuapp.com/output/pytest-fixture-tools-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-fixture-tools-latest?py=py34&pytest=2.8.0.dev4 + `pytest-flake8 `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-flake8-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-flake8-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png pytest plugin to check FLAKE8 requirements + :target: http://plugincompat.herokuapp.com/output/pytest-flake8-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-flake8-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/tholo/pytest-flake8 + `pytest-flakes `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-flakes-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-flakes-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png pytest plugin to check source code with pyflakes + :target: http://plugincompat.herokuapp.com/output/pytest-flakes-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-flakes-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/fschulze/pytest-flakes + `pytest-flask `_ .. 
image:: http://plugincompat.herokuapp.com/status/pytest-flask-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-flask-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png A set of py.test fixtures to test Flask applications. + :target: http://plugincompat.herokuapp.com/output/pytest-flask-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-flask-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/vitalk/pytest-flask + `pytest-gitignore `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-gitignore-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-gitignore-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png py.test plugin to ignore the same files as git + :target: http://plugincompat.herokuapp.com/output/pytest-gitignore-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-gitignore-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/tgs/pytest-gitignore + `pytest-greendots `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-greendots-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-greendots-latest?py=py34&pytest=2.8.0.dev4 ? Green progress dots + :target: http://plugincompat.herokuapp.com/output/pytest-greendots-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-greendots-latest?py=py34&pytest=2.8.0.dev4 + `pytest-growl `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-growl-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-growl-latest?py=py34&pytest=2.8.0.dev4 ? Growl notifications for pytest results. 
+ :target: http://plugincompat.herokuapp.com/output/pytest-growl-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-growl-latest?py=py34&pytest=2.8.0.dev4 + `pytest-html `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-html-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-html-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png pytest plugin for generating HTML reports + :target: http://plugincompat.herokuapp.com/output/pytest-html-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-html-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/davehunt/pytest-html + `pytest-httpbin `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-httpbin-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-httpbin-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png Easily test your HTTP library against a local copy of httpbin + :target: http://plugincompat.herokuapp.com/output/pytest-httpbin-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-httpbin-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/kevin1024/pytest-httpbin + `pytest-httpretty `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-httpretty-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-httpretty-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png A thin wrapper of HTTPretty for pytest + :target: http://plugincompat.herokuapp.com/output/pytest-httpretty-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-httpretty-latest?py=py34&pytest=2.8.0.dev4 :target: http://github.com/papaeye/pytest-httpretty + `pytest-incremental `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-incremental-latest?py=py27&pytest=2.8.0.dev4 .. 
image:: http://plugincompat.herokuapp.com/status/pytest-incremental-latest?py=py34&pytest=2.8.0.dev4 `link `_ an incremental test runner (pytest plugin) + :target: http://plugincompat.herokuapp.com/output/pytest-incremental-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-incremental-latest?py=py34&pytest=2.8.0.dev4 + `pytest-instafail `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-instafail-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-instafail-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png py.test plugin to show failures instantly + :target: http://plugincompat.herokuapp.com/output/pytest-instafail-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-instafail-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/jpvanhal/pytest-instafail + `pytest-ipdb `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-ipdb-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-ipdb-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png A py.test plug-in to enable drop to ipdb debugger on test failure. + :target: http://plugincompat.herokuapp.com/output/pytest-ipdb-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-ipdb-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/mverteuil/pytest-ipdb + `pytest-ipynb `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-ipynb-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-ipynb-latest?py=py34&pytest=2.8.0.dev4 .. 
image:: github.png Use pytest's runner to discover and execute tests as cells of IPython notebooks + :target: http://plugincompat.herokuapp.com/output/pytest-ipynb-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-ipynb-latest?py=py34&pytest=2.8.0.dev4 :target: http://github.com/zonca/pytest-ipynb + `pytest-isort `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-isort-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-isort-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png pytest plugin to perform isort checks (import ordering) + :target: http://plugincompat.herokuapp.com/output/pytest-isort-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-isort-latest?py=py34&pytest=2.8.0.dev4 :target: http://github.com/moccu/pytest-isort/ + `pytest-jira `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-jira-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-jira-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png py.test JIRA integration plugin, using markers + :target: http://plugincompat.herokuapp.com/output/pytest-jira-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-jira-latest?py=py34&pytest=2.8.0.dev4 :target: http://github.com/jlaska/pytest_jira + `pytest-knows `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-knows-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-knows-latest?py=py34&pytest=2.8.0.dev4 .. 
image:: github.png A pytest plugin that can automaticly skip test case based on dependence info calculated by trace + :target: http://plugincompat.herokuapp.com/output/pytest-knows-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-knows-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/mapix/ptknows + `pytest-konira `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-konira-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-konira-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png Run Konira DSL tests with py.test + :target: http://plugincompat.herokuapp.com/output/pytest-konira-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-konira-latest?py=py34&pytest=2.8.0.dev4 :target: http://github.com/alfredodeza/pytest-konira + `pytest-localserver `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-localserver-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-localserver-latest?py=py34&pytest=2.8.0.dev4 .. image:: bitbucket.png py.test plugin to test server connections locally. + :target: http://plugincompat.herokuapp.com/output/pytest-localserver-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-localserver-latest?py=py34&pytest=2.8.0.dev4 :target: http://bitbucket.org/basti/pytest-localserver/ + `pytest-markfiltration `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-markfiltration-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-markfiltration-latest?py=py34&pytest=2.8.0.dev4 .. 
image:: github.png UNKNOWN + :target: http://plugincompat.herokuapp.com/output/pytest-markfiltration-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-markfiltration-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/adamgoucher/pytest-markfiltration + `pytest-marks `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-marks-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-marks-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png UNKNOWN + :target: http://plugincompat.herokuapp.com/output/pytest-marks-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-marks-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/adamgoucher/pytest-marks + `pytest-mccabe `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-mccabe-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-mccabe-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png pytest plugin to run the mccabe code complexity checker. + :target: http://plugincompat.herokuapp.com/output/pytest-mccabe-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-mccabe-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/The-Compiler/pytest-mccabe + `pytest-mock `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-mock-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-mock-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png Thin-wrapper around the mock package for easier use with py.test + :target: http://plugincompat.herokuapp.com/output/pytest-mock-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-mock-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/pytest-dev/pytest-mock/ + `pytest-monkeyplus `_ .. 
image:: http://plugincompat.herokuapp.com/status/pytest-monkeyplus-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-monkeyplus-latest?py=py34&pytest=2.8.0.dev4 .. image:: bitbucket.png pytest's monkeypatch subclass with extra functionalities + :target: http://plugincompat.herokuapp.com/output/pytest-monkeyplus-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-monkeyplus-latest?py=py34&pytest=2.8.0.dev4 :target: http://bitbucket.org/hsoft/pytest-monkeyplus/ + `pytest-mozwebqa `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-mozwebqa-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-mozwebqa-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png Mozilla WebQA plugin for py.test. + :target: http://plugincompat.herokuapp.com/output/pytest-mozwebqa-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-mozwebqa-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/mozilla/pytest-mozwebqa + `pytest-mpl `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-mpl-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-mpl-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png pytest plugin to help with testing figures output from Matplotlib + :target: http://plugincompat.herokuapp.com/output/pytest-mpl-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-mpl-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/astrofrog/pytest-mpl + `pytest-multihost `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-multihost-latest?py=py27&pytest=2.8.0.dev4 .. 
image:: http://plugincompat.herokuapp.com/status/pytest-multihost-latest?py=py34&pytest=2.8.0.dev4 `link `_ Utility for writing multi-host tests for pytest + :target: http://plugincompat.herokuapp.com/output/pytest-multihost-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-multihost-latest?py=py34&pytest=2.8.0.dev4 + `pytest-oerp `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-oerp-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-oerp-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png pytest plugin to test OpenERP modules + :target: http://plugincompat.herokuapp.com/output/pytest-oerp-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-oerp-latest?py=py34&pytest=2.8.0.dev4 :target: http://github.com/santagada/pytest-oerp/ + `pytest-oot `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-oot-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-oot-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png Run object-oriented tests in a simple format + :target: http://plugincompat.herokuapp.com/output/pytest-oot-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-oot-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/steven004/pytest_oot + `pytest-optional `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-optional-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-optional-latest?py=py34&pytest=2.8.0.dev4 .. image:: bitbucket.png include/exclude values of fixtures in pytest + :target: http://plugincompat.herokuapp.com/output/pytest-optional-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-optional-latest?py=py34&pytest=2.8.0.dev4 :target: http://bitbucket.org/maho/pytest-optional + `pytest-ordering `_ .. 
image:: http://plugincompat.herokuapp.com/status/pytest-ordering-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-ordering-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png pytest plugin to run your tests in a specific order + :target: http://plugincompat.herokuapp.com/output/pytest-ordering-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-ordering-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/ftobia/pytest-ordering + `pytest-osxnotify `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-osxnotify-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-osxnotify-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png OS X notifications for py.test results. + :target: http://plugincompat.herokuapp.com/output/pytest-osxnotify-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-osxnotify-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/dbader/pytest-osxnotify + `pytest-pep257 `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-pep257-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-pep257-latest?py=py34&pytest=2.8.0.dev4 ? py.test plugin for pep257 + :target: http://plugincompat.herokuapp.com/output/pytest-pep257-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-pep257-latest?py=py34&pytest=2.8.0.dev4 + `pytest-pep8 `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-pep8-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-pep8-latest?py=py34&pytest=2.8.0.dev4 .. 
image:: bitbucket.png pytest plugin to check PEP8 requirements + :target: http://plugincompat.herokuapp.com/output/pytest-pep8-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-pep8-latest?py=py34&pytest=2.8.0.dev4 :target: http://bitbucket.org/hpk42/pytest-pep8/ + `pytest-pipeline `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-pipeline-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-pipeline-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png Pytest plugin for functional testing of data analysis pipelines + :target: http://plugincompat.herokuapp.com/output/pytest-pipeline-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-pipeline-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/bow/pytest-pipeline + `pytest-poo `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-poo-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-poo-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png Visualize your crappy tests + :target: http://plugincompat.herokuapp.com/output/pytest-poo-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-poo-latest?py=py34&pytest=2.8.0.dev4 :target: http://github.com/pelme/pytest-poo + `pytest-proper-wheel `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-proper-wheel-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-proper-wheel-latest?py=py34&pytest=2.8.0.dev4 `link `_ pytest: simple powerful testing with Python + :target: http://plugincompat.herokuapp.com/output/pytest-proper-wheel-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-proper-wheel-latest?py=py34&pytest=2.8.0.dev4 + `pytest-purkinje `_ .. 
image:: http://plugincompat.herokuapp.com/status/pytest-purkinje-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-purkinje-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png py.test plugin for purkinje test runner + :target: http://plugincompat.herokuapp.com/output/pytest-purkinje-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-purkinje-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/bbiskup + `pytest-pycharm `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-pycharm-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-pycharm-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png Plugin for py.test to enter PyCharm debugger on uncaught exceptions + :target: http://plugincompat.herokuapp.com/output/pytest-pycharm-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-pycharm-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/jlubcke/pytest-pycharm + `pytest-pydev `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-pydev-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-pydev-latest?py=py34&pytest=2.8.0.dev4 .. image:: bitbucket.png py.test plugin to connect to a remote debug server with PyDev or PyCharm. + :target: http://plugincompat.herokuapp.com/output/pytest-pydev-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-pydev-latest?py=py34&pytest=2.8.0.dev4 :target: http://bitbucket.org/basti/pytest-pydev/ + `pytest-pylint `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-pylint-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-pylint-latest?py=py34&pytest=2.8.0.dev4 .. 
image:: github.png pytest plugin to check source code with pylint + :target: http://plugincompat.herokuapp.com/output/pytest-pylint-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-pylint-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/carsongee/pytest-pylint + `pytest-pyq `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-pyq-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-pyq-latest?py=py34&pytest=2.8.0.dev4 `link `_ Pytest fixture "q" for pyq + :target: http://plugincompat.herokuapp.com/output/pytest-pyq-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-pyq-latest?py=py34&pytest=2.8.0.dev4 + `pytest-sftpserver `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-sftpserver-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-sftpserver-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png py.test plugin to locally test sftp server connections. + :target: http://plugincompat.herokuapp.com/output/pytest-sftpserver-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-sftpserver-latest?py=py34&pytest=2.8.0.dev4 :target: http://github.com/ulope/pytest-sftpserver/ + `pytest-rage `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-rage-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-rage-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png pytest plugin to implement PEP712 + :target: http://plugincompat.herokuapp.com/output/pytest-rage-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-rage-latest?py=py34&pytest=2.8.0.dev4 :target: http://github.com/santagada/pytest-rage/ + `pytest-smartcov `_ .. 
image:: http://plugincompat.herokuapp.com/status/pytest-smartcov-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-smartcov-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png Smart coverage plugin for pytest. + :target: http://plugincompat.herokuapp.com/output/pytest-smartcov-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-smartcov-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/carljm/pytest-smartcov/ + `pytest-variables `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-variables-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-variables-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png pytest plugin for providing variables to tests/fixtures + :target: http://plugincompat.herokuapp.com/output/pytest-variables-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-variables-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/davehunt/pytest-variables + `pytest-selenium `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-selenium-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-selenium-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png A selenium plugin for pytest + :target: http://plugincompat.herokuapp.com/output/pytest-selenium-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-selenium-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/codingjoe/pytest-selenium + `pytest-readme `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-readme-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-readme-latest?py=py34&pytest=2.8.0.dev4 .. 
image:: github.png Test your README.md file + :target: http://plugincompat.herokuapp.com/output/pytest-readme-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-readme-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/boxed/pytest-readme + `pytest-translations `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-translations-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-translations-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png Test your translation files + :target: http://plugincompat.herokuapp.com/output/pytest-translations-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-translations-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/thermondo/pytest-translations + `pytest-xprocess `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-xprocess-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-xprocess-latest?py=py34&pytest=2.8.0.dev4 .. image:: bitbucket.png pytest plugin to manage external processes across test runs + :target: http://plugincompat.herokuapp.com/output/pytest-xprocess-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-xprocess-latest?py=py34&pytest=2.8.0.dev4 :target: http://bitbucket.org/hpk42/pytest-xprocess/ + `pytest-random `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-random-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-random-latest?py=py34&pytest=2.8.0.dev4 .. 
image:: github.png py.test plugin to randomize tests + :target: http://plugincompat.herokuapp.com/output/pytest-random-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-random-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/klrmn/pytest-random + `pytest-sourceorder `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-sourceorder-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-sourceorder-latest?py=py34&pytest=2.8.0.dev4 `link `_ Test-ordering plugin for pytest + :target: http://plugincompat.herokuapp.com/output/pytest-sourceorder-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-sourceorder-latest?py=py34&pytest=2.8.0.dev4 + `pytest-zap `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-zap-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-zap-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png OWASP ZAP plugin for py.test. + :target: http://plugincompat.herokuapp.com/output/pytest-zap-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-zap-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/davehunt/pytest-zap + `pytest-raisesregexp `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-raisesregexp-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-raisesregexp-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png Simple pytest plugin to look for regex in Exceptions + :target: http://plugincompat.herokuapp.com/output/pytest-raisesregexp-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-raisesregexp-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/Walkman/pytest_raisesregexp + `pytest-trialtemp `_ .. 
image:: http://plugincompat.herokuapp.com/status/pytest-trialtemp-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-trialtemp-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png py.test plugin for using the same _trial_temp working directory as trial + :target: http://plugincompat.herokuapp.com/output/pytest-trialtemp-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-trialtemp-latest?py=py34&pytest=2.8.0.dev4 :target: http://github.com/jerith/pytest-trialtemp + `pytest-sftpserver `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-sftpserver-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-sftpserver-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png py.test plugin to locally test sftp server connections. + :target: http://plugincompat.herokuapp.com/output/pytest-sftpserver-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-sftpserver-latest?py=py34&pytest=2.8.0.dev4 :target: http://github.com/ulope/pytest-sftpserver/ + `pytest-rerunfailures `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-rerunfailures-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-rerunfailures-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png py.test plugin to re-run tests to eliminate flakey failures + :target: http://plugincompat.herokuapp.com/output/pytest-rerunfailures-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-rerunfailures-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/klrmn/pytest-rerunfailures + `pytest-spec `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-spec-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-spec-latest?py=py34&pytest=2.8.0.dev4 .. 
image:: github.png pytest plugin to display test execution output like a SPECIFICATION + :target: http://plugincompat.herokuapp.com/output/pytest-spec-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-spec-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/pchomik/pytest-spec + `pytest-testmon `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-testmon-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-testmon-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png take TDD to a new level with py.test and testmon + :target: http://plugincompat.herokuapp.com/output/pytest-testmon-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-testmon-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/tarpas/pytest-testmon/ + `pytest-sftpserver `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-sftpserver-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-sftpserver-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png py.test plugin to locally test sftp server connections. + :target: http://plugincompat.herokuapp.com/output/pytest-sftpserver-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-sftpserver-latest?py=py34&pytest=2.8.0.dev4 :target: http://github.com/ulope/pytest-sftpserver/ + `pytest-stepwise `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-stepwise-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-stepwise-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png Run a test suite one failing test at a time. 
+ :target: http://plugincompat.herokuapp.com/output/pytest-stepwise-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-stepwise-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/nip3o/pytest-stepwise + `pytest-runfailed `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-runfailed-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-runfailed-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png implement a --failed option for pytest + :target: http://plugincompat.herokuapp.com/output/pytest-runfailed-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-runfailed-latest?py=py34&pytest=2.8.0.dev4 :target: http://github.com/dmerejkowsky/pytest-runfailed + `pytest-tornado `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-tornado-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-tornado-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png A py.test plugin providing fixtures and markers to simplify testing of asynchronous tornado applications. + :target: http://plugincompat.herokuapp.com/output/pytest-tornado-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-tornado-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/eugeniy/pytest-tornado + `pytest-sftpserver `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-sftpserver-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-sftpserver-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png py.test plugin to locally test sftp server connections. 
+ :target: http://plugincompat.herokuapp.com/output/pytest-sftpserver-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-sftpserver-latest?py=py34&pytest=2.8.0.dev4 :target: http://github.com/ulope/pytest-sftpserver/ + `pytest-timeout `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-timeout-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-timeout-latest?py=py34&pytest=2.8.0.dev4 .. image:: bitbucket.png py.test plugin to abort hanging tests + :target: http://plugincompat.herokuapp.com/output/pytest-timeout-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-timeout-latest?py=py34&pytest=2.8.0.dev4 :target: http://bitbucket.org/flub/pytest-timeout/ + `pytest-sftpserver `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-sftpserver-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-sftpserver-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png py.test plugin to locally test sftp server connections. + :target: http://plugincompat.herokuapp.com/output/pytest-sftpserver-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-sftpserver-latest?py=py34&pytest=2.8.0.dev4 :target: http://github.com/ulope/pytest-sftpserver/ + `pytest-ubersmith `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-ubersmith-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-ubersmith-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png Easily mock calls to ubersmith at the `requests` level. + :target: http://plugincompat.herokuapp.com/output/pytest-ubersmith-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-ubersmith-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/hivelocity/pytest-ubersmith + `pytest-services `_ .. 
image:: http://plugincompat.herokuapp.com/status/pytest-services-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-services-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png Services plugin for pytest testing framework + :target: http://plugincompat.herokuapp.com/output/pytest-services-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-services-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/pytest-dev/pytest-services + `pytest-pythonpath `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-pythonpath-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-pythonpath-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png pytest plugin for adding to the PYTHONPATH from command line or configs. + :target: http://plugincompat.herokuapp.com/output/pytest-pythonpath-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-pythonpath-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/bigsassy/pytest-pythonpath + `pytest-yamlwsgi `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-yamlwsgi-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-yamlwsgi-latest?py=py34&pytest=2.8.0.dev4 ? Run tests against wsgi apps defined in yaml + :target: http://plugincompat.herokuapp.com/output/pytest-yamlwsgi-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-yamlwsgi-latest?py=py34&pytest=2.8.0.dev4 + `pytest-trello `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-trello-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-trello-latest?py=py34&pytest=2.8.0.dev4 .. 
image:: github.png Plugin for py.test that integrates trello using markers + :target: http://plugincompat.herokuapp.com/output/pytest-trello-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-trello-latest?py=py34&pytest=2.8.0.dev4 :target: http://github.com/jlaska/pytest-trello + `pytest-quickcheck `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-quickcheck-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-quickcheck-latest?py=py34&pytest=2.8.0.dev4 .. image:: bitbucket.png pytest plugin to generate random data inspired by QuickCheck + :target: http://plugincompat.herokuapp.com/output/pytest-quickcheck-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-quickcheck-latest?py=py34&pytest=2.8.0.dev4 :target: https://bitbucket.org/pytest-dev/pytest-quickcheck + `pytest-twisted `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-twisted-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-twisted-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png A twisted plugin for py.test. + :target: http://plugincompat.herokuapp.com/output/pytest-twisted-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-twisted-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/schmir/pytest-twisted + `pytest-sftpserver `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-sftpserver-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-sftpserver-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png py.test plugin to locally test sftp server connections. 
+ :target: http://plugincompat.herokuapp.com/output/pytest-sftpserver-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-sftpserver-latest?py=py34&pytest=2.8.0.dev4 :target: http://github.com/ulope/pytest-sftpserver/ + `pytest-watch `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-watch-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-watch-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png Local continuous test runner with pytest and watchdog. + :target: http://plugincompat.herokuapp.com/output/pytest-watch-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-watch-latest?py=py34&pytest=2.8.0.dev4 :target: http://github.com/joeyespo/pytest-watch + `pytest-unmarked `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-unmarked-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-unmarked-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png Run only unmarked tests + :target: http://plugincompat.herokuapp.com/output/pytest-unmarked-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-unmarked-latest?py=py34&pytest=2.8.0.dev4 :target: http://github.com/alyssa.barela/pytest-unmarked + `pytest-regtest `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-regtest-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-regtest-latest?py=py34&pytest=2.8.0.dev4 `link `_ py.test plugin for regression tests + :target: http://plugincompat.herokuapp.com/output/pytest-regtest-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-regtest-latest?py=py34&pytest=2.8.0.dev4 + `pytest-xdist `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-xdist-latest?py=py27&pytest=2.8.0.dev4 .. 
image:: http://plugincompat.herokuapp.com/status/pytest-xdist-latest?py=py34&pytest=2.8.0.dev4 .. image:: bitbucket.png py.test xdist plugin for distributed testing and loop-on-failing modes + :target: http://plugincompat.herokuapp.com/output/pytest-xdist-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-xdist-latest?py=py34&pytest=2.8.0.dev4 :target: http://bitbucket.org/hpk42/pytest-xdist + `pytest-sugar `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-sugar-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-sugar-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png py.test is a plugin for py.test that changes the default look and feel of py.test (e.g. progressbar, show tests that fail instantly). + :target: http://plugincompat.herokuapp.com/output/pytest-sugar-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-sugar-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/Frozenball/pytest-sugar + `pytest-qt `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-qt-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-qt-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png pytest support for PyQt and PySide applications + :target: http://plugincompat.herokuapp.com/output/pytest-qt-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-qt-latest?py=py34&pytest=2.8.0.dev4 :target: http://github.com/pytest-dev/pytest-qt + `pytest-runner `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-runner-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-runner-latest?py=py34&pytest=2.8.0.dev4 .. image:: bitbucket.png Invoke py.test as distutils command with dependency resolution. 
+ :target: http://plugincompat.herokuapp.com/output/pytest-runner-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-runner-latest?py=py34&pytest=2.8.0.dev4 :target: https://bitbucket.org/pytest-dev/pytest-runner + `pytest-splinter `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-splinter-latest?py=py27&pytest=2.8.0.dev4 .. image:: http://plugincompat.herokuapp.com/status/pytest-splinter-latest?py=py34&pytest=2.8.0.dev4 .. image:: github.png Splinter plugin for pytest testing framework + :target: http://plugincompat.herokuapp.com/output/pytest-splinter-latest?py=py27&pytest=2.8.0.dev4 :target: http://plugincompat.herokuapp.com/output/pytest-splinter-latest?py=py34&pytest=2.8.0.dev4 :target: https://github.com/pytest-dev/pytest-splinter + +============================================================================================ ===================================================================================================================== ===================================================================================================================== =========================================================================== ============================================================================================================================================= + +*(Updated on 2015-06-30)* diff --git a/doc/en/plugins_index/index.txt b/doc/en/plugins_index/index.txt deleted file mode 100644 index 60539fd06c3..00000000000 --- a/doc/en/plugins_index/index.txt +++ /dev/null @@ -1,227 +0,0 @@ -.. _plugins_index: - -List of Third-Party Plugins -=========================== - -The table below contains a listing of plugins found in PyPI and -their status when tested using py.test **2.7.0** and python 2.7 and -3.3. - -A complete listing can also be found at -`plugincompat `_, which contains tests -status against other py.test releases. 
- - -============================================================================================ ================================================================================================================ ================================================================================================================ =========================================================================== ============================================================================================================================================= - Name Py27 Py34 Home Summary -============================================================================================ ================================================================================================================ ================================================================================================================ =========================================================================== ============================================================================================================================================= - `pytest-allure-adaptor `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-allure-adaptor-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-allure-adaptor-latest?py=py34&pytest=2.7.0 .. image:: github.png Plugin for py.test to generate allure xml reports - :target: http://plugincompat.herokuapp.com/output/pytest-allure-adaptor-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-allure-adaptor-latest?py=py34&pytest=2.7.0 :target: https://github.com/allure-framework/allure-python - `pytest-ansible `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-ansible-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-ansible-latest?py=py34&pytest=2.7.0 .. 
image:: github.png UNKNOWN - :target: http://plugincompat.herokuapp.com/output/pytest-ansible-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-ansible-latest?py=py34&pytest=2.7.0 :target: http://github.com/jlaska/pytest-ansible - `pytest-autochecklog `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-autochecklog-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-autochecklog-latest?py=py34&pytest=2.7.0 .. image:: github.png automatically check condition and log all the checks - :target: http://plugincompat.herokuapp.com/output/pytest-autochecklog-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-autochecklog-latest?py=py34&pytest=2.7.0 :target: https://github.com/steven004/python-autochecklog - `pytest-bdd `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-bdd-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-bdd-latest?py=py34&pytest=2.7.0 .. image:: github.png BDD for pytest - :target: http://plugincompat.herokuapp.com/output/pytest-bdd-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-bdd-latest?py=py34&pytest=2.7.0 :target: https://github.com/olegpidsadnyi/pytest-bdd - `pytest-beakerlib `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-beakerlib-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-beakerlib-latest?py=py34&pytest=2.7.0 `link `_ A pytest plugin that reports test results to the BeakerLib framework - :target: http://plugincompat.herokuapp.com/output/pytest-beakerlib-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-beakerlib-latest?py=py34&pytest=2.7.0 - `pytest-beds `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-beds-latest?py=py27&pytest=2.7.0 .. 
image:: http://plugincompat.herokuapp.com/status/pytest-beds-latest?py=py34&pytest=2.7.0 .. image:: github.png Fixtures for testing Google Appengine (GAE) apps - :target: http://plugincompat.herokuapp.com/output/pytest-beds-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-beds-latest?py=py34&pytest=2.7.0 :target: https://github.com/kaste/pytest-beds - `pytest-bench `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-bench-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-bench-latest?py=py34&pytest=2.7.0 .. image:: github.png Benchmark utility that plugs into pytest. - :target: http://plugincompat.herokuapp.com/output/pytest-bench-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-bench-latest?py=py34&pytest=2.7.0 :target: http://github.com/concordusapps/pytest-bench - `pytest-benchmark `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-benchmark-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-benchmark-latest?py=py34&pytest=2.7.0 .. image:: github.png py.test fixture for benchmarking code - :target: http://plugincompat.herokuapp.com/output/pytest-benchmark-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-benchmark-latest?py=py34&pytest=2.7.0 :target: https://github.com/ionelmc/pytest-benchmark - `pytest-blockage `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-blockage-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-blockage-latest?py=py34&pytest=2.7.0 .. image:: github.png Disable network requests during a test run. 
- :target: http://plugincompat.herokuapp.com/output/pytest-blockage-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-blockage-latest?py=py34&pytest=2.7.0 :target: https://github.com/rob-b/pytest-blockage - `pytest-bpdb `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-bpdb-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-bpdb-latest?py=py34&pytest=2.7.0 .. image:: github.png A py.test plug-in to enable drop to bpdb debugger on test failure. - :target: http://plugincompat.herokuapp.com/output/pytest-bpdb-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-bpdb-latest?py=py34&pytest=2.7.0 :target: https://github.com/slafs/pytest-bpdb - `pytest-browsermob-proxy `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-browsermob-proxy-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-browsermob-proxy-latest?py=py34&pytest=2.7.0 .. image:: github.png BrowserMob proxy plugin for py.test. - :target: http://plugincompat.herokuapp.com/output/pytest-browsermob-proxy-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-browsermob-proxy-latest?py=py34&pytest=2.7.0 :target: https://github.com/davehunt/pytest-browsermob-proxy - `pytest-bugzilla `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-bugzilla-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-bugzilla-latest?py=py34&pytest=2.7.0 .. image:: github.png py.test bugzilla integration plugin - :target: http://plugincompat.herokuapp.com/output/pytest-bugzilla-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-bugzilla-latest?py=py34&pytest=2.7.0 :target: http://github.com/nibrahim/pytest_bugzilla - `pytest-cache `_ .. 
image:: http://plugincompat.herokuapp.com/status/pytest-cache-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-cache-latest?py=py34&pytest=2.7.0 .. image:: bitbucket.png pytest plugin with mechanisms for caching across test runs - :target: http://plugincompat.herokuapp.com/output/pytest-cache-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-cache-latest?py=py34&pytest=2.7.0 :target: http://bitbucket.org/hpk42/pytest-cache/ - `pytest-cagoule `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-cagoule-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-cagoule-latest?py=py34&pytest=2.7.0 .. image:: github.png Pytest plugin to only run tests affected by changes - :target: http://plugincompat.herokuapp.com/output/pytest-cagoule-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-cagoule-latest?py=py34&pytest=2.7.0 :target: https://github.com/davidszotten/pytest-cagoule - `pytest-capturelog `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-capturelog-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-capturelog-latest?py=py34&pytest=2.7.0 .. image:: bitbucket.png py.test plugin to capture log messages - :target: http://plugincompat.herokuapp.com/output/pytest-capturelog-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-capturelog-latest?py=py34&pytest=2.7.0 :target: http://bitbucket.org/memedough/pytest-capturelog/overview - `pytest-catchlog `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-catchlog-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-catchlog-latest?py=py34&pytest=2.7.0 .. image:: github.png py.test plugin to catch log messages. This is a fork of pytest-capturelog. 
- :target: http://plugincompat.herokuapp.com/output/pytest-catchlog-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-catchlog-latest?py=py34&pytest=2.7.0 :target: https://github.com/eisensheng/pytest-catchlog - `pytest-circleci `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-circleci-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-circleci-latest?py=py34&pytest=2.7.0 .. image:: github.png py.test plugin for CircleCI - :target: http://plugincompat.herokuapp.com/output/pytest-circleci-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-circleci-latest?py=py34&pytest=2.7.0 :target: https://github.com/micktwomey/pytest-circleci - `pytest-cloud `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-cloud-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-cloud-latest?py=py34&pytest=2.7.0 .. image:: github.png Distributed tests planner plugin for pytest testing framework. - :target: http://plugincompat.herokuapp.com/output/pytest-cloud-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-cloud-latest?py=py34&pytest=2.7.0 :target: https://github.com/pytest-dev/pytest-cloud - `pytest-codecheckers `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-codecheckers-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-codecheckers-latest?py=py34&pytest=2.7.0 .. image:: bitbucket.png pytest plugin to add source code sanity checks (pep8 and friends) - :target: http://plugincompat.herokuapp.com/output/pytest-codecheckers-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-codecheckers-latest?py=py34&pytest=2.7.0 :target: http://bitbucket.org/RonnyPfannschmidt/pytest-codecheckers/ - `pytest-colordots `_ .. 
image:: http://plugincompat.herokuapp.com/status/pytest-colordots-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-colordots-latest?py=py34&pytest=2.7.0 .. image:: github.png Colorizes the progress indicators - :target: http://plugincompat.herokuapp.com/output/pytest-colordots-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-colordots-latest?py=py34&pytest=2.7.0 :target: https://github.com/svenstaro/pytest-colordots - `pytest-config `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-config-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-config-latest?py=py34&pytest=2.7.0 .. image:: github.png Base configurations and utilities for developing your Python project test suite with pytest. - :target: http://plugincompat.herokuapp.com/output/pytest-config-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-config-latest?py=py34&pytest=2.7.0 :target: https://github.com/buzzfeed/pytest_config - `pytest-contextfixture `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-contextfixture-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-contextfixture-latest?py=py34&pytest=2.7.0 .. image:: github.png Define pytest fixtures as context managers. - :target: http://plugincompat.herokuapp.com/output/pytest-contextfixture-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-contextfixture-latest?py=py34&pytest=2.7.0 :target: http://github.com/pelme/pytest-contextfixture/ - `pytest-couchdbkit `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-couchdbkit-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-couchdbkit-latest?py=py34&pytest=2.7.0 .. 
image:: bitbucket.png py.test extension for per-test couchdb databases using couchdbkit - :target: http://plugincompat.herokuapp.com/output/pytest-couchdbkit-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-couchdbkit-latest?py=py34&pytest=2.7.0 :target: http://bitbucket.org/RonnyPfannschmidt/pytest-couchdbkit - `pytest-cov `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-cov-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-cov-latest?py=py34&pytest=2.7.0 .. image:: github.png py.test plugin for coverage reporting with support for both centralised and distributed testing, including subprocesses and multiprocessing - :target: http://plugincompat.herokuapp.com/output/pytest-cov-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-cov-latest?py=py34&pytest=2.7.0 :target: https://github.com/schlamar/pytest-cov - `pytest-cpp `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-cpp-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-cpp-latest?py=py34&pytest=2.7.0 .. image:: github.png Use pytest's runner to discover and execute C++ tests - :target: http://plugincompat.herokuapp.com/output/pytest-cpp-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-cpp-latest?py=py34&pytest=2.7.0 :target: http://github.com/pytest-dev/pytest-cpp - `pytest-dbfixtures `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-dbfixtures-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-dbfixtures-latest?py=py34&pytest=2.7.0 .. image:: github.png Databases fixtures plugin for py.test. 
- :target: http://plugincompat.herokuapp.com/output/pytest-dbfixtures-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-dbfixtures-latest?py=py34&pytest=2.7.0 :target: https://github.com/ClearcodeHQ/pytest-dbfixtures - `pytest-dbus-notification `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-dbus-notification-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-dbus-notification-latest?py=py34&pytest=2.7.0 .. image:: github.png D-BUS notifications for pytest results. - :target: http://plugincompat.herokuapp.com/output/pytest-dbus-notification-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-dbus-notification-latest?py=py34&pytest=2.7.0 :target: https://github.com/bmathieu33/pytest-dbus-notification - `pytest-describe `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-describe-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-describe-latest?py=py34&pytest=2.7.0 .. image:: github.png Describe-style plugin for pytest - :target: http://plugincompat.herokuapp.com/output/pytest-describe-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-describe-latest?py=py34&pytest=2.7.0 :target: https://github.com/ropez/pytest-describe - `pytest-diffeo `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-diffeo-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-diffeo-latest?py=py34&pytest=2.7.0 .. image:: github.png Common py.test support for Diffeo packages - :target: http://plugincompat.herokuapp.com/output/pytest-diffeo-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-diffeo-latest?py=py34&pytest=2.7.0 :target: https://github.com/diffeo/pytest-diffeo - `pytest-django `_ .. 
image:: http://plugincompat.herokuapp.com/status/pytest-django-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-django-latest?py=py34&pytest=2.7.0 `link `_ A Django plugin for py.test. - :target: http://plugincompat.herokuapp.com/output/pytest-django-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-django-latest?py=py34&pytest=2.7.0 - `pytest-django-haystack `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-django-haystack-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-django-haystack-latest?py=py34&pytest=2.7.0 .. image:: github.png Cleanup your Haystack indexes between tests - :target: http://plugincompat.herokuapp.com/output/pytest-django-haystack-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-django-haystack-latest?py=py34&pytest=2.7.0 :target: http://github.com/rouge8/pytest-django-haystack - `pytest-django-lite `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-django-lite-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-django-lite-latest?py=py34&pytest=2.7.0 .. image:: github.png The bare minimum to integrate py.test with Django. - :target: http://plugincompat.herokuapp.com/output/pytest-django-lite-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-django-lite-latest?py=py34&pytest=2.7.0 :target: https://github.com/dcramer/pytest-django-lite - `pytest-django-sqlcount `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-django-sqlcount-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-django-sqlcount-latest?py=py34&pytest=2.7.0 .. image:: github.png py.test plugin for reporting the number of SQLs executed per django testcase. 
- :target: http://plugincompat.herokuapp.com/output/pytest-django-sqlcount-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-django-sqlcount-latest?py=py34&pytest=2.7.0 :target: https://github.com/stj/pytest-django-sqlcount - `pytest-echo `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-echo-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-echo-latest?py=py34&pytest=2.7.0 `link `_ pytest plugin with mechanisms for echoing environment variables, package version and generic attributes - :target: http://plugincompat.herokuapp.com/output/pytest-echo-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-echo-latest?py=py34&pytest=2.7.0 - `pytest-env `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-env-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-env-latest?py=py34&pytest=2.7.0 .. image:: github.png py.test plugin that allows you to add environment variables. - :target: http://plugincompat.herokuapp.com/output/pytest-env-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-env-latest?py=py34&pytest=2.7.0 :target: https://github.com/MobileDynasty/pytest-env - `pytest-eradicate `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-eradicate-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-eradicate-latest?py=py34&pytest=2.7.0 .. image:: github.png pytest plugin to check for commented out code - :target: http://plugincompat.herokuapp.com/output/pytest-eradicate-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-eradicate-latest?py=py34&pytest=2.7.0 :target: https://github.com/spil-johan/pytest-eradicate - `pytest-figleaf `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-figleaf-latest?py=py27&pytest=2.7.0 .. 
image:: http://plugincompat.herokuapp.com/status/pytest-figleaf-latest?py=py34&pytest=2.7.0 .. image:: bitbucket.png py.test figleaf coverage plugin - :target: http://plugincompat.herokuapp.com/output/pytest-figleaf-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-figleaf-latest?py=py34&pytest=2.7.0 :target: http://bitbucket.org/hpk42/pytest-figleaf - `pytest-fixture-tools `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-fixture-tools-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-fixture-tools-latest?py=py34&pytest=2.7.0 ? Plugin for pytest which provides tools for fixtures - :target: http://plugincompat.herokuapp.com/output/pytest-fixture-tools-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-fixture-tools-latest?py=py34&pytest=2.7.0 - `pytest-flakes `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-flakes-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-flakes-latest?py=py34&pytest=2.7.0 .. image:: github.png pytest plugin to check source code with pyflakes - :target: http://plugincompat.herokuapp.com/output/pytest-flakes-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-flakes-latest?py=py34&pytest=2.7.0 :target: https://github.com/fschulze/pytest-flakes - `pytest-flask `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-flask-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-flask-latest?py=py34&pytest=2.7.0 .. image:: github.png A set of py.test fixtures to test Flask applications. - :target: http://plugincompat.herokuapp.com/output/pytest-flask-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-flask-latest?py=py34&pytest=2.7.0 :target: https://github.com/vitalk/pytest-flask - `pytest-greendots `_ .. 
image:: http://plugincompat.herokuapp.com/status/pytest-greendots-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-greendots-latest?py=py34&pytest=2.7.0 ? Green progress dots - :target: http://plugincompat.herokuapp.com/output/pytest-greendots-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-greendots-latest?py=py34&pytest=2.7.0 - `pytest-growl `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-growl-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-growl-latest?py=py34&pytest=2.7.0 ? Growl notifications for pytest results. - :target: http://plugincompat.herokuapp.com/output/pytest-growl-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-growl-latest?py=py34&pytest=2.7.0 - `pytest-httpbin `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-httpbin-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-httpbin-latest?py=py34&pytest=2.7.0 .. image:: github.png Easily test your HTTP library against a local copy of httpbin - :target: http://plugincompat.herokuapp.com/output/pytest-httpbin-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-httpbin-latest?py=py34&pytest=2.7.0 :target: https://github.com/kevin1024/pytest-httpbin - `pytest-httpretty `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-httpretty-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-httpretty-latest?py=py34&pytest=2.7.0 .. image:: github.png A thin wrapper of HTTPretty for pytest - :target: http://plugincompat.herokuapp.com/output/pytest-httpretty-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-httpretty-latest?py=py34&pytest=2.7.0 :target: http://github.com/papaeye/pytest-httpretty - `pytest-incremental `_ .. 
image:: http://plugincompat.herokuapp.com/status/pytest-incremental-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-incremental-latest?py=py34&pytest=2.7.0 .. image:: bitbucket.png an incremental test runner (pytest plugin) - :target: http://plugincompat.herokuapp.com/output/pytest-incremental-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-incremental-latest?py=py34&pytest=2.7.0 :target: https://bitbucket.org/schettino72/pytest-incremental - `pytest-instafail `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-instafail-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-instafail-latest?py=py34&pytest=2.7.0 .. image:: github.png py.test plugin to show failures instantly - :target: http://plugincompat.herokuapp.com/output/pytest-instafail-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-instafail-latest?py=py34&pytest=2.7.0 :target: https://github.com/jpvanhal/pytest-instafail - `pytest-ipdb `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-ipdb-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-ipdb-latest?py=py34&pytest=2.7.0 .. image:: github.png A py.test plug-in to enable drop to ipdb debugger on test failure. - :target: http://plugincompat.herokuapp.com/output/pytest-ipdb-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-ipdb-latest?py=py34&pytest=2.7.0 :target: https://github.com/mverteuil/pytest-ipdb - `pytest-ipynb `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-ipynb-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-ipynb-latest?py=py34&pytest=2.7.0 .. 
image:: github.png Use pytest's runner to discover and execute tests as cells of IPython notebooks - :target: http://plugincompat.herokuapp.com/output/pytest-ipynb-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-ipynb-latest?py=py34&pytest=2.7.0 :target: http://github.com/zonca/pytest-ipynb - `pytest-jira `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-jira-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-jira-latest?py=py34&pytest=2.7.0 .. image:: github.png py.test JIRA integration plugin, using markers - :target: http://plugincompat.herokuapp.com/output/pytest-jira-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-jira-latest?py=py34&pytest=2.7.0 :target: http://github.com/jlaska/pytest_jira - `pytest-knows `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-knows-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-knows-latest?py=py34&pytest=2.7.0 .. image:: github.png A pytest plugin that can automaticly skip test case based on dependence info calculated by trace - :target: http://plugincompat.herokuapp.com/output/pytest-knows-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-knows-latest?py=py34&pytest=2.7.0 :target: https://github.com/mapix/ptknows - `pytest-konira `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-konira-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-konira-latest?py=py34&pytest=2.7.0 .. image:: github.png Run Konira DSL tests with py.test - :target: http://plugincompat.herokuapp.com/output/pytest-konira-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-konira-latest?py=py34&pytest=2.7.0 :target: http://github.com/alfredodeza/pytest-konira - `pytest-localserver `_ .. 
image:: http://plugincompat.herokuapp.com/status/pytest-localserver-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-localserver-latest?py=py34&pytest=2.7.0 .. image:: bitbucket.png py.test plugin to test server connections locally. - :target: http://plugincompat.herokuapp.com/output/pytest-localserver-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-localserver-latest?py=py34&pytest=2.7.0 :target: http://bitbucket.org/basti/pytest-localserver/ - `pytest-marker-bugzilla `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-marker-bugzilla-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-marker-bugzilla-latest?py=py34&pytest=2.7.0 .. image:: github.png py.test bugzilla integration plugin, using markers - :target: http://plugincompat.herokuapp.com/output/pytest-marker-bugzilla-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-marker-bugzilla-latest?py=py34&pytest=2.7.0 :target: http://github.com/eanxgeek/pytest_marker_bugzilla - `pytest-markfiltration `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-markfiltration-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-markfiltration-latest?py=py34&pytest=2.7.0 .. image:: github.png UNKNOWN - :target: http://plugincompat.herokuapp.com/output/pytest-markfiltration-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-markfiltration-latest?py=py34&pytest=2.7.0 :target: https://github.com/adamgoucher/pytest-markfiltration - `pytest-marks `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-marks-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-marks-latest?py=py34&pytest=2.7.0 .. 
image:: github.png UNKNOWN - :target: http://plugincompat.herokuapp.com/output/pytest-marks-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-marks-latest?py=py34&pytest=2.7.0 :target: https://github.com/adamgoucher/pytest-marks - `pytest-mock `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-mock-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-mock-latest?py=py34&pytest=2.7.0 .. image:: github.png Thin-wrapper around the mock package for easier use with py.test - :target: http://plugincompat.herokuapp.com/output/pytest-mock-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-mock-latest?py=py34&pytest=2.7.0 :target: https://github.com/pytest-dev/pytest-mock/ - `pytest-monkeyplus `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-monkeyplus-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-monkeyplus-latest?py=py34&pytest=2.7.0 .. image:: bitbucket.png pytest's monkeypatch subclass with extra functionalities - :target: http://plugincompat.herokuapp.com/output/pytest-monkeyplus-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-monkeyplus-latest?py=py34&pytest=2.7.0 :target: http://bitbucket.org/hsoft/pytest-monkeyplus/ - `pytest-mozwebqa `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-mozwebqa-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-mozwebqa-latest?py=py34&pytest=2.7.0 .. image:: github.png Mozilla WebQA plugin for py.test. - :target: http://plugincompat.herokuapp.com/output/pytest-mozwebqa-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-mozwebqa-latest?py=py34&pytest=2.7.0 :target: https://github.com/mozilla/pytest-mozwebqa - `pytest-multihost `_ .. 
image:: http://plugincompat.herokuapp.com/status/pytest-multihost-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-multihost-latest?py=py34&pytest=2.7.0 `link `_ Utility for writing multi-host tests for pytest - :target: http://plugincompat.herokuapp.com/output/pytest-multihost-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-multihost-latest?py=py34&pytest=2.7.0 - `pytest-oerp `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-oerp-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-oerp-latest?py=py34&pytest=2.7.0 .. image:: github.png pytest plugin to test OpenERP modules - :target: http://plugincompat.herokuapp.com/output/pytest-oerp-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-oerp-latest?py=py34&pytest=2.7.0 :target: http://github.com/santagada/pytest-oerp/ - `pytest-oot `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-oot-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-oot-latest?py=py34&pytest=2.7.0 `link `_ Run object-oriented tests in a simple format - :target: http://plugincompat.herokuapp.com/output/pytest-oot-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-oot-latest?py=py34&pytest=2.7.0 - `pytest-optional `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-optional-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-optional-latest?py=py34&pytest=2.7.0 .. image:: bitbucket.png include/exclude values of fixtures in pytest - :target: http://plugincompat.herokuapp.com/output/pytest-optional-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-optional-latest?py=py34&pytest=2.7.0 :target: http://bitbucket.org/maho/pytest-optional - `pytest-ordering `_ .. 
image:: http://plugincompat.herokuapp.com/status/pytest-ordering-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-ordering-latest?py=py34&pytest=2.7.0 .. image:: github.png pytest plugin to run your tests in a specific order - :target: http://plugincompat.herokuapp.com/output/pytest-ordering-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-ordering-latest?py=py34&pytest=2.7.0 :target: https://github.com/ftobia/pytest-ordering - `pytest-osxnotify `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-osxnotify-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-osxnotify-latest?py=py34&pytest=2.7.0 .. image:: github.png OS X notifications for py.test results. - :target: http://plugincompat.herokuapp.com/output/pytest-osxnotify-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-osxnotify-latest?py=py34&pytest=2.7.0 :target: https://github.com/dbader/pytest-osxnotify - `pytest-paste-config `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-paste-config-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-paste-config-latest?py=py34&pytest=2.7.0 ? Allow setting the path to a paste config file - :target: http://plugincompat.herokuapp.com/output/pytest-paste-config-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-paste-config-latest?py=py34&pytest=2.7.0 - `pytest-pep257 `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-pep257-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-pep257-latest?py=py34&pytest=2.7.0 ? py.test plugin for pep257 - :target: http://plugincompat.herokuapp.com/output/pytest-pep257-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-pep257-latest?py=py34&pytest=2.7.0 - `pytest-pep8 `_ .. 
image:: http://plugincompat.herokuapp.com/status/pytest-pep8-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-pep8-latest?py=py34&pytest=2.7.0 .. image:: bitbucket.png pytest plugin to check PEP8 requirements - :target: http://plugincompat.herokuapp.com/output/pytest-pep8-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-pep8-latest?py=py34&pytest=2.7.0 :target: http://bitbucket.org/hpk42/pytest-pep8/ - `pytest-pipeline `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-pipeline-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-pipeline-latest?py=py34&pytest=2.7.0 .. image:: github.png Pytest plugin for functional testing of data analysis pipelines - :target: http://plugincompat.herokuapp.com/output/pytest-pipeline-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-pipeline-latest?py=py34&pytest=2.7.0 :target: https://github.com/bow/pytest_pipeline - `pytest-poo `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-poo-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-poo-latest?py=py34&pytest=2.7.0 .. image:: github.png Visualize your crappy tests - :target: http://plugincompat.herokuapp.com/output/pytest-poo-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-poo-latest?py=py34&pytest=2.7.0 :target: http://github.com/pelme/pytest-poo - `pytest-poo-fail `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-poo-fail-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-poo-fail-latest?py=py34&pytest=2.7.0 .. 
image:: github.png Visualize your failed tests with poo - :target: http://plugincompat.herokuapp.com/output/pytest-poo-fail-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-poo-fail-latest?py=py34&pytest=2.7.0 :target: http://github.com/alyssa.barela/pytest-poo-fail - `pytest-pycharm `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-pycharm-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-pycharm-latest?py=py34&pytest=2.7.0 .. image:: github.png Plugin for py.test to enter PyCharm debugger on uncaught exceptions - :target: http://plugincompat.herokuapp.com/output/pytest-pycharm-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-pycharm-latest?py=py34&pytest=2.7.0 :target: https://github.com/jlubcke/pytest-pycharm - `pytest-pydev `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-pydev-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-pydev-latest?py=py34&pytest=2.7.0 .. image:: bitbucket.png py.test plugin to connect to a remote debug server with PyDev or PyCharm. - :target: http://plugincompat.herokuapp.com/output/pytest-pydev-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-pydev-latest?py=py34&pytest=2.7.0 :target: http://bitbucket.org/basti/pytest-pydev/ - `pytest-pyq `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-pyq-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-pyq-latest?py=py34&pytest=2.7.0 `link `_ Pytest fixture "q" for pyq - :target: http://plugincompat.herokuapp.com/output/pytest-pyq-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-pyq-latest?py=py34&pytest=2.7.0 - `pytest-pythonpath `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-pythonpath-latest?py=py27&pytest=2.7.0 .. 
image:: http://plugincompat.herokuapp.com/status/pytest-pythonpath-latest?py=py34&pytest=2.7.0 .. image:: github.png pytest plugin for adding to the PYTHONPATH from command line or configs. - :target: http://plugincompat.herokuapp.com/output/pytest-pythonpath-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-pythonpath-latest?py=py34&pytest=2.7.0 :target: https://github.com/bigsassy/pytest-pythonpath - `pytest-qt `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-qt-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-qt-latest?py=py34&pytest=2.7.0 .. image:: github.png pytest support for PyQt and PySide applications - :target: http://plugincompat.herokuapp.com/output/pytest-qt-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-qt-latest?py=py34&pytest=2.7.0 :target: http://github.com/pytest-dev/pytest-qt - `pytest-quickcheck `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-quickcheck-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-quickcheck-latest?py=py34&pytest=2.7.0 .. image:: bitbucket.png pytest plugin to generate random data inspired by QuickCheck - :target: http://plugincompat.herokuapp.com/output/pytest-quickcheck-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-quickcheck-latest?py=py34&pytest=2.7.0 :target: http://bitbucket.org/t2y/pytest-quickcheck/ - `pytest-rage `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-rage-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-rage-latest?py=py34&pytest=2.7.0 .. 
image:: github.png pytest plugin to implement PEP712 - :target: http://plugincompat.herokuapp.com/output/pytest-rage-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-rage-latest?py=py34&pytest=2.7.0 :target: http://github.com/santagada/pytest-rage/ - `pytest-raisesregexp `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-raisesregexp-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-raisesregexp-latest?py=py34&pytest=2.7.0 .. image:: github.png Simple pytest plugin to look for regex in Exceptions - :target: http://plugincompat.herokuapp.com/output/pytest-raisesregexp-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-raisesregexp-latest?py=py34&pytest=2.7.0 :target: https://github.com/Walkman/pytest_raisesregexp - `pytest-random `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-random-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-random-latest?py=py34&pytest=2.7.0 .. image:: github.png py.test plugin to randomize tests - :target: http://plugincompat.herokuapp.com/output/pytest-random-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-random-latest?py=py34&pytest=2.7.0 :target: https://github.com/klrmn/pytest-random - `pytest-readme `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-readme-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-readme-latest?py=py34&pytest=2.7.0 .. image:: github.png Test your README.md file - :target: http://plugincompat.herokuapp.com/output/pytest-readme-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-readme-latest?py=py34&pytest=2.7.0 :target: https://github.com/boxed/pytest-readme - `pytest-regtest `_ .. 
image:: http://plugincompat.herokuapp.com/status/pytest-regtest-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-regtest-latest?py=py34&pytest=2.7.0 `link `_ py.test plugin for regression tests - :target: http://plugincompat.herokuapp.com/output/pytest-regtest-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-regtest-latest?py=py34&pytest=2.7.0 - `pytest-remove-stale-bytecode `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-remove-stale-bytecode-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-remove-stale-bytecode-latest?py=py34&pytest=2.7.0 .. image:: bitbucket.png py.test plugin to remove stale byte code files. - :target: http://plugincompat.herokuapp.com/output/pytest-remove-stale-bytecode-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-remove-stale-bytecode-latest?py=py34&pytest=2.7.0 :target: https://bitbucket.org/gocept/pytest-remove-stale-bytecode/ - `pytest-rerunfailures `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-rerunfailures-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-rerunfailures-latest?py=py34&pytest=2.7.0 .. image:: github.png py.test plugin to re-run tests to eliminate flakey failures - :target: http://plugincompat.herokuapp.com/output/pytest-rerunfailures-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-rerunfailures-latest?py=py34&pytest=2.7.0 :target: https://github.com/klrmn/pytest-rerunfailures - `pytest-runfailed `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-runfailed-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-runfailed-latest?py=py34&pytest=2.7.0 .. 
image:: github.png implement a --failed option for pytest - :target: http://plugincompat.herokuapp.com/output/pytest-runfailed-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-runfailed-latest?py=py34&pytest=2.7.0 :target: http://github.com/dmerejkowsky/pytest-runfailed - `pytest-runner `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-runner-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-runner-latest?py=py34&pytest=2.7.0 .. image:: bitbucket.png Invoke py.test as distutils command with dependency resolution. - :target: http://plugincompat.herokuapp.com/output/pytest-runner-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-runner-latest?py=py34&pytest=2.7.0 :target: https://bitbucket.org/jaraco/pytest-runner - `pytest-services `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-services-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-services-latest?py=py34&pytest=2.7.0 .. image:: github.png Services plugin for pytest testing framework - :target: http://plugincompat.herokuapp.com/output/pytest-services-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-services-latest?py=py34&pytest=2.7.0 :target: https://github.com/pytest-dev/pytest-services - `pytest-sftpserver `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-sftpserver-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-sftpserver-latest?py=py34&pytest=2.7.0 .. image:: github.png py.test plugin to locally test sftp server connections. - :target: http://plugincompat.herokuapp.com/output/pytest-sftpserver-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-sftpserver-latest?py=py34&pytest=2.7.0 :target: http://github.com/ulope/pytest-sftpserver/ - `pytest-smartcov `_ .. 
image:: http://plugincompat.herokuapp.com/status/pytest-smartcov-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-smartcov-latest?py=py34&pytest=2.7.0 .. image:: github.png Smart coverage plugin for pytest. - :target: http://plugincompat.herokuapp.com/output/pytest-smartcov-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-smartcov-latest?py=py34&pytest=2.7.0 :target: https://github.com/carljm/pytest-smartcov/ - `pytest-sourceorder `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-sourceorder-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-sourceorder-latest?py=py34&pytest=2.7.0 `link `_ Test-ordering plugin for pytest - :target: http://plugincompat.herokuapp.com/output/pytest-sourceorder-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-sourceorder-latest?py=py34&pytest=2.7.0 - `pytest-spec `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-spec-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-spec-latest?py=py34&pytest=2.7.0 .. image:: github.png pytest plugin to display test execution output like a SPECIFICATION - :target: http://plugincompat.herokuapp.com/output/pytest-spec-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-spec-latest?py=py34&pytest=2.7.0 :target: https://github.com/pchomik/pytest-spec - `pytest-splinter `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-splinter-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-splinter-latest?py=py34&pytest=2.7.0 .. 
image:: github.png Splinter plugin for pytest testing framework - :target: http://plugincompat.herokuapp.com/output/pytest-splinter-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-splinter-latest?py=py34&pytest=2.7.0 :target: https://github.com/pytest-dev/pytest-splinter - `pytest-stepwise `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-stepwise-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-stepwise-latest?py=py34&pytest=2.7.0 .. image:: github.png Run a test suite one failing test at a time. - :target: http://plugincompat.herokuapp.com/output/pytest-stepwise-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-stepwise-latest?py=py34&pytest=2.7.0 :target: https://github.com/nip3o/pytest-stepwise - `pytest-sugar `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-sugar-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-sugar-latest?py=py34&pytest=2.7.0 .. image:: github.png py.test is a plugin for py.test that changes the default look and feel of py.test (e.g. progressbar, show tests that fail instantly). - :target: http://plugincompat.herokuapp.com/output/pytest-sugar-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-sugar-latest?py=py34&pytest=2.7.0 :target: https://github.com/Frozenball/pytest-sugar - `pytest-timeout `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-timeout-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-timeout-latest?py=py34&pytest=2.7.0 .. 
image:: bitbucket.png py.test plugin to abort hanging tests - :target: http://plugincompat.herokuapp.com/output/pytest-timeout-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-timeout-latest?py=py34&pytest=2.7.0 :target: http://bitbucket.org/flub/pytest-timeout/ - `pytest-tornado `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-tornado-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-tornado-latest?py=py34&pytest=2.7.0 .. image:: github.png A py.test plugin providing fixtures and markers to simplify testing of asynchronous tornado applications. - :target: http://plugincompat.herokuapp.com/output/pytest-tornado-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-tornado-latest?py=py34&pytest=2.7.0 :target: https://github.com/eugeniy/pytest-tornado - `pytest-translations `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-translations-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-translations-latest?py=py34&pytest=2.7.0 .. image:: github.png Test your translation files - :target: http://plugincompat.herokuapp.com/output/pytest-translations-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-translations-latest?py=py34&pytest=2.7.0 :target: https://github.com/thermondo/pytest-translations - `pytest-twisted `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-twisted-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-twisted-latest?py=py34&pytest=2.7.0 .. image:: github.png A twisted plugin for py.test. - :target: http://plugincompat.herokuapp.com/output/pytest-twisted-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-twisted-latest?py=py34&pytest=2.7.0 :target: https://github.com/schmir/pytest-twisted - `pytest-unmarked `_ .. 
image:: http://plugincompat.herokuapp.com/status/pytest-unmarked-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-unmarked-latest?py=py34&pytest=2.7.0 .. image:: github.png Run only unmarked tests - :target: http://plugincompat.herokuapp.com/output/pytest-unmarked-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-unmarked-latest?py=py34&pytest=2.7.0 :target: http://github.com/alyssa.barela/pytest-unmarked - `pytest-watch `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-watch-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-watch-latest?py=py34&pytest=2.7.0 .. image:: github.png Local continuous test runner with pytest and watchdog. - :target: http://plugincompat.herokuapp.com/output/pytest-watch-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-watch-latest?py=py34&pytest=2.7.0 :target: http://github.com/joeyespo/pytest-watch - `pytest-xdist `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-xdist-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-xdist-latest?py=py34&pytest=2.7.0 .. image:: bitbucket.png py.test xdist plugin for distributed testing and loop-on-failing modes - :target: http://plugincompat.herokuapp.com/output/pytest-xdist-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-xdist-latest?py=py34&pytest=2.7.0 :target: http://bitbucket.org/hpk42/pytest-xdist - `pytest-xprocess `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-xprocess-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-xprocess-latest?py=py34&pytest=2.7.0 .. 
image:: bitbucket.png pytest plugin to manage external processes across test runs - :target: http://plugincompat.herokuapp.com/output/pytest-xprocess-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-xprocess-latest?py=py34&pytest=2.7.0 :target: http://bitbucket.org/hpk42/pytest-xprocess/ - `pytest-yamlwsgi `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-yamlwsgi-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-yamlwsgi-latest?py=py34&pytest=2.7.0 ? Run tests against wsgi apps defined in yaml - :target: http://plugincompat.herokuapp.com/output/pytest-yamlwsgi-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-yamlwsgi-latest?py=py34&pytest=2.7.0 - `pytest-zap `_ .. image:: http://plugincompat.herokuapp.com/status/pytest-zap-latest?py=py27&pytest=2.7.0 .. image:: http://plugincompat.herokuapp.com/status/pytest-zap-latest?py=py34&pytest=2.7.0 .. image:: github.png OWASP ZAP plugin for py.test. 
- :target: http://plugincompat.herokuapp.com/output/pytest-zap-latest?py=py27&pytest=2.7.0 :target: http://plugincompat.herokuapp.com/output/pytest-zap-latest?py=py34&pytest=2.7.0 :target: https://github.com/davehunt/pytest-zap - -============================================================================================ ================================================================================================================ ================================================================================================================ =========================================================================== ============================================================================================================================================= - -*(Updated on 2015-02-28)* diff --git a/doc/en/plugins_index/plugins_index.py b/doc/en/plugins_index/plugins_index.py index 63547ca82f7..4589f2d5f35 100644 --- a/doc/en/plugins_index/plugins_index.py +++ b/doc/en/plugins_index/plugins_index.py @@ -38,15 +38,16 @@ def get_proxy(url): return ServerProxy(url) -def iter_plugins(client, search='pytest-'): +def iter_plugins(client): """ Returns an iterator of (name, version) from PyPI. 
:param client: ServerProxy :param search: package names to search for """ - for plug_data in client.search({'name': search}): - yield plug_data['name'], plug_data['version'] + for plug_data in client.search({'name': 'pytest'}): + if plug_data['name'].startswith('pytest-'): + yield plug_data['name'], plug_data['version'] def get_latest_versions(plugins): @@ -219,8 +220,7 @@ def get_row_limiter(char): with open(filename, 'w') as f: # header - header_text = HEADER.format(pytest_version=pytest_ver) - print(header_text, file=f) + print(HEADER, file=f) print(file=f) # table @@ -295,8 +295,7 @@ def main(argv): =========================== The table below contains a listing of plugins found in PyPI and -their status when tested using py.test **{pytest_version}** and python 2.7 and -3.3. +their status when tested when using latest py.test and python versions. A complete listing can also be found at `plugincompat `_, which contains tests diff --git a/doc/en/projects.txt b/doc/en/projects.rst similarity index 99% rename from doc/en/projects.txt rename to doc/en/projects.rst index 34a82b4c27d..76d004916ce 100644 --- a/doc/en/projects.txt +++ b/doc/en/projects.rst @@ -81,4 +81,5 @@ Some organisations using pytest * `Open End, Gothenborg `_ * `Laboratory of Bioinformatics, Warsaw `_ * `merlinux, Germany `_ +* `ESSS, Brazil `_ * many more ... (please be so kind to send a note via :ref:`contact`) diff --git a/doc/en/recwarn.rst b/doc/en/recwarn.rst new file mode 100644 index 00000000000..c2a1e65fa17 --- /dev/null +++ b/doc/en/recwarn.rst @@ -0,0 +1,116 @@ + +Asserting Warnings +===================================================== + +.. _warns: + +Asserting warnings with the warns function +----------------------------------------------- + +.. 
versionadded:: 2.8 + +You can check that code raises a particular warning using ``pytest.warns``, +which works in a similar manner to :ref:`raises `:: + + import warnings + import pytest + + def test_warning(): + with pytest.warns(UserWarning): + warnings.warn("my warning", UserWarning) + +The test will fail if the warning in question is not raised. + +You can also call ``pytest.warns`` on a function or code string:: + + pytest.warns(expected_warning, func, *args, **kwargs) + pytest.warns(expected_warning, "func(*args, **kwargs)") + +The function also returns a list of all raised warnings (as +``warnings.WarningMessage`` objects), which you can query for +additional information:: + + with pytest.warns(RuntimeWarning) as record: + warnings.warn("another warning", RuntimeWarning) + + # check that only one warning was raised + assert len(record) == 1 + # check that the message matches + assert record[0].message.args[0] == "another warning" + +Alternatively, you can examine raised warnings in detail using the +:ref:`recwarn ` fixture (see below). + +.. _recwarn: + +Recording warnings +------------------------ + +You can record raised warnings either using ``pytest.warns`` or with +the ``recwarn`` fixture. 
+ +To record with ``pytest.warns`` without asserting anything about the warnings, +pass ``None`` as the expected warning type:: + + with pytest.warns(None) as record: + warnings.warn("user", UserWarning) + warnings.warn("runtime", RuntimeWarning) + + assert len(record) == 2 + assert str(record[0].message) == "user" + assert str(record[1].message) == "runtime" + +The ``recwarn`` fixture will record warnings for the whole function:: + + import warnings + + def test_hello(recwarn): + warnings.warn("hello", UserWarning) + assert len(recwarn) == 1 + w = recwarn.pop(UserWarning) + assert issubclass(w.category, UserWarning) + assert str(w.message) == "hello" + assert w.filename + assert w.lineno + +Both ``recwarn`` and ``pytest.warns`` return the same interface for recorded +warnings: a WarningsRecorder instance. To view the recorded warnings, you can +iterate over this instance, call ``len`` on it to get the number of recorded +warnings, or index into it to get a particular recorded warning. It also +provides these methods: + +.. autoclass:: _pytest.recwarn.WarningsRecorder() + :members: + +Each recorded warning has the attributes ``message``, ``category``, +``filename``, ``lineno``, ``file``, and ``line``. The ``category`` is the +class of the warning. The ``message`` is the warning itself; calling +``str(message)`` will return the actual message of the warning. + + +.. _ensuring_function_triggers: + +Ensuring a function triggers a deprecation warning +------------------------------------------------------- + +You can also call a global helper for checking +that a certain function call triggers a ``DeprecationWarning``:: + + import pytest + + def test_global(): + pytest.deprecated_call(myfunction, 17) + +By default, deprecation warnings will not be caught when using ``pytest.warns`` +or ``recwarn``, since the default Python warnings filters hide +DeprecationWarnings. 
If you wish to record them in your own code, use the +command ``warnings.simplefilter('always')``:: + + import warnings + import pytest + + def test_deprecation(recwarn): + warnings.simplefilter('always') + warnings.warn("deprecated", DeprecationWarning) + assert len(recwarn) == 1 + assert recwarn.pop(DeprecationWarning) diff --git a/doc/en/recwarn.txt b/doc/en/recwarn.txt deleted file mode 100644 index faa1ad761d4..00000000000 --- a/doc/en/recwarn.txt +++ /dev/null @@ -1,42 +0,0 @@ - -Asserting deprecation and other warnings -===================================================== - -.. _function_argument: - -The recwarn function argument ------------------------------------- - -You can use the ``recwarn`` funcarg to assert that code triggers -warnings through the Python warnings system. Here is a simple -self-contained test:: - - # content of test_recwarn.py - def test_hello(recwarn): - from warnings import warn - warn("hello", DeprecationWarning) - w = recwarn.pop(DeprecationWarning) - assert issubclass(w.category, DeprecationWarning) - assert 'hello' in str(w.message) - assert w.filename - assert w.lineno - -The ``recwarn`` function argument provides these methods: - -* ``pop(category=None)``: return last warning matching the category. -* ``clear()``: clear list of warnings - - -.. _ensuring_function_triggers: - -Ensuring a function triggers a deprecation warning -------------------------------------------------------- - -You can also call a global helper for checking -that a certain function call triggers a Deprecation -warning:: - - import pytest - - def test_global(): - pytest.deprecated_call(myfunction, 17) diff --git a/doc/en/release.txt b/doc/en/release.txt deleted file mode 100644 index a2c24722f69..00000000000 --- a/doc/en/release.txt +++ /dev/null @@ -1,54 +0,0 @@ -pytest release checklist -------------------------- - -For doing a release of pytest (status April 2015) this rough checklist is used: - -1. 
change version numbers in ``_pytest/__init__.py`` to the to-be-released version. - (the version number in ``setup.py`` reads from that init file as well) - -2. finalize ``./CHANGELOG`` (don't forget the the header). - -3. write ``doc/en/announce/release-VERSION.txt`` - (usually copying from an earlier release version). - -4. regenerate doc examples with ``tox -e regen`` and check with ``hg diff`` - if the differences show regressions. It's a bit of a manual process because - there a large part of the diff is about pytest headers or differences in - speed ("tests took X.Y seconds"). (XXX automate doc/example diffing to ignore - such changes and integrate it into "tox -e regen"). - -5. ``devpi upload`` to `your developer devpi index `_. You can create your own user and index on https://devpi.net, - an inofficial service from the devpi authors. - -6. run ``devpi use INDEX`` and ``devpi test`` from linux and windows machines - and verify test results on the index. On linux typically all environments - pass (April 2015 there is a setup problem with a cx_freeze environment) - but on windows all involving ``pexpect`` fail because pexpect does not exist - on windows and tox does not allow to have platform-specific environments. - Also on windows ``py33-trial`` fails but should probably pass (March 2015). - In any case, py26,py27,py33,py34 are required to pass for all platforms. - -7. You can fix tests/code and repeat number 6. until everything passes. - -8. Once you have sufficiently passing tox tests you can do the actual release:: - - cd doc/en/ - make install # will install to 2.7, 2.8, ... according to _pytest/__init__.py - make install-pdf # optional, requires latex packages installed - ssh pytest-dev@pytest.org # MANUAL: symlink "pytest.org/latest" to the just - # installed release docs - # browse to pytest.org to see - - devpi push pytest-VERSION pypi:NAME - hg ci -m "... finalized pytest-VERSION" - hg tag VERSION - hg push - -9. 
send out release announcement to pytest-dev@python.org, - testing-in-python@lists.idyll.org and python-announce-list@python.org . - -10. **after the release** bump the version number in ``_pytest/__init__.py``, - to the next Minor release version (i.e. if you released ``pytest-2.8.0``, - set it to ``pytest-2.9.0.dev1``). - -11. already done :) diff --git a/doc/en/setup.txt b/doc/en/setup.rst similarity index 100% rename from doc/en/setup.txt rename to doc/en/setup.rst diff --git a/doc/en/skipping.txt b/doc/en/skipping.rst similarity index 94% rename from doc/en/skipping.txt rename to doc/en/skipping.rst index 28ea3bd2736..77456e2de93 100644 --- a/doc/en/skipping.txt +++ b/doc/en/skipping.rst @@ -83,7 +83,7 @@ As with all function :ref:`marking ` you can skip test functions at the `whole class- or module level`_. If your code targets python2.6 or above you use the skipif decorator (and any other marker) on classes:: - @pytest.mark.skipif(sys.platform == 'win32', + @pytest.mark.skipif(sys.platform != 'win32', reason="requires windows") class TestPosixCalls: @@ -97,7 +97,7 @@ If your code targets python2.5 where class-decorators are not available, you can set the ``pytestmark`` attribute of a class:: class TestPosixCalls: - pytestmark = pytest.mark.skipif(sys.platform == 'win32', + pytestmark = pytest.mark.skipif(sys.platform != 'win32', reason="requires Windows") def test_function(self): @@ -107,10 +107,11 @@ As with the class-decorator, the ``pytestmark`` special name tells ``pytest`` to apply it to each test function in the class. If you want to skip all test functions of a module, you must use -the ``pytestmark`` name on the global level:: +the ``pytestmark`` name on the global level: - # test_module.py +.. code-block:: python + # test_module.py pytestmark = pytest.mark.skipif(...) 
If multiple "skipif" decorators are applied to a test function, it @@ -163,13 +164,13 @@ a simple test file with the several usages: Running it with the report-on-xfail option gives this output:: example $ py.test -rx xfail_demo.py - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/sandbox/pytest/doc/en, inifile: pytest.ini + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $PWD/doc/en, inifile: pytest.ini collected 7 items xfail_demo.py xxxxxxx - ========================= short test summary info ========================== + ======= short test summary info ======== XFAIL xfail_demo.py::test_hello XFAIL xfail_demo.py::test_hello2 reason: [NOTRUN] @@ -183,7 +184,7 @@ Running it with the report-on-xfail option gives this output:: reason: reason XFAIL xfail_demo.py::test_hello7 - ======================== 7 xfailed in 0.06 seconds ========================= + ======= 7 xfailed in 0.12 seconds ======== .. 
_`skip/xfail with parametrize`: diff --git a/doc/en/status.txt b/doc/en/status.rst similarity index 55% rename from doc/en/status.txt rename to doc/en/status.rst index e46b7481fc1..3c7bf70eaa4 100644 --- a/doc/en/status.txt +++ b/doc/en/status.rst @@ -1,5 +1,5 @@ pytest development status ================================ -https://drone.io/bitbucket.org/pytest-dev/pytest +https://travis-ci.org/pytest-dev/pytest diff --git a/doc/en/talks.txt b/doc/en/talks.rst similarity index 100% rename from doc/en/talks.txt rename to doc/en/talks.rst diff --git a/doc/en/test/attic.txt b/doc/en/test/attic.rst similarity index 100% rename from doc/en/test/attic.txt rename to doc/en/test/attic.rst diff --git a/doc/en/test/index.txt b/doc/en/test/index.rst similarity index 100% rename from doc/en/test/index.txt rename to doc/en/test/index.rst diff --git a/doc/en/test/mission.txt b/doc/en/test/mission.rst similarity index 100% rename from doc/en/test/mission.txt rename to doc/en/test/mission.rst diff --git a/doc/en/test/plugin/cov.txt b/doc/en/test/plugin/cov.rst similarity index 100% rename from doc/en/test/plugin/cov.txt rename to doc/en/test/plugin/cov.rst diff --git a/doc/en/test/plugin/coverage.txt b/doc/en/test/plugin/coverage.rst similarity index 100% rename from doc/en/test/plugin/coverage.txt rename to doc/en/test/plugin/coverage.rst diff --git a/doc/en/test/plugin/django.txt b/doc/en/test/plugin/django.rst similarity index 100% rename from doc/en/test/plugin/django.txt rename to doc/en/test/plugin/django.rst diff --git a/doc/en/test/plugin/figleaf.txt b/doc/en/test/plugin/figleaf.rst similarity index 100% rename from doc/en/test/plugin/figleaf.txt rename to doc/en/test/plugin/figleaf.rst diff --git a/doc/en/test/plugin/genscript.txt b/doc/en/test/plugin/genscript.rst similarity index 86% rename from doc/en/test/plugin/genscript.txt rename to doc/en/test/plugin/genscript.rst index b2e1c58af20..ee80f233fa0 100644 --- a/doc/en/test/plugin/genscript.txt +++ 
b/doc/en/test/plugin/genscript.rst @@ -1,5 +1,5 @@ -generate standalone test script to be distributed along with an application. +(deprecated) generate standalone test script to be distributed along with an application. ============================================================================ diff --git a/doc/en/test/plugin/helpconfig.txt b/doc/en/test/plugin/helpconfig.rst similarity index 100% rename from doc/en/test/plugin/helpconfig.txt rename to doc/en/test/plugin/helpconfig.rst diff --git a/doc/en/test/plugin/index.txt b/doc/en/test/plugin/index.rst similarity index 100% rename from doc/en/test/plugin/index.txt rename to doc/en/test/plugin/index.rst diff --git a/doc/en/test/plugin/links.txt b/doc/en/test/plugin/links.rst similarity index 100% rename from doc/en/test/plugin/links.txt rename to doc/en/test/plugin/links.rst diff --git a/doc/en/test/plugin/nose.txt b/doc/en/test/plugin/nose.rst similarity index 100% rename from doc/en/test/plugin/nose.txt rename to doc/en/test/plugin/nose.rst diff --git a/doc/en/test/plugin/oejskit.txt b/doc/en/test/plugin/oejskit.rst similarity index 100% rename from doc/en/test/plugin/oejskit.txt rename to doc/en/test/plugin/oejskit.rst diff --git a/doc/en/test/plugin/terminal.txt b/doc/en/test/plugin/terminal.rst similarity index 100% rename from doc/en/test/plugin/terminal.txt rename to doc/en/test/plugin/terminal.rst diff --git a/doc/en/test/plugin/xdist.txt b/doc/en/test/plugin/xdist.rst similarity index 100% rename from doc/en/test/plugin/xdist.txt rename to doc/en/test/plugin/xdist.rst diff --git a/doc/en/tmpdir.txt b/doc/en/tmpdir.rst similarity index 55% rename from doc/en/tmpdir.txt rename to doc/en/tmpdir.rst index 4fb00ebd2f4..c396346a109 100644 --- a/doc/en/tmpdir.txt +++ b/doc/en/tmpdir.rst @@ -5,10 +5,10 @@ Temporary directories and files ================================================ -The 'tmpdir' test function argument ------------------------------------ +The 'tmpdir' fixture +-------------------- -You can 
use the ``tmpdir`` function argument which will +You can use the ``tmpdir`` fixture which will provide a temporary directory unique to the test invocation, created in the `base temporary directory`_. @@ -28,17 +28,17 @@ Running this would result in a passed test except for the last ``assert 0`` line which we use to look at values:: $ py.test test_tmpdir.py - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-118, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 1 items test_tmpdir.py F - ================================= FAILURES ================================= - _____________________________ test_create_file _____________________________ + ======= FAILURES ======== + _______ test_create_file ________ - tmpdir = local('/tmp/pytest-19/test_create_file0') + tmpdir = local('/tmp/pytest-NaN/test_create_file0') def test_create_file(tmpdir): p = tmpdir.mkdir("sub").join("hello.txt") @@ -49,7 +49,45 @@ Running this would result in a passed test except for the last E assert 0 test_tmpdir.py:7: AssertionError - ========================= 1 failed in 0.04 seconds ========================= + ======= 1 failed in 0.12 seconds ======== + + +The 'tmpdir_factory' fixture +---------------------------- + +.. versionadded:: 2.8 + +The ``tmpdir_factory`` is a session-scoped fixture which can be used +to create arbitrary temporary directories from any other fixture or test. + +For example, suppose your test suite needs a large image on disk, which is +generated procedurally. Instead of computing the same image for each test +that uses it into its own ``tmpdir``, you can generate it once per-session +to save time: + +.. 
code-block:: python + + # contents of conftest.py + import pytest + + @pytest.fixture(scope='session') + def image_file(tmpdir_factory): + img = compute_expensive_image() + fn = tmpdir_factory.mktemp('data').join('img.png') + img.save(str(fn)) + return fn + + # contents of test_image.py + def test_histogram(image_file): + img = load_image(image_file) + # compute and test histogram + +``tmpdir_factory`` instances have the following methods: + +.. currentmodule:: _pytest.tmpdir + +.. automethod:: TempdirFactory.mktemp +.. automethod:: TempdirFactory.getbasetemp .. _`base temporary directory`: diff --git a/doc/en/unittest.txt b/doc/en/unittest.rst similarity index 90% rename from doc/en/unittest.txt rename to doc/en/unittest.rst index 736b23296c7..dd57ef0d24b 100644 --- a/doc/en/unittest.txt +++ b/doc/en/unittest.rst @@ -87,36 +87,36 @@ Due to the deliberately failing assert statements, we can take a look at the ``self.db`` values in the traceback:: $ py.test test_unittest_db.py - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-119, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items test_unittest_db.py FF - ================================= FAILURES ================================= - ___________________________ MyTest.test_method1 ____________________________ + ======= FAILURES ======== + _______ MyTest.test_method1 ________ self = def test_method1(self): assert hasattr(self, "db") > assert 0, self.db # fail for demo purposes - E AssertionError: .DummyDB object at 0x7f97382031d0> + E AssertionError: E assert 0 test_unittest_db.py:9: AssertionError - ___________________________ MyTest.test_method2 ____________________________ + _______ MyTest.test_method2 ________ self = def test_method2(self): > assert 0, self.db # fail for 
demo purposes - E AssertionError: .DummyDB object at 0x7f97382031d0> + E AssertionError: E assert 0 test_unittest_db.py:12: AssertionError - ========================= 2 failed in 0.04 seconds ========================= + ======= 2 failed in 0.12 seconds ======== This default pytest traceback shows that the two test methods share the same ``self.db`` instance which was our intention @@ -163,7 +163,7 @@ Running this test module ...:: $ py.test -q test_unittest_cleandir.py . - 1 passed in 0.25 seconds + 1 passed in 0.12 seconds ... gives us one passed test because the ``initdir`` fixture function was executed ahead of the ``test_method``. diff --git a/doc/en/usage.txt b/doc/en/usage.rst similarity index 85% rename from doc/en/usage.txt rename to doc/en/usage.rst index e774ebef667..54ad72bb175 100644 --- a/doc/en/usage.txt +++ b/doc/en/usage.rst @@ -46,12 +46,14 @@ Several test run options:: py.test test_mod.py # run tests in module py.test somepath # run all tests below somepath py.test -k stringexpr # only run tests with names that match the - # the "string expression", e.g. "MyClass and not method" + # the "string expression", e.g. "MyClass and not method" # will select TestMyClass.test_something # but not TestMyClass.test_method_simple py.test test_mod.py::test_func # only run tests that match the "node ID", # e.g "test_mod.py::test_func" will select # only test_func in test_mod.py + py.test test_mod.py::TestClass::test_method # run a single method in + # a single class Import 'pkg' and use its filesystem location to find and run tests:: @@ -87,16 +89,16 @@ failure situation:: py.test -x --pdb # drop to PDB on first failure, then end test session py.test --pdb --maxfail=3 # drop to PDB for first three failures -Note that on any failure the exception information is stored on +Note that on any failure the exception information is stored on ``sys.last_value``, ``sys.last_type`` and ``sys.last_traceback``. 
In interactive use, this allows one to drop into postmortem debugging with any debug tool. One can also manually access the exception information, for example:: - >> import sys - >> sys.last_traceback.tb_lineno + >>> import sys + >>> sys.last_traceback.tb_lineno 42 - >> sys.last_value + >>> sys.last_value AssertionError('assert result == "ok"',) Setting a breakpoint / aka ``set_trace()`` @@ -153,6 +155,36 @@ integration servers, use this invocation:: to create an XML file at ``path``. +record_xml_property +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. versionadded:: 2.8 + +If you want to log additional information for a test, you can use the +``record_xml_property`` fixture: + +.. code-block:: python + + def test_function(record_xml_property): + record_xml_property("example_key", 1) + assert 0 + +This will add an extra property ``example_key="1"`` to the generated +``testcase`` tag: + +.. code-block:: xml + + + +.. warning:: + + This is an experimental feature, and its interface might be replaced + by something more powerful and general in future versions. The + functionality per-se will be kept, however. + + Also please note that using this feature will break any schema verification. + This might be a problem when used with some CI servers. + Creating resultlog format files ---------------------------------------------------- @@ -230,6 +262,6 @@ hook was invoked:: $ python myinvoke.py *** test run reporting finishing - + .. include:: links.inc diff --git a/doc/en/plugins.txt b/doc/en/writing_plugins.rst similarity index 57% rename from doc/en/plugins.txt rename to doc/en/writing_plugins.rst index 2e10417fe56..1e9807cf5ce 100644 --- a/doc/en/plugins.txt +++ b/doc/en/writing_plugins.rst @@ -1,28 +1,76 @@ .. _plugins: +.. 
_`writing-plugins`: -Working with plugins and conftest files -======================================= +Writing plugins +=============== -``pytest`` implements all aspects of configuration, collection, running and reporting by calling `well specified hooks`_. Virtually any Python module can be registered as a plugin. It can implement any number of hook functions (usually two or three) which all have a ``pytest_`` prefix, making hook functions easy to distinguish and find. There are three basic location types: +It is easy to implement `local conftest plugins`_ for your own project +or `pip-installable plugins`_ that can be used throughout many projects, +including third party projects. Please refer to :ref:`using plugins` if you +only want to use but not write plugins. + +A plugin contains one or multiple hook functions. :ref:`Writing hooks ` +explains the basics and details of how you can write a hook function yourself. +``pytest`` implements all aspects of configuration, collection, running and +reporting by calling `well specified hooks`_ of the following plugins: + +* :ref:`builtin plugins`: loaded from pytest's internal ``_pytest`` directory. + +* :ref:`external plugins `: modules discovered through + `setuptools entry points`_ -* `builtin plugins`_: loaded from pytest's internal ``_pytest`` directory. -* `external plugins`_: modules discovered through `setuptools entry points`_ * `conftest.py plugins`_: modules auto-discovered in test directories +In principle, each hook call is a ``1:N`` Python function call where ``N`` is the +number of registered implementation functions for a given specification. +All specifications and implementations follow the ``pytest_`` prefix +naming convention, making them easy to distinguish and find. + +.. 
_`pluginorder`: + +Plugin discovery order at tool startup +-------------------------------------- + +``pytest`` loads plugin modules at tool startup in the following way: + +* by loading all builtin plugins + +* by loading all plugins registered through `setuptools entry points`_. + +* by pre-scanning the command line for the ``-p name`` option + and loading the specified plugin before actual command line parsing. + +* by loading all :file:`conftest.py` files as inferred by the command line + invocation: + + - if no test paths are specified use current dir as a test path + - if it exists, load ``conftest.py`` and ``test*/conftest.py`` relative + to the directory part of the first test path. + + Note that pytest does not find ``conftest.py`` files in deeper nested + sub directories at tool startup. It is usually a good idea to keep + your conftest.py file in the top level test or project root directory. + +* by recursively loading all plugins specified by the + ``pytest_plugins`` variable in ``conftest.py`` files + + .. _`pytest/plugin`: http://bitbucket.org/pytest-dev/pytest/src/tip/pytest/plugin/ .. _`conftest.py plugins`: .. _`conftest.py`: .. _`localplugin`: .. _`conftest`: +.. _`local conftest plugins`: conftest.py: local per-directory plugins ---------------------------------------- -local ``conftest.py`` plugins contain directory-specific hook -implementations. Session and test running activities will +Local ``conftest.py`` plugins contain directory-specific hook +implementations. Session and test running activities will invoke all hooks defined in ``conftest.py`` files closer to the -root of the filesystem. Example: Assume the following layout -and content of files:: +root of the filesystem. 
Example of implementing the +``pytest_runtest_setup`` hook so that is called for tests in the ``a`` +sub directory but not for other directories:: a/conftest.py: def pytest_runtest_setup(item): @@ -51,77 +99,9 @@ Here is how you might run it:: under a package scope or to never import anything from a conftest.py file. -.. _`external plugins`: -.. _`extplugins`: - -Installing External Plugins / Searching ---------------------------------------- - -Installing a plugin happens through any usual Python installation -tool, for example:: - pip install pytest-NAME - pip uninstall pytest-NAME - -If a plugin is installed, ``pytest`` automatically finds and integrates it, -there is no need to activate it. We have a :doc:`page listing -all 3rd party plugins and their status against the latest py.test version -` and here is a little annotated list -for some popular plugins: - -.. _`django`: https://www.djangoproject.com/ - -* `pytest-django `_: write tests - for `django`_ apps, using pytest integration. - -* `pytest-twisted `_: write tests - for `twisted `_ apps, starting a reactor and - processing deferreds from test functions. - -* `pytest-capturelog `_: - to capture and assert about messages from the logging module - -* `pytest-cov `_: - coverage reporting, compatible with distributed testing - -* `pytest-xdist `_: - to distribute tests to CPUs and remote hosts, to run in boxed - mode which allows to survive segmentation faults, to run in - looponfailing mode, automatically re-running failing tests - on file changes, see also :ref:`xdist` - -* `pytest-instafail `_: - to report failures while the test run is happening. - -* `pytest-bdd `_ and - `pytest-konira `_ - to write tests using behaviour-driven testing. - -* `pytest-timeout `_: - to timeout tests based on function marks or global definitions. - -* `pytest-cache `_: - to interactively re-run failing tests and help other plugins to - store test run information across invocations. 
- -* `pytest-pep8 `_: - a ``--pep8`` option to enable PEP8 compliance checking. - -* `oejskit `_: - a plugin to run javascript unittests in life browsers - -To see a complete list of all plugins with their latest testing -status against different py.test and Python versions, please visit -`plugincompat `_. - -You may also discover more plugins through a `pytest- pypi.python.org search`_. - -.. _`available installable plugins`: -.. _`pytest- pypi.python.org search`: http://pypi.python.org/pypi?%3Aaction=search&term=pytest-&submit=search - - -Writing a plugin by looking at examples ---------------------------------------- +Writing your own plugin +----------------------- .. _`setuptools`: http://pypi.python.org/pypi/setuptools @@ -129,17 +109,28 @@ If you want to write a plugin, there are many real-life examples you can copy from: * a custom collection example plugin: :ref:`yaml plugin` -* around 20 `builtin plugins`_ which provide pytest's own functionality -* many `external plugins`_ providing additional features +* around 20 doc:`builtin plugins` which provide pytest's own functionality +* many :ref:`external plugins ` providing additional features All of these plugins implement the documented `well specified hooks`_ to extend and add functionality. -You can also :ref:`contribute your plugin to pytest-dev` +.. note:: + Make sure to check out the excellent + `cookiecutter-pytest-plugin `_ + project, which is a `cookiecutter template `_ + for authoring plugins. + + The template provides an excellent starting point with a working plugin, + tests running with tox, comprehensive README and + entry-pointy already pre-configured. + +Also consider :ref:`contributing your plugin to pytest-dev` once it has some happy users other than yourself. .. _`setuptools entry points`: +.. 
_`pip-installable plugins`: Making your plugin installable by others ---------------------------------------- @@ -174,33 +165,6 @@ If a package is installed this way, ``pytest`` will load `well specified hooks`_. -.. _`pluginorder`: - -Plugin discovery order at tool startup --------------------------------------- - -``pytest`` loads plugin modules at tool startup in the following way: - -* by loading all builtin plugins - -* by loading all plugins registered through `setuptools entry points`_. - -* by pre-scanning the command line for the ``-p name`` option - and loading the specified plugin before actual command line parsing. - -* by loading all :file:`conftest.py` files as inferred by the command line - invocation: - - - if no test paths are specified use current dir as a test path - - if exists, load ``conftest.py`` and ``test*/conftest.py`` relative - to the directory part of the first test path. - - Note that pytest does not find ``conftest.py`` files in deeper nested - sub directories at tool startup. It is usually a good idea to keep - your conftest.py file in the top level test or project root directory. - -* by recursively loading all plugins specified by the - ``pytest_plugins`` variable in ``conftest.py`` files Requiring/Loading plugins in a test module or conftest file @@ -232,88 +196,237 @@ the plugin manager like this: If you want to look at the names of existing plugins, use the ``--traceconfig`` option. -.. _`findpluginname`: +Testing plugins +--------------- -Finding out which plugins are active ------------------------------------- +pytest comes with some facilities that you can enable for testing your +plugin. Given that you have an installed plugin you can enable the +:py:class:`testdir <_pytest.pytester.Testdir>` fixture via specifying a +command line option to include the pytester plugin (``-p pytester``) or +by putting ``pytest_plugins = pytester`` into your test or +``conftest.py`` file. 
You then will have a ``testdir`` fixture which you +can use like this:: -If you want to find out which plugins are active in your -environment you can type:: + # content of test_myplugin.py - py.test --traceconfig + pytest_plugins = pytester # to get testdir fixture -and will get an extended test header which shows activated plugins -and their names. It will also print local plugins aka -:ref:`conftest.py ` files when they are loaded. + def test_myplugin(testdir): + testdir.makepyfile(""" + def test_example(): + pass + """) + result = testdir.runpytest("--verbose") + result.fnmatch_lines(""" + test_example* + """) -.. _`cmdunregister`: +Note that by default ``testdir.runpytest()`` will run pytest +in-process. You can pass the command line option ``--runpytest=subprocess`` +to have it happen in a subprocess. + +Also see the :py:class:`RunResult <_pytest.pytester.RunResult>` for more +methods of the result object that you get from a call to ``runpytest``. + +.. _`writinghooks`: + +Writing hook functions +====================== + + +.. _validation: + +hook function validation and execution +-------------------------------------- + +pytest calls hook functions from registered plugins for any +given hook specification. Let's look at a typical hook function +for the ``pytest_collection_modifyitems(session, config, +items)`` hook which pytest calls after collection of all test items is +completed. + +When we implement a ``pytest_collection_modifyitems`` function in our plugin +pytest will during registration verify that you use argument +names which match the specification and bail out if not. + +Let's look at a possible implementation: + +.. 
code-block:: python + + def pytest_collection_modifyitems(config, items): + # called after collection is completed + # you can modify the ``items`` list + +Here, ``pytest`` will pass in ``config`` (the pytest config object) +and ``items`` (the list of collected test items) but will not pass +in the ``session`` argument because we didn't list it in the function +signature. This dynamic "pruning" of arguments allows ``pytest`` to +be "future-compatible": we can introduce new hook named parameters without +breaking the signatures of existing hook implementations. It is one of +the reasons for the general long-lived compatibility of pytest plugins. + +Note that hook functions other than ``pytest_runtest_*`` are not +allowed to raise exceptions. Doing so will break the pytest run. + + + +firstresult: stop at first non-None result +------------------------------------------- + +Most calls to ``pytest`` hooks result in a **list of results** which contains +all non-None results of the called hook functions. + +Some hook specifications use the ``firstresult=True`` option so that the hook +call only executes until the first of N registered functions returns a +non-None result which is then taken as result of the overall hook call. +The remaining hook functions will not be called in this case. + + +hookwrapper: executing around other hooks +------------------------------------------------- + +.. currentmodule:: _pytest.core + +.. versionadded:: 2.7 (experimental) + +pytest plugins can implement hook wrappers which wrap the execution +of other hook implementations. A hook wrapper is a generator function +which yields exactly once. When pytest invokes hooks it first executes +hook wrappers and passes the same arguments as to the regular hooks. + +At the yield point of the hook wrapper pytest will execute the next hook +implementations and return their result to the yield point in the form of +a :py:class:`CallOutcome` instance which encapsulates a result or +exception info. 
The yield point itself will thus typically not raise +exceptions (unless there are bugs). + +Here is an example definition of a hook wrapper:: + + import pytest + + @pytest.hookimpl(hookwrapper=True) + def pytest_pyfunc_call(pyfuncitem): + # do whatever you want before the next hook executes + + outcome = yield + # outcome.excinfo may be None or a (cls, val, tb) tuple + + res = outcome.get_result() # will raise if outcome was exception + # postprocess result + +Note that hook wrappers don't return results themselves, they merely +perform tracing or other side effects around the actual hook implementations. +If the result of the underlying hook is a mutable object, they may modify +that result but it's probably better to avoid it. -Deactivating / unregistering a plugin by name ---------------------------------------------- -You can prevent plugins from loading or unregister them:: +Hook function ordering / call example +------------------------------------- + +For any given hook specification there may be more than one +implementation and we thus generally view ``hook`` execution as a +``1:N`` function call where ``N`` is the number of registered functions. +There are ways to influence if a hook implementation comes before or +after others, i.e. the position in the ``N``-sized list of functions: + +.. code-block:: python + + # Plugin 1 + @pytest.hookimpl_spec(tryfirst=True) + def pytest_collection_modifyitems(items): + # will execute as early as possible + + # Plugin 2 + @pytest.hookimpl_spec(trylast=True) + def pytest_collection_modifyitems(items): + # will execute as late as possible + + # Plugin 3 + @pytest.hookimpl_spec(hookwrapper=True) + def pytest_collection_modifyitems(items): + # will execute even before the tryfirst one above! + outcome = yield + # will execute after all non-hookwrappers executed + +Here is the order of execution: + +1. Plugin3's pytest_collection_modifyitems called until the yield point + because it is a hook wrapper. 
- py.test -p no:NAME +2. Plugin1's pytest_collection_modifyitems is called because it is marked + with ``tryfirst=True``. + +3. Plugin2's pytest_collection_modifyitems is called because it is marked + with ``trylast=True`` (but even without this mark it would come after + Plugin1). + +4. Plugin3's pytest_collection_modifyitems then executing the code after the yield + point. The yield receives a :py:class:`CallOutcome` instance which encapsulates + the result from calling the non-wrappers. Wrappers shall not modify the result. + +It's possible to use ``tryfirst`` and ``trylast`` also in conjunction with +``hookwrapper=True`` in which case it will influence the ordering of hookwrappers +among each other. + + +Declaring new hooks +------------------------ + +.. currentmodule:: _pytest.hookspec + +Plugins and ``conftest.py`` files may declare new hooks that can then be +implemented by other plugins in order to alter behaviour or interact with +the new plugin: + +.. autofunction:: pytest_addhooks + +Hooks are usually declared as do-nothing functions that contain only +documentation describing when the hook will be called and what return values +are expected. + +For an example, see `newhooks.py`_ from :ref:`xdist`. + +.. _`newhooks.py`: https://bitbucket.org/pytest-dev/pytest-xdist/src/52082f70e7dd04b00361091b8af906c60fd6700f/xdist/newhooks.py?at=default + + +Optionally using hooks from 3rd party plugins +--------------------------------------------- -This means that any subsequent try to activate/load the named -plugin will it already existing. See :ref:`findpluginname` for -how to obtain the name of a plugin. +Using new hooks from plugins as explained above might be a little tricky +because of the standard :ref:`validation mechanism `: +if you depend on a plugin that is not installed, validation will fail and +the error message will not make much sense to your users. -.. 
_`builtin plugins`: +One approach is to defer the hook implementation to a new plugin instead of +declaring the hook functions directly in your plugin module, for example:: -pytest default plugin reference -=============================== + # contents of myplugin.py + class DeferPlugin(object): + """Simple plugin to defer pytest-xdist hook functions.""" -You can find the source code for the following plugins -in the `pytest repository `_. + def pytest_testnodedown(self, node, error): + """standard xdist hook function. + """ -.. autosummary:: + def pytest_configure(config): + if config.pluginmanager.hasplugin('xdist'): + config.pluginmanager.register(DeferPlugin()) - _pytest.assertion - _pytest.capture - _pytest.config - _pytest.doctest - _pytest.genscript - _pytest.helpconfig - _pytest.junitxml - _pytest.mark - _pytest.monkeypatch - _pytest.nose - _pytest.pastebin - _pytest.pdb - _pytest.pytester - _pytest.python - _pytest.recwarn - _pytest.resultlog - _pytest.runner - _pytest.main - _pytest.skipping - _pytest.terminal - _pytest.tmpdir - _pytest.unittest +This has the added benefit of allowing you to conditionally install hooks +depending on which plugins are installed. .. _`well specified hooks`: +.. currentmodule:: _pytest.hookspec + pytest hook reference ===================== -Hook specification and validation ---------------------------------- - -``pytest`` calls hook functions to implement initialization, running, -test execution and reporting. When ``pytest`` loads a plugin it validates -that each hook function conforms to its respective hook specification. -Each hook function name and its argument names need to match a hook -specification. However, a hook function may accept *fewer* parameters -by simply not specifying them. If you mistype argument names or the -hook name itself you get an error showing the available arguments. Initialization, command line and configuration hooks ---------------------------------------------------- -.. 
currentmodule:: _pytest.hookspec - .. autofunction:: pytest_load_initial_conftests .. autofunction:: pytest_cmdline_preparse .. autofunction:: pytest_cmdline_parse @@ -390,87 +503,6 @@ reporting or interaction with exceptions: .. autofunction:: pytest_exception_interact -Declaring new hooks ------------------------- - -Plugins and ``conftest.py`` files may declare new hooks that can then be -implemented by other plugins in order to alter behaviour or interact with -the new plugin: - -.. autofunction:: pytest_addhooks - -Hooks are usually declared as do-nothing functions that contain only -documentation describing when the hook will be called and what return values -are expected. - -For an example, see `newhooks.py`_ from :ref:`xdist`. - -.. _`newhooks.py`: https://bitbucket.org/pytest-dev/pytest-xdist/src/52082f70e7dd04b00361091b8af906c60fd6700f/xdist/newhooks.py?at=default - - -Using hooks from 3rd party plugins -------------------------------------- - -Using new hooks from plugins as explained above might be a little tricky -because the standard `Hook specification and validation`_ mechanism: -if you depend on a plugin that is not installed, -validation will fail and the error message will not make much sense to your users. - -One approach is to defer the hook implementation to a new plugin instead of -declaring the hook functions directly in your plugin module, for example:: - - # contents of myplugin.py - - class DeferPlugin(object): - """Simple plugin to defer pytest-xdist hook functions.""" - - def pytest_testnodedown(self, node, error): - """standard xdist hook function. - """ - - def pytest_configure(config): - if config.pluginmanager.hasplugin('xdist'): - config.pluginmanager.register(DeferPlugin()) - - -This has the added benefit of allowing you to conditionally install hooks -depending on which plugins are installed. - -hookwrapper: executing around other hooks -------------------------------------------------- - -.. currentmodule:: _pytest.core - -.. 
versionadded:: 2.7 (experimental) - -pytest plugins can implement hook wrappers which which wrap the execution -of other hook implementations. A hook wrapper is a generator function -which yields exactly once. When pytest invokes hooks it first executes -hook wrappers and passes the same arguments as to the regular hooks. - -At the yield point of the hook wrapper pytest will execute the next hook -implementations and return their result to the yield point in the form of -a :py:class:`CallOutcome` instance which encapsulates a result or -exception info. The yield point itself will thus typically not raise -exceptions (unless there are bugs). - -Here is an example definition of a hook wrapper:: - - import pytest - - @pytest.mark.hookwrapper - def pytest_pyfunc_call(pyfuncitem): - # do whatever you want before the next hook executes - outcome = yield - # outcome.excinfo may be None or a (cls, val, tb) tuple - res = outcome.get_result() # will raise if outcome was exception - # postprocess result - -Note that hook wrappers don't return results themselves, they merely -perform tracing or other side effects around the actual hook implementations. -If the result of the underlying hook is a mutable object, they may modify -that result, however. - Reference of objects involved in hooks ====================================== @@ -513,3 +545,23 @@ Reference of objects involved in hooks .. autoclass:: _pytest.core.CallOutcome() :members: +.. autofunction:: _pytest.config.get_plugin_manager() + +.. autoclass:: _pytest.config.PytestPluginManager() + :members: + :undoc-members: + :show-inheritance: + +.. autoclass:: pluggy.PluginManager() + :members: + +.. currentmodule:: _pytest.pytester + +.. autoclass:: Testdir() + :members: runpytest,runpytest_subprocess,runpytest_inprocess,makeconftest,makepyfile + +.. autoclass:: RunResult() + :members: + +.. 
autoclass:: LineMatcher() + :members: diff --git a/doc/en/xdist.txt b/doc/en/xdist.rst similarity index 100% rename from doc/en/xdist.txt rename to doc/en/xdist.rst diff --git a/doc/en/xunit_setup.txt b/doc/en/xunit_setup.rst similarity index 100% rename from doc/en/xunit_setup.txt rename to doc/en/xunit_setup.rst diff --git a/doc/en/yieldfixture.txt b/doc/en/yieldfixture.rst similarity index 52% rename from doc/en/yieldfixture.txt rename to doc/en/yieldfixture.rst index 34bc93906a3..ee88a27df59 100644 --- a/doc/en/yieldfixture.txt +++ b/doc/en/yieldfixture.rst @@ -9,17 +9,9 @@ Fixture functions using "yield" / context manager integration pytest-2.4 allows fixture functions to seamlessly use a ``yield`` instead of a ``return`` statement to provide a fixture value while otherwise -fully supporting all other fixture features. +fully supporting all other fixture features. -.. note:: - - "yielding" fixture values is an experimental feature and its exact - declaration may change later but earliest in a 2.5 release. You can thus - safely use this feature in the 2.4 series but may need to adapt later. - Test functions themselves will not need to change (as a general - feature, they are ignorant of how fixtures are setup). - -Let's look at a simple standalone-example using the new ``yield`` syntax:: +Let's look at a simple standalone-example using the ``yield`` syntax:: # content of test_yield.py @@ -51,7 +43,7 @@ Let's run it with output capturing disabled:: test called .teardown after yield - 1 passed in 0.01 seconds + 1 passed in 0.12 seconds We can also seamlessly use the new syntax with ``with`` statements. Let's simplify the above ``passwd`` fixture:: @@ -72,24 +64,25 @@ The file ``f`` will be closed after the test finished execution because the Python ``file`` object supports finalization when the ``with`` statement ends. -Note that the new syntax is fully integrated with using ``scope``, -``params`` and other fixture features. 
Changing existing -fixture functions to use ``yield`` is thus straight forward. +Note that the yield fixture form supports all other fixture +features such as ``scope``, ``params``, etc., thus changing existing +fixture functions to use ``yield`` is straightforward. -Discussion and future considerations / feedback -++++++++++++++++++++++++++++++++++++++++++++++++++++ +.. note:: -The yield-syntax has been discussed by pytest users extensively. -In general, the advantages of the using a ``yield`` fixture syntax are: + While the ``yield`` syntax is similar to what + :py:func:`contextlib.contextmanager` decorated functions + provide, with pytest fixture functions the part after the + "yield" will always be invoked, independently from the + exception status of the test function which uses the fixture. + This behaviour makes sense if you consider that many different + test functions might use a module or session scoped fixture. -- easy provision of fixtures in conjunction with context managers. -- no need to register a callback, providing for more synchronous - control flow in the fixture function. Also there is no need to accept - the ``request`` object into the fixture function just for providing - finalization code. +Discussion and future considerations / feedback +++++++++++++++++++++++++++++++++++++++++++++++++++++ -However, there are also limitations or foreseeable irritations: +There are some topics that are worth mentioning: - usually ``yield`` is used for producing multiple values. But fixture functions can only yield exactly one value. @@ -100,24 +93,8 @@ However, there are also limitations or foreseeable irritations: :ref:`fixture parametrization ` mechanisms together with ``yield``-style fixtures. -- the ``yield`` syntax is similar to what - :py:func:`contextlib.contextmanager` decorated functions - provide. 
With pytest fixture functions, the "after yield" part will - always be invoked, independently from the exception status - of the test function which uses the fixture. The pytest - behaviour makes sense if you consider that many different - test functions might use a module or session scoped fixture. - Some test functions might raise exceptions and others not, - so how could pytest re-raise a single exception at the - ``yield`` point in the fixture function? - - lastly ``yield`` introduces more than one way to write fixture functions, so what's the obvious way to a newcomer? - Newcomers reading the docs will see feature examples using the - ``return`` style so should use that, if in doubt. - Others can start experimenting with writing yield-style fixtures - and possibly help evolving them further. - -If you want to feedback or participate in the ongoing -discussion, please join our :ref:`contact channels`. -you are most welcome. + +If you want to feedback or participate in discussion of the above +topics, please join our :ref:`contact channels`, you are most welcome. 
diff --git a/pytest.py b/pytest.py index 6c25c61951c..161c4482290 100644 --- a/pytest.py +++ b/pytest.py @@ -11,8 +11,11 @@ # else we are imported -from _pytest.config import main, UsageError, _preloadplugins, cmdline -from _pytest import __version__ +from _pytest.config import ( + main, UsageError, _preloadplugins, cmdline, + hookspec, hookimpl +) +from _pytest import version as __version__ _preloadplugins() # to populate pytest.* namespace so help(pytest) works diff --git a/requirements-docs.txt b/requirements-docs.txt index a17bba710b7..be3a232e57b 100644 --- a/requirements-docs.txt +++ b/requirements-docs.txt @@ -1,2 +1,3 @@ sphinx==1.2.3 -hg+ssh://hg@bitbucket.org/pytest-dev/regendoc#egg=regendoc +regendoc +pyyaml diff --git a/setup.cfg b/setup.cfg index 770dd1fb2cc..1ab4fd059b2 100644 --- a/setup.cfg +++ b/setup.cfg @@ -8,3 +8,6 @@ upload-dir = doc/en/build/html [bdist_wheel] universal = 1 + +[devpi:upload] +formats = sdist.tgz,bdist_wheel diff --git a/setup.py b/setup.py index 9cf1ed72a0e..69b6f5e5c3c 100644 --- a/setup.py +++ b/setup.py @@ -48,7 +48,7 @@ def has_environment_marker_support(): def main(): - install_requires = ['py>=1.4.29'] + install_requires = ['py>=1.4.29', 'pluggy>=0.3.0,<0.4.0'] extras_require = {} if has_environment_marker_support(): extras_require[':python_version=="2.6" or python_version=="3.0" or python_version=="3.1"'] = ['argparse'] @@ -63,7 +63,7 @@ def main(): name='pytest', description='pytest: simple powerful testing with Python', long_description=long_description, - version=get_version(), + use_scm_version={'write_to': '_pytest/__init__.py'}, url='http://pytest.org', license='MIT license', platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'], @@ -75,6 +75,7 @@ def main(): # the following should be enabled for release install_requires=install_requires, extras_require=extras_require, + setup_requires=['setuptools_scm'], packages=['_pytest', '_pytest.assertion'], py_modules=['pytest'], zip_safe=False, diff --git 
a/testing/acceptance_test.py b/testing/acceptance_test.py index fffb67e71d9..b9a3fa38109 100644 --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -1,5 +1,7 @@ import sys import py, pytest +from _pytest.main import EXIT_NOTESTSCOLLECTED, EXIT_USAGEERROR + class TestGeneralUsage: def test_config_error(self, testdir): @@ -83,7 +85,7 @@ def pytest_addoption(parser): def test_option(pytestconfig): assert pytestconfig.option.xyz == "123" """) - result = testdir.runpytest("-p", "pytest_xyz", "--xyz=123") + result = testdir.runpytest("-p", "pytest_xyz", "--xyz=123", syspathinsert=True) assert result.ret == 0 result.stdout.fnmatch_lines([ '*1 passed*', @@ -147,7 +149,7 @@ def pytest_collect_directory(): pytest.skip("early") """) result = testdir.runpytest() - assert result.ret == 0 + assert result.ret == EXIT_NOTESTSCOLLECTED result.stdout.fnmatch_lines([ "*1 skip*" ]) @@ -177,7 +179,7 @@ def test_issue93_initialnode_importing_capturing(self, testdir): sys.stderr.write("stder42\\n") """) result = testdir.runpytest() - assert result.ret == 0 + assert result.ret == EXIT_NOTESTSCOLLECTED assert "should not be seen" not in result.stdout.str() assert "stderr42" not in result.stderr.str() @@ -204,7 +206,7 @@ def test_chdir(self, testdir): os.chdir(os.path.dirname(os.getcwd())) print (py.log) """)) - result = testdir.runpython(p, prepend=False) + result = testdir.runpython(p) assert not result.ret def test_issue109_sibling_conftests_not_loaded(self, testdir): @@ -212,13 +214,13 @@ def test_issue109_sibling_conftests_not_loaded(self, testdir): sub2 = testdir.tmpdir.mkdir("sub2") sub1.join("conftest.py").write("assert 0") result = testdir.runpytest(sub2) - assert result.ret == 0 + assert result.ret == EXIT_NOTESTSCOLLECTED sub2.ensure("__init__.py") p = sub2.ensure("test_hello.py") result = testdir.runpytest(p) - assert result.ret == 0 + assert result.ret == EXIT_NOTESTSCOLLECTED result = testdir.runpytest(sub1) - assert result.ret != 0 + assert result.ret == 
EXIT_USAGEERROR def test_directory_skipped(self, testdir): testdir.makeconftest(""" @@ -228,7 +230,7 @@ def pytest_ignore_collect(): """) testdir.makepyfile("def test_hello(): pass") result = testdir.runpytest() - assert result.ret == 0 + assert result.ret == EXIT_NOTESTSCOLLECTED result.stdout.fnmatch_lines([ "*1 skipped*" ]) @@ -354,7 +356,8 @@ def test_unknown_option(self, testdir): *unrecognized* """) - def test_getsourcelines_error_issue553(self, testdir): + def test_getsourcelines_error_issue553(self, testdir, monkeypatch): + monkeypatch.setattr("inspect.getsourcelines", None) p = testdir.makepyfile(""" def raise_error(obj): raise IOError('source code not available') @@ -478,7 +481,7 @@ def test_invoke_with_string(self, capsys): def test_invoke_with_path(self, tmpdir, capsys): retcode = pytest.main(tmpdir) - assert not retcode + assert retcode == EXIT_NOTESTSCOLLECTED out, err = capsys.readouterr() def test_invoke_plugin_api(self, testdir, capsys): @@ -579,6 +582,17 @@ def test_doctest_id(self, testdir): "*1 failed*", ]) + def test_core_backward_compatibility(self): + """Test backward compatibility for get_plugin_manager function. 
See #787.""" + import _pytest.config + assert type(_pytest.config.get_plugin_manager()) is _pytest.config.PytestPluginManager + + + def test_has_plugin(self, request): + """Test hasplugin function of the plugin manager (#932).""" + assert request.config.pluginmanager.hasplugin('python') + + class TestDurations: source = """ import time diff --git a/testing/conftest.py b/testing/conftest.py deleted file mode 100644 index 3fe1c80f326..00000000000 --- a/testing/conftest.py +++ /dev/null @@ -1,126 +0,0 @@ -import pytest -import sys - -pytest_plugins = "pytester", - -import os, py, gc - -class LsofFdLeakChecker(object): - def get_open_files(self): - gc.collect() - out = self._exec_lsof() - open_files = self._parse_lsof_output(out) - return open_files - - def _exec_lsof(self): - pid = os.getpid() - #return py.process.cmdexec("lsof -Ffn0 -p %d" % pid) - try: - return py.process.cmdexec("lsof -p %d" % pid) - except UnicodeDecodeError: - # cmdexec may raise UnicodeDecodeError on Windows systems - # with locale other than english: - # https://bitbucket.org/pytest-dev/py/issues/66 - return '' - - def _parse_lsof_output(self, out): - def isopen(line): - return line.startswith('f') and ( - "deleted" not in line and 'mem' not in line and "txt" not in line and 'cwd' not in line) - - open_files = [] - - for line in out.split("\n"): - if isopen(line): - fields = line.split('\0') - fd = fields[0][1:] - filename = fields[1][1:] - if filename.startswith('/'): - open_files.append((fd, filename)) - - return open_files - - -def pytest_addoption(parser): - parser.addoption('--lsof', - action="store_true", dest="lsof", default=False, - help=("run FD checks if lsof is available")) - -def pytest_runtest_setup(item): - config = item.config - config._basedir = py.path.local() - if config.getvalue("lsof"): - try: - config._fd_leak_checker = LsofFdLeakChecker() - config._openfiles = config._fd_leak_checker.get_open_files() - except py.process.cmdexec.Error: - pass - -#def pytest_report_header(): 
-# return "pid: %s" % os.getpid() - -def check_open_files(config): - lines2 = config._fd_leak_checker.get_open_files() - new_fds = set([t[0] for t in lines2]) - set([t[0] for t in config._openfiles]) - open_files = [t for t in lines2 if t[0] in new_fds] - if open_files: - error = [] - error.append("***** %s FD leakage detected" % len(open_files)) - error.extend([str(f) for f in open_files]) - error.append("*** Before:") - error.extend([str(f) for f in config._openfiles]) - error.append("*** After:") - error.extend([str(f) for f in lines2]) - error.append(error[0]) - raise AssertionError("\n".join(error)) - -def pytest_runtest_teardown(item, __multicall__): - item.config._basedir.chdir() - if hasattr(item.config, '_openfiles'): - x = __multicall__.execute() - check_open_files(item.config) - return x - -# XXX copied from execnet's conftest.py - needs to be merged -winpymap = { - 'python2.7': r'C:\Python27\python.exe', - 'python2.6': r'C:\Python26\python.exe', - 'python3.1': r'C:\Python31\python.exe', - 'python3.2': r'C:\Python32\python.exe', - 'python3.3': r'C:\Python33\python.exe', - 'python3.4': r'C:\Python34\python.exe', - 'python3.5': r'C:\Python35\python.exe', -} - -def getexecutable(name, cache={}): - try: - return cache[name] - except KeyError: - executable = py.path.local.sysfind(name) - if executable: - if name == "jython": - import subprocess - popen = subprocess.Popen([str(executable), "--version"], - universal_newlines=True, stderr=subprocess.PIPE) - out, err = popen.communicate() - if not err or "2.5" not in err: - executable = None - if "2.5.2" in err: - executable = None # http://bugs.jython.org/issue1790 - cache[name] = executable - return executable - -@pytest.fixture(params=['python2.6', 'python2.7', 'python3.3', "python3.4", - 'pypy', 'pypy3']) -def anypython(request): - name = request.param - executable = getexecutable(name) - if executable is None: - if sys.platform == "win32": - executable = winpymap.get(name, None) - if executable: - executable 
= py.path.local(executable) - if executable.check(): - return executable - pytest.skip("no suitable %s found" % (name,)) - return executable diff --git a/testing/python/collect.py b/testing/python/collect.py index c7292829f62..6a302f29153 100644 --- a/testing/python/collect.py +++ b/testing/python/collect.py @@ -1,4 +1,8 @@ +import sys +from textwrap import dedent import pytest, py +from _pytest.main import EXIT_NOTESTSCOLLECTED + class TestModule: def test_failing_import(self, testdir): @@ -23,6 +27,24 @@ def test_import_duplicate(self, testdir): "*HINT*", ]) + def test_import_appends_for_import(self, testdir, monkeypatch): + syspath = list(sys.path) + monkeypatch.setattr(sys, "path", syspath) + root1 = testdir.mkdir("root1") + root2 = testdir.mkdir("root2") + root1.ensure("x456.py") + root2.ensure("x456.py") + p = root2.join("test_x456.py") + p.write(dedent("""\ + import x456 + def test(): + assert x456.__file__.startswith(%r) + """ % str(root1))) + syspath.insert(0, str(root1)) + with root2.as_cwd(): + reprec = testdir.inline_run() + reprec.assertoutcome(passed=1) + def test_syntax_error_in_module(self, testdir): modcol = testdir.getmodulecol("this is a syntax error") pytest.raises(modcol.CollectError, modcol.collect) @@ -38,14 +60,10 @@ def test_class_with_init_warning(self, testdir): class TestClass1: def __init__(self): pass - class TestClass2(object): - def __init__(self): - pass """) result = testdir.runpytest("-rw") - result.stdout.fnmatch_lines(""" + result.stdout.fnmatch_lines_random(""" WC1*test_class_with_init_warning.py*__init__* - *2 warnings* """) def test_class_subclassobject(self, testdir): @@ -396,9 +414,19 @@ def value(): ['overridden']) def test_overridden_via_param(value): assert value == 'overridden' + + @pytest.mark.parametrize('somevalue', ['overridden']) + def test_not_overridden(value, somevalue): + assert value == 'value' + assert somevalue == 'overridden' + + @pytest.mark.parametrize('other,value', [('foo', 'overridden')]) + def 
test_overridden_via_multiparam(other, value): + assert other == 'foo' + assert value == 'overridden' """) rec = testdir.inline_run() - rec.assertoutcome(passed=1) + rec.assertoutcome(passed=3) def test_parametrize_overrides_parametrized_fixture(self, testdir): @@ -456,6 +484,38 @@ def pytest_pyfunc_call(self, pyfuncitem): config.pluginmanager.register(MyPlugin2()) config.hook.pytest_pyfunc_call(pyfuncitem=item) + def test_multiple_parametrize(self, testdir): + modcol = testdir.getmodulecol(""" + import pytest + @pytest.mark.parametrize('x', [0, 1]) + @pytest.mark.parametrize('y', [2, 3]) + def test1(x, y): + pass + """) + colitems = modcol.collect() + assert colitems[0].name == 'test1[2-0]' + assert colitems[1].name == 'test1[2-1]' + assert colitems[2].name == 'test1[3-0]' + assert colitems[3].name == 'test1[3-1]' + + def test_issue751_multiple_parametrize_with_ids(self, testdir): + modcol = testdir.getmodulecol(""" + import pytest + @pytest.mark.parametrize('x', [0], ids=['c']) + @pytest.mark.parametrize('y', [0, 1], ids=['a', 'b']) + class Test(object): + def test1(self, x, y): + pass + def test2(self, x, y): + pass + """) + colitems = modcol.collect()[0].collect()[0].collect() + assert colitems[0].name == 'test1[a-c]' + assert colitems[1].name == 'test1[b-c]' + assert colitems[2].name == 'test2[a-c]' + assert colitems[3].name == 'test2[b-c]' + + class TestSorting: def test_check_equality(self, testdir): modcol = testdir.getmodulecol(""" @@ -543,7 +603,7 @@ def test_customized_pymakeitem(self, testdir): b = testdir.mkdir("a").mkdir("b") b.join("conftest.py").write(py.code.Source(""" import pytest - @pytest.mark.hookwrapper + @pytest.hookimpl(hookwrapper=True) def pytest_pycollect_makeitem(): outcome = yield if outcome.excinfo is None: @@ -611,9 +671,7 @@ def pytest_runtest_teardown(item): sub1.join("test_in_sub1.py").write("def test_1(): pass") sub2.join("test_in_sub2.py").write("def test_2(): pass") result = testdir.runpytest("-v", "-s") - 
result.stdout.fnmatch_lines([ - "*2 passed*" - ]) + result.assert_outcomes(passed=2) def test_modulecol_roundtrip(testdir): modcol = testdir.getmodulecol("pass", withinit=True) @@ -850,7 +908,7 @@ class Test: """) result = testdir.runpytest() assert "TypeError" not in result.stdout.str() - assert result.ret == 0 + assert result.ret == EXIT_NOTESTSCOLLECTED def test_collect_functools_partial(testdir): diff --git a/testing/python/fixture.py b/testing/python/fixture.py index 90986173a66..203176443da 100644 --- a/testing/python/fixture.py +++ b/testing/python/fixture.py @@ -100,9 +100,7 @@ def pytest_funcarg__arg2(request): sub1.join("test_in_sub1.py").write("def test_1(arg1): pass") sub2.join("test_in_sub2.py").write("def test_2(arg2): pass") result = testdir.runpytest("-v") - result.stdout.fnmatch_lines([ - "*2 passed*" - ]) + result.assert_outcomes(passed=2) def test_extend_fixture_module_class(self, testdir): testfile = testdir.makepyfile(""" @@ -557,7 +555,8 @@ def sarg(tmpdir): pass def test_function(request, farg): assert set(get_public_names(request.fixturenames)) == \ - set(["tmpdir", "sarg", "arg1", "request", "farg"]) + set(["tmpdir", "sarg", "arg1", "request", "farg", + "tmpdir_factory"]) """) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) @@ -1492,7 +1491,7 @@ def test_2(self): reprec = testdir.inline_run("-v","-s") reprec.assertoutcome(passed=8) config = reprec.getcalls("pytest_unconfigure")[0].config - l = config._conftest.getconftestmodules(p)[0].l + l = config.pluginmanager._getconftestmodules(p)[0].l assert l == ["fin_a1", "fin_a2", "fin_b1", "fin_b2"] * 2 def test_scope_ordering(self, testdir): @@ -1599,6 +1598,42 @@ def test_result(): reprec = testdir.inline_run() reprec.assertoutcome(passed=4) + def test_multiple_parametrization_issue_736(self, testdir): + testdir.makepyfile(""" + import pytest + + @pytest.fixture(params=[1,2,3]) + def foo(request): + return request.param + + @pytest.mark.parametrize('foobar', [4,5,6]) + def 
test_issue(foo, foobar): + assert foo in [1,2,3] + assert foobar in [4,5,6] + """) + reprec = testdir.inline_run() + reprec.assertoutcome(passed=9) + + @pytest.mark.parametrize('param_args', ["'fixt, val'", "'fixt,val'", "['fixt', 'val']", "('fixt', 'val')"]) + def test_override_parametrized_fixture_issue_979(self, testdir, param_args): + """Make sure a parametrized argument can override a parametrized fixture. + + This was a regression introduced in the fix for #736. + """ + testdir.makepyfile(""" + import pytest + + @pytest.fixture(params=[1, 2]) + def fixt(request): + return request.param + + @pytest.mark.parametrize(%s, [(3, 'x'), (4, 'x')]) + def test_foo(fixt, val): + pass + """ % param_args) + reprec = testdir.inline_run() + reprec.assertoutcome(passed=2) + def test_scope_session(self, testdir): testdir.makepyfile(""" import pytest diff --git a/testing/python/integration.py b/testing/python/integration.py index 96cc9dc5b12..1b9be5968da 100644 --- a/testing/python/integration.py +++ b/testing/python/integration.py @@ -238,7 +238,7 @@ def test_pytestconfig_is_session_scoped(): class TestNoselikeTestAttribute: - def test_module(self, testdir): + def test_module_with_global_test(self, testdir): testdir.makepyfile(""" __test__ = False def test_hello(): @@ -248,7 +248,7 @@ def test_hello(): assert not reprec.getfailedcollections() calls = reprec.getreports("pytest_runtest_logreport") assert not calls - + def test_class_and_method(self, testdir): testdir.makepyfile(""" __test__ = True diff --git a/testing/python/metafunc.py b/testing/python/metafunc.py index cbff9adfec7..ce84f7a5756 100644 --- a/testing/python/metafunc.py +++ b/testing/python/metafunc.py @@ -1,3 +1,4 @@ +import re import pytest, py from _pytest import python as funcargs @@ -138,6 +139,8 @@ def test_idmaker_native_strings(self): ("three", "three hundred"), (True, False), (None, None), + (re.compile('foo'), re.compile('bar')), + (str, int), (list("six"), [66, 66]), (set([7]), set("seven")), 
(tuple("eight"), (8, -8, 8)) @@ -147,9 +150,18 @@ def test_idmaker_native_strings(self): "three-three hundred", "True-False", "None-None", - "a5-b5", - "a6-b6", - "a7-b7"] + "foo-bar", + "str-int", + "a7-b7", + "a8-b8", + "a9-b9"] + + def test_idmaker_enum(self): + from _pytest.python import idmaker + enum = pytest.importorskip("enum") + e = enum.Enum("Foo", "one, two") + result = idmaker(("a", "b"), [(e.one, e.two)]) + assert result == ["Foo.one-Foo.two"] @pytest.mark.issue351 def test_idmaker_idfn(self): @@ -208,17 +220,136 @@ def func(x, y): pass assert metafunc._calls[0].id == "0-2" assert metafunc._calls[1].id == "0-3" + @pytest.mark.issue714 def test_parametrize_indirect(self): def func(x, y): pass metafunc = self.Metafunc(func) metafunc.parametrize('x', [1], indirect=True) metafunc.parametrize('y', [2,3], indirect=True) - metafunc.parametrize('unnamed', [1], indirect=True) assert len(metafunc._calls) == 2 assert metafunc._calls[0].funcargs == {} assert metafunc._calls[1].funcargs == {} - assert metafunc._calls[0].params == dict(x=1,y=2, unnamed=1) - assert metafunc._calls[1].params == dict(x=1,y=3, unnamed=1) + assert metafunc._calls[0].params == dict(x=1,y=2) + assert metafunc._calls[1].params == dict(x=1,y=3) + + @pytest.mark.issue714 + def test_parametrize_indirect_list(self): + def func(x, y): pass + metafunc = self.Metafunc(func) + metafunc.parametrize('x, y', [('a', 'b')], indirect=['x']) + assert metafunc._calls[0].funcargs == dict(y='b') + assert metafunc._calls[0].params == dict(x='a') + + @pytest.mark.issue714 + def test_parametrize_indirect_list_all(self): + def func(x, y): pass + metafunc = self.Metafunc(func) + metafunc.parametrize('x, y', [('a', 'b')], indirect=['x', 'y']) + assert metafunc._calls[0].funcargs == {} + assert metafunc._calls[0].params == dict(x='a', y='b') + + @pytest.mark.issue714 + def test_parametrize_indirect_list_empty(self): + def func(x, y): pass + metafunc = self.Metafunc(func) + metafunc.parametrize('x, y', [('a', 'b')], 
indirect=[]) + assert metafunc._calls[0].funcargs == dict(x='a', y='b') + assert metafunc._calls[0].params == {} + + @pytest.mark.issue714 + def test_parametrize_indirect_list_functional(self, testdir): + """ + Test parametrization with 'indirect' parameter applied on + particular arguments. As y is is direct, its value should + be used directly rather than being passed to the fixture + y. + + :param testdir: the instance of Testdir class, a temporary + test directory. + """ + testdir.makepyfile(""" + import pytest + @pytest.fixture(scope='function') + def x(request): + return request.param * 3 + @pytest.fixture(scope='function') + def y(request): + return request.param * 2 + @pytest.mark.parametrize('x, y', [('a', 'b')], indirect=['x']) + def test_simple(x,y): + assert len(x) == 3 + assert len(y) == 1 + """) + result = testdir.runpytest("-v") + result.stdout.fnmatch_lines([ + "*test_simple*a-b*", + "*1 passed*", + ]) + + @pytest.mark.issue714 + def test_parametrize_indirect_list_error(self, testdir): + def func(x, y): pass + metafunc = self.Metafunc(func) + with pytest.raises(ValueError): + metafunc.parametrize('x, y', [('a', 'b')], indirect=['x', 'z']) + + @pytest.mark.issue714 + def test_parametrize_uses_no_fixture_error_indirect_false(self, testdir): + """The 'uses no fixture' error tells the user at collection time + that the parametrize data they've set up doesn't correspond to the + fixtures in their test function, rather than silently ignoring this + and letting the test potentially pass. 
+ """ + testdir.makepyfile(""" + import pytest + + @pytest.mark.parametrize('x, y', [('a', 'b')], indirect=False) + def test_simple(x): + assert len(x) == 3 + """) + result = testdir.runpytest("--collect-only") + result.stdout.fnmatch_lines([ + "*uses no fixture 'y'*", + ]) + + @pytest.mark.xfail + @pytest.mark.issue714 + def test_parametrize_uses_no_fixture_error_indirect_true(self, testdir): + testdir.makepyfile(""" + import pytest + @pytest.fixture(scope='function') + def x(request): + return request.param * 3 + @pytest.fixture(scope='function') + def y(request): + return request.param * 2 + + @pytest.mark.parametrize('x, y', [('a', 'b')], indirect=True) + def test_simple(x): + assert len(x) == 3 + """) + result = testdir.runpytest("--collect-only") + result.stdout.fnmatch_lines([ + "*uses no fixture 'y'*", + ]) + + @pytest.mark.xfail + @pytest.mark.issue714 + def test_parametrize_indirect_uses_no_fixture_error_indirect_list(self, testdir): + testdir.makepyfile(""" + import pytest + @pytest.fixture(scope='function') + def x(request): + return request.param * 3 + + @pytest.mark.parametrize('x, y', [('a', 'b')], indirect=['x']) + def test_simple(x): + assert len(x) == 3 + """) + result = testdir.runpytest("--collect-only") + result.stdout.fnmatch_lines([ + "*uses no fixture 'y'*", + ]) def test_addcalls_and_parametrize_indirect(self): def func(x, y): pass @@ -292,9 +423,7 @@ def test_meth(self, x, y): """) result = testdir.runpytest() assert result.ret == 1 - result.stdout.fnmatch_lines([ - "*6 fail*", - ]) + result.assert_outcomes(failed=6) def test_parametrize_CSV(self, testdir): testdir.makepyfile(""" @@ -375,9 +504,7 @@ def test_method(self, metafunc, pytestconfig): assert metafunc.cls == TestClass """) result = testdir.runpytest(p, "-v") - result.stdout.fnmatch_lines([ - "*2 passed in*", - ]) + result.assert_outcomes(passed=2) def test_addcall_with_two_funcargs_generators(self, testdir): testdir.makeconftest(""" @@ -432,9 +559,7 @@ def test_hello(xyz): pass 
""") result = testdir.runpytest(p) - result.stdout.fnmatch_lines([ - "*1 pass*", - ]) + result.assert_outcomes(passed=1) def test_generate_plugin_and_module(self, testdir): @@ -508,9 +633,7 @@ def setup_method(self, func): self.val = 1 """) result = testdir.runpytest(p) - result.stdout.fnmatch_lines([ - "*1 pass*", - ]) + result.assert_outcomes(passed=1) def test_parametrize_functional2(self, testdir): testdir.makepyfile(""" @@ -655,8 +778,8 @@ def pytest_generate_tests(metafunc): def test_function(): pass """) - reprec = testdir.inline_run() - reprec.assertoutcome(passed=1) + reprec = testdir.runpytest() + reprec.assert_outcomes(passed=1) def test_generate_tests_only_done_in_subdir(self, testdir): sub1 = testdir.mkpydir("sub1") @@ -672,9 +795,7 @@ def pytest_generate_tests(metafunc): sub1.join("test_in_sub1.py").write("def test_1(): pass") sub2.join("test_in_sub2.py").write("def test_2(): pass") result = testdir.runpytest("-v", "-s", sub1, sub2, sub1) - result.stdout.fnmatch_lines([ - "*3 passed*" - ]) + result.assert_outcomes(passed=3) def test_generate_same_function_names_issue403(self, testdir): testdir.makepyfile(""" @@ -689,22 +810,24 @@ def test_foo(x): test_x = make_tests() test_y = make_tests() """) - reprec = testdir.inline_run() - reprec.assertoutcome(passed=4) + reprec = testdir.runpytest() + reprec.assert_outcomes(passed=4) @pytest.mark.issue463 - def test_parameterize_misspelling(self, testdir): + @pytest.mark.parametrize('attr', ['parametrise', 'parameterize', + 'parameterise']) + def test_parametrize_misspelling(self, testdir, attr): testdir.makepyfile(""" import pytest - @pytest.mark.parameterize("x", range(2)) + @pytest.mark.{0}("x", range(2)) def test_foo(x): pass - """) + """.format(attr)) reprec = testdir.inline_run('--collectonly') failures = reprec.getfailures() assert len(failures) == 1 - expectederror = "MarkerError: test_foo has 'parameterize', spelling should be 'parametrize'" + expectederror = "MarkerError: test_foo has '{0}', spelling 
should be 'parametrize'".format(attr) assert expectederror in failures[0].longrepr.reprcrash.message diff --git a/testing/python/raises.py b/testing/python/raises.py index 5ba56bb7123..edeb522267b 100644 --- a/testing/python/raises.py +++ b/testing/python/raises.py @@ -34,7 +34,6 @@ def test_raises_flip_builtin_AssertionError(self): raise BuiltinAssertionError """) - @pytest.mark.skipif('sys.version < "2.5"') def test_raises_as_contextmanager(self, testdir): testdir.makepyfile(""" from __future__ import with_statement diff --git a/testing/test_assertion.py b/testing/test_assertion.py index e8063f0d319..914feddf74a 100644 --- a/testing/test_assertion.py +++ b/testing/test_assertion.py @@ -231,6 +231,17 @@ def test_unicode(self): assert expl[1] == py.builtin._totext('- £€', 'utf-8') assert expl[2] == py.builtin._totext('+ £', 'utf-8') + def test_nonascii_text(self): + """ + :issue: 877 + non ascii python2 str caused a UnicodeDecodeError + """ + class A(str): + def __repr__(self): + return '\xff' + expl = callequal(A(), '1') + assert expl + def test_mojibake(self): # issue 429 left = 'e' @@ -411,8 +422,13 @@ def test_long(): """) result = testdir.runpytest() + # without -vv, truncate the message showing a few diff lines only result.stdout.fnmatch_lines([ - "*truncated*use*-vv*", + "*- 1", + "*- 3", + "*- 5", + "*- 7", + "*truncated (191 more lines)*use*-vv*", ]) @@ -461,7 +477,7 @@ def test_hello(): ("--assert=plain", "--nomagic"), ("--assert=plain", "--no-assert", "--nomagic")) for opt in off_options: - result = testdir.runpytest(*opt) + result = testdir.runpytest_subprocess(*opt) assert "3 == 4" not in result.stdout.str() def test_old_assert_mode(testdir): @@ -469,7 +485,7 @@ def test_old_assert_mode(testdir): def test_in_old_mode(): assert "@py_builtins" not in globals() """) - result = testdir.runpytest("--assert=reinterp") + result = testdir.runpytest_subprocess("--assert=reinterp") assert result.ret == 0 def test_triple_quoted_string_issue113(testdir): diff 
--git a/testing/test_assertrewrite.py b/testing/test_assertrewrite.py index 5ea2f2bf85b..544250ad5e8 100644 --- a/testing/test_assertrewrite.py +++ b/testing/test_assertrewrite.py @@ -12,6 +12,7 @@ from _pytest.assertion import util from _pytest.assertion.rewrite import rewrite_asserts, PYTEST_TAG +from _pytest.main import EXIT_NOTESTSCOLLECTED def setup_module(mod): @@ -429,7 +430,7 @@ def test_zipfile(self, testdir): import sys sys.path.append(%r) import test_gum.test_lizard""" % (z_fn,)) - assert testdir.runpytest().ret == 0 + assert testdir.runpytest().ret == EXIT_NOTESTSCOLLECTED def test_readonly(self, testdir): sub = testdir.mkdir("testing") @@ -453,7 +454,7 @@ def test_no_bytecode(): assert not os.path.exists(__cached__) assert not os.path.exists(os.path.dirname(__cached__))""") monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", "1") - assert testdir.runpytest().ret == 0 + assert testdir.runpytest_subprocess().ret == 0 @pytest.mark.skipif('"__pypy__" in sys.modules') def test_pyc_vs_pyo(self, testdir, monkeypatch): @@ -468,12 +469,12 @@ def test_optimized(): tmp = "--basetemp=%s" % p monkeypatch.setenv("PYTHONOPTIMIZE", "2") monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", raising=False) - assert testdir.runpybin("py.test", tmp).ret == 0 + assert testdir.runpytest_subprocess(tmp).ret == 0 tagged = "test_pyc_vs_pyo." 
+ PYTEST_TAG assert tagged + ".pyo" in os.listdir("__pycache__") monkeypatch.undo() monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", raising=False) - assert testdir.runpybin("py.test", tmp).ret == 1 + assert testdir.runpytest_subprocess(tmp).ret == 1 assert tagged + ".pyc" in os.listdir("__pycache__") def test_package(self, testdir): @@ -497,7 +498,7 @@ def test_package_without__init__py(self, testdir): pkg = testdir.mkdir('a_package_without_init_py') pkg.join('module.py').ensure() testdir.makepyfile("import a_package_without_init_py.module") - assert testdir.runpytest().ret == 0 + assert testdir.runpytest().ret == EXIT_NOTESTSCOLLECTED class TestAssertionRewriteHookDetails(object): def test_loader_is_package_false_for_module(self, testdir): @@ -615,10 +616,8 @@ def test_load_resource(): testdir.makepyfile(**contents) testdir.maketxtfile(**{'testpkg/resource': "Load me please."}) - result = testdir.runpytest() - result.stdout.fnmatch_lines([ - '* 1 passed*', - ]) + result = testdir.runpytest_subprocess() + result.assert_outcomes(passed=1) def test_read_pyc(self, tmpdir): """ @@ -666,6 +665,21 @@ def test_loader(): "* 1 passed*", ]) + def test_get_data_support(self, testdir): + """Implement optional PEP302 api (#808). 
+ """ + path = testdir.mkpydir("foo") + path.join("test_foo.py").write(py.code.Source(""" + class Test: + def test_foo(self): + import pkgutil + data = pkgutil.get_data('foo.test_foo', 'data.txt') + assert data == b'Hey' + """)) + path.join('data.txt').write('Hey') + result = testdir.runpytest() + result.stdout.fnmatch_lines('*1 passed*') + def test_issue731(testdir): testdir.makepyfile(""" diff --git a/testing/test_capture.py b/testing/test_capture.py index 2c69254ad66..53933352573 100644 --- a/testing/test_capture.py +++ b/testing/test_capture.py @@ -10,6 +10,7 @@ from _pytest import capture from _pytest.capture import CaptureManager +from _pytest.main import EXIT_NOTESTSCOLLECTED from py.builtin import print_ needsosdup = pytest.mark.xfail("not hasattr(os, 'dup')") @@ -282,7 +283,7 @@ def test_logging(): logging.basicConfig(stream=stream) stream.close() # to free memory/release resources """) - result = testdir.runpytest(p) + result = testdir.runpytest_subprocess(p) result.stderr.str().find("atexit") == -1 def test_logging_and_immediate_setupteardown(self, testdir): @@ -301,7 +302,7 @@ def teardown_function(function): """) for optargs in (('--capture=sys',), ('--capture=fd',)): print (optargs) - result = testdir.runpytest(p, *optargs) + result = testdir.runpytest_subprocess(p, *optargs) s = result.stdout.str() result.stdout.fnmatch_lines([ "*WARN*hello3", # errors show first! 
@@ -327,7 +328,7 @@ def teardown_module(function): """) for optargs in (('--capture=sys',), ('--capture=fd',)): print (optargs) - result = testdir.runpytest(p, *optargs) + result = testdir.runpytest_subprocess(p, *optargs) s = result.stdout.str() result.stdout.fnmatch_lines([ "*WARN*hello3", # errors come first @@ -348,7 +349,7 @@ def test_something(): logging.warn("hello432") assert 0 """) - result = testdir.runpytest( + result = testdir.runpytest_subprocess( p, "--traceconfig", "-p", "no:capturelog") assert result.ret != 0 @@ -364,8 +365,8 @@ def test_conftestlogging_is_shown(self, testdir): logging.warn("hello435") """) # make sure that logging is still captured in tests - result = testdir.runpytest("-s", "-p", "no:capturelog") - assert result.ret == 0 + result = testdir.runpytest_subprocess("-s", "-p", "no:capturelog") + assert result.ret == EXIT_NOTESTSCOLLECTED result.stderr.fnmatch_lines([ "WARNING*hello435*", ]) @@ -383,7 +384,7 @@ def test_hello(): logging.warn("hello433") assert 0 """) - result = testdir.runpytest(p, "-p", "no:capturelog") + result = testdir.runpytest_subprocess(p, "-p", "no:capturelog") assert result.ret != 0 result.stdout.fnmatch_lines([ "WARNING*hello433*", @@ -461,7 +462,7 @@ def test_hello(capfd): os.write(1, str(42).encode('ascii')) raise KeyboardInterrupt() """) - result = testdir.runpytest(p) + result = testdir.runpytest_subprocess(p) result.stdout.fnmatch_lines([ "*KeyboardInterrupt*" ]) @@ -474,7 +475,7 @@ def test_capture_and_logging(self, testdir): def test_log(capsys): logging.error('x') """) - result = testdir.runpytest(p) + result = testdir.runpytest_subprocess(p) assert 'closed' not in result.stderr.str() @@ -500,7 +501,7 @@ def test_fdfuncarg_skips_on_no_osdup(testdir): def test_hello(capfd): pass """) - result = testdir.runpytest("--capture=no") + result = testdir.runpytest_subprocess("--capture=no") result.stdout.fnmatch_lines([ "*1 skipped*" ]) @@ -563,8 +564,25 @@ def test_foo(): test_foo() """) result = 
testdir.runpytest('--assert=plain') - result.stdout.fnmatch_lines([ - '*2 passed*', + result.assert_outcomes(passed=2) + + +def test_error_during_readouterr(testdir): + """Make sure we suspend capturing if errors occurr during readouterr""" + testdir.makepyfile(pytest_xyz=""" + from _pytest.capture import FDCapture + def bad_snap(self): + raise Exception('boom') + assert FDCapture.snap + FDCapture.snap = bad_snap + """) + result = testdir.runpytest_subprocess( + "-p", "pytest_xyz", "--version", syspathinsert=True + ) + result.stderr.fnmatch_lines([ + "*in bad_snap", + " raise Exception('boom')", + "Exception: boom", ]) @@ -652,7 +670,7 @@ def lsof_check(): try: out = py.process.cmdexec("lsof -p %d" % pid) except (py.process.cmdexec.Error, UnicodeDecodeError): - # about UnicodeDecodeError, see note on conftest.py + # about UnicodeDecodeError, see note on pytester pytest.skip("could not run 'lsof'") yield out2 = py.process.cmdexec("lsof -p %d" % pid) @@ -886,7 +904,7 @@ def test_x(): os.write(1, "hello\\n".encode("ascii")) assert 0 """) - result = testdir.runpytest() + result = testdir.runpytest_subprocess() result.stdout.fnmatch_lines(""" *test_x* *assert 0* @@ -937,7 +955,7 @@ def test_stdin(): cap = StdCaptureFD(out=False, err=False, in_=True) cap.stop_capturing() """) - result = testdir.runpytest("--capture=fd") + result = testdir.runpytest_subprocess("--capture=fd") assert result.ret == 0 assert result.parseoutcomes()['passed'] == 3 @@ -972,7 +990,7 @@ def test_capture_again(): os.write(1, b"hello\\n") assert 0 """) - result = testdir.runpytest() + result = testdir.runpytest_subprocess() result.stdout.fnmatch_lines(""" *test_capture_again* *assert 0* diff --git a/testing/test_collection.py b/testing/test_collection.py index 0c8cabc67f1..749c5b7ce45 100644 --- a/testing/test_collection.py +++ b/testing/test_collection.py @@ -1,6 +1,6 @@ import pytest, py -from _pytest.main import Session +from _pytest.main import Session, EXIT_NOTESTSCOLLECTED class 
TestCollector: def test_collect_versus_item(self): @@ -116,6 +116,35 @@ def test_custom_norecursedirs(self, testdir): rec = testdir.inline_run("xyz123/test_2.py") rec.assertoutcome(failed=1) + def test_testpaths_ini(self, testdir, monkeypatch): + testdir.makeini(""" + [pytest] + testpaths = gui uts + """) + tmpdir = testdir.tmpdir + tmpdir.ensure("env", "test_1.py").write("def test_env(): pass") + tmpdir.ensure("gui", "test_2.py").write("def test_gui(): pass") + tmpdir.ensure("uts", "test_3.py").write("def test_uts(): pass") + + # executing from rootdir only tests from `testpaths` directories + # are collected + items, reprec = testdir.inline_genitems('-v') + assert [x.name for x in items] == ['test_gui', 'test_uts'] + + # check that explicitly passing directories in the command-line + # collects the tests + for dirname in ('env', 'gui', 'uts'): + items, reprec = testdir.inline_genitems(tmpdir.join(dirname)) + assert [x.name for x in items] == ['test_%s' % dirname] + + # changing cwd to each subdirectory and running pytest without + # arguments collects the tests in that directory normally + for dirname in ('env', 'gui', 'uts'): + monkeypatch.chdir(testdir.tmpdir.join(dirname)) + items, reprec = testdir.inline_genitems() + assert [x.name for x in items] == ['test_%s' % dirname] + + class TestCollectPluginHookRelay: def test_pytest_collect_file(self, testdir): wascalled = [] @@ -218,10 +247,10 @@ def pytest_ignore_collect(path, config): p = testdir.makepyfile("def test_hello(): pass") result = testdir.runpytest(p) assert result.ret == 0 - assert "1 passed" in result.stdout.str() + result.stdout.fnmatch_lines("*1 passed*") result = testdir.runpytest() - assert result.ret == 0 - assert "1 passed" not in result.stdout.str() + assert result.ret == EXIT_NOTESTSCOLLECTED + result.stdout.fnmatch_lines("*collected 0 items*") def test_collectignore_exclude_on_option(self, testdir): testdir.makeconftest(""" @@ -235,7 +264,7 @@ def pytest_configure(config): 
testdir.mkdir("hello") testdir.makepyfile(test_world="def test_hello(): pass") result = testdir.runpytest() - assert result.ret == 0 + assert result.ret == EXIT_NOTESTSCOLLECTED assert "passed" not in result.stdout.str() result = testdir.runpytest("--XX") assert result.ret == 0 @@ -296,7 +325,6 @@ def test_parsearg(self, testdir): subdir.ensure("__init__.py") target = subdir.join(p.basename) p.move(target) - testdir.chdir() subdir.chdir() config = testdir.parseconfig(p.basename) rcol = Session(config=config) @@ -313,7 +341,7 @@ def test_parsearg(self, testdir): def test_collect_topdir(self, testdir): p = testdir.makepyfile("def test_func(): pass") id = "::".join([p.basename, "test_func"]) - # XXX migrate to inline_genitems? (see below) + # XXX migrate to collectonly? (see below) config = testdir.parseconfig(id) topdir = testdir.tmpdir rcol = Session(config) @@ -470,7 +498,6 @@ def test_global_file(self, testdir, tmpdir): assert col.config is config def test_pkgfile(self, testdir): - testdir.chdir() tmpdir = testdir.tmpdir subdir = tmpdir.join("subdir") x = subdir.ensure("x.py") diff --git a/testing/test_config.py b/testing/test_config.py index f1e3c5eb9e1..d497200ee90 100644 --- a/testing/test_config.py +++ b/testing/test_config.py @@ -1,6 +1,7 @@ import py, pytest from _pytest.config import getcfg, get_common_ancestor, determine_setup +from _pytest.main import EXIT_NOTESTSCOLLECTED class TestParseIni: def test_getcfg_and_config(self, testdir, tmpdir): @@ -75,7 +76,7 @@ def test_confcutdir(self, testdir): [pytest] addopts = --qwe """) - result = testdir.runpytest("--confcutdir=.") + result = testdir.inline_run("--confcutdir=.") assert result.ret == 0 class TestConfigCmdlineParsing: @@ -313,7 +314,7 @@ def load(self): monkeypatch.setattr(pkg_resources, 'iter_entry_points', my_iter) config = testdir.parseconfig("-p", "no:mytestplugin") plugin = config.pluginmanager.getplugin("mytestplugin") - assert plugin == -1 + assert plugin is None def 
test_cmdline_processargs_simple(testdir): testdir.makeconftest(""" @@ -326,11 +327,49 @@ def pytest_cmdline_preparse(args): "*-h*", ]) +def test_invalid_options_show_extra_information(testdir): + """display extra information when pytest exits due to unrecognized + options in the command-line""" + testdir.makeini(""" + [pytest] + addopts = --invalid-option + """) + result = testdir.runpytest() + result.stderr.fnmatch_lines([ + "*error: unrecognized arguments: --invalid-option*", + "* inifile: %s*" % testdir.tmpdir.join('tox.ini'), + "* rootdir: %s*" % testdir.tmpdir, + ]) + + +@pytest.mark.parametrize('args', [ + ['dir1', 'dir2', '-v'], + ['dir1', '-v', 'dir2'], + ['dir2', '-v', 'dir1'], + ['-v', 'dir2', 'dir1'], +]) +def test_consider_args_after_options_for_rootdir_and_inifile(testdir, args): + """ + Consider all arguments in the command-line for rootdir and inifile + discovery, even if they happen to occur after an option. #949 + """ + # replace "dir1" and "dir2" from "args" into their real directory + root = testdir.tmpdir.mkdir('myroot') + d1 = root.mkdir('dir1') + d2 = root.mkdir('dir2') + for i, arg in enumerate(args): + if arg == 'dir1': + args[i] = d1 + elif arg == 'dir2': + args[i] = d2 + result = testdir.runpytest(*args) + result.stdout.fnmatch_lines(['*rootdir: *myroot, inifile: ']) + @pytest.mark.skipif("sys.platform == 'win32'") def test_toolongargs_issue224(testdir): result = testdir.runpytest("-m", "hello" * 500) - assert result.ret == 0 + assert result.ret == EXIT_NOTESTSCOLLECTED def test_notify_exception(testdir, capfd): config = testdir.parseconfig() @@ -348,17 +387,18 @@ def pytest_internalerror(self, excrepr): def test_load_initial_conftest_last_ordering(testdir): - from _pytest.config import get_plugin_manager - pm = get_plugin_manager() + from _pytest.config import get_config + pm = get_config().pluginmanager class My: def pytest_load_initial_conftests(self): pass m = My() pm.register(m) - l = pm.listattr("pytest_load_initial_conftests") - 
assert l[-1].__module__ == "_pytest.capture" - assert l[-2] == m.pytest_load_initial_conftests - assert l[-3].__module__ == "_pytest.config" + hc = pm.hook.pytest_load_initial_conftests + l = hc._nonwrappers + hc._wrappers + assert l[-1].function.__module__ == "_pytest.capture" + assert l[-2].function == m.pytest_load_initial_conftests + assert l[-3].function.__module__ == "_pytest.config" class TestWarning: def test_warn_config(self, testdir): @@ -367,9 +407,8 @@ def test_warn_config(self, testdir): def pytest_configure(config): config.warn("C1", "hello") def pytest_logwarning(code, message): - assert code == "C1" - assert message == "hello" - l.append(1) + if message == "hello" and code == "C1": + l.append(1) """) testdir.makepyfile(""" def test_proper(pytestconfig): @@ -390,15 +429,13 @@ def test_hello(fix): pass """) result = testdir.runpytest() - result.stdout.fnmatch_lines(""" - *1 warning* - """) + assert result.parseoutcomes()["pytest-warnings"] > 0 assert "hello" not in result.stdout.str() + result = testdir.runpytest("-rw") result.stdout.fnmatch_lines(""" - ===*warning summary*=== + ===*pytest-warning summary*=== *WT1*test_warn_on_test_item*:5*hello* - *1 warning* """) class TestRootdir: diff --git a/testing/test_conftest.py b/testing/test_conftest.py index 82ed00b3ba0..6700502c482 100644 --- a/testing/test_conftest.py +++ b/testing/test_conftest.py @@ -1,13 +1,13 @@ from textwrap import dedent import py, pytest -from _pytest.config import Conftest - +from _pytest.config import PytestPluginManager +from _pytest.main import EXIT_NOTESTSCOLLECTED, EXIT_USAGEERROR @pytest.fixture(scope="module", params=["global", "inpackage"]) -def basedir(request): +def basedir(request, tmpdir_factory): from _pytest.tmpdir import tmpdir - tmpdir = tmpdir(request) + tmpdir = tmpdir(request, tmpdir_factory) tmpdir.ensure("adir/conftest.py").write("a=1 ; Directory = 3") tmpdir.ensure("adir/b/conftest.py").write("b=2 ; a = 1.5") if request.param == "inpackage": @@ -16,7 +16,7 
@@ def basedir(request): return tmpdir def ConftestWithSetinitial(path): - conftest = Conftest() + conftest = PytestPluginManager() conftest_setinitial(conftest, [path]) return conftest @@ -25,51 +25,42 @@ class Namespace: def __init__(self): self.file_or_dir = args self.confcutdir = str(confcutdir) - conftest.setinitial(Namespace()) + self.noconftest = False + conftest._set_initial_conftests(Namespace()) class TestConftestValueAccessGlobal: def test_basic_init(self, basedir): - conftest = Conftest() + conftest = PytestPluginManager() p = basedir.join("adir") - assert conftest.rget_with_confmod("a", p)[1] == 1 - - def test_onimport(self, basedir): - l = [] - conftest = Conftest(onimport=l.append) - adir = basedir.join("adir") - conftest_setinitial(conftest, [adir], confcutdir=basedir) - assert len(l) == 1 - assert conftest.rget_with_confmod("a", adir)[1] == 1 - assert conftest.rget_with_confmod("b", adir.join("b"))[1] == 2 - assert len(l) == 2 + assert conftest._rget_with_confmod("a", p)[1] == 1 def test_immediate_initialiation_and_incremental_are_the_same(self, basedir): - conftest = Conftest() + conftest = PytestPluginManager() len(conftest._path2confmods) - conftest.getconftestmodules(basedir) + conftest._getconftestmodules(basedir) snap1 = len(conftest._path2confmods) #assert len(conftest._path2confmods) == snap1 + 1 - conftest.getconftestmodules(basedir.join('adir')) + conftest._getconftestmodules(basedir.join('adir')) assert len(conftest._path2confmods) == snap1 + 1 - conftest.getconftestmodules(basedir.join('b')) + conftest._getconftestmodules(basedir.join('b')) assert len(conftest._path2confmods) == snap1 + 2 def test_value_access_not_existing(self, basedir): conftest = ConftestWithSetinitial(basedir) with pytest.raises(KeyError): - conftest.rget_with_confmod('a', basedir) + conftest._rget_with_confmod('a', basedir) def test_value_access_by_path(self, basedir): conftest = ConftestWithSetinitial(basedir) adir = basedir.join("adir") - assert 
conftest.rget_with_confmod("a", adir)[1] == 1 - assert conftest.rget_with_confmod("a", adir.join("b"))[1] == 1.5 + assert conftest._rget_with_confmod("a", adir)[1] == 1 + assert conftest._rget_with_confmod("a", adir.join("b"))[1] == 1.5 def test_value_access_with_confmod(self, basedir): startdir = basedir.join("adir", "b") startdir.ensure("xx", dir=True) conftest = ConftestWithSetinitial(startdir) - mod, value = conftest.rget_with_confmod("a", startdir) + mod, value = conftest._rget_with_confmod("a", startdir) assert value == 1.5 path = py.path.local(mod.__file__) assert path.dirpath() == basedir.join("adir", "b") @@ -85,9 +76,9 @@ def test_conftest_in_nonpkg_with_init(tmpdir): def test_doubledash_considered(testdir): conf = testdir.mkdir("--option") conf.join("conftest.py").ensure() - conftest = Conftest() + conftest = PytestPluginManager() conftest_setinitial(conftest, [conf.basename, conf.basename]) - l = conftest.getconftestmodules(conf) + l = conftest._getconftestmodules(conf) assert len(l) == 1 def test_issue151_load_all_conftests(testdir): @@ -96,7 +87,7 @@ def test_issue151_load_all_conftests(testdir): p = testdir.mkdir(name) p.ensure("conftest.py") - conftest = Conftest() + conftest = PytestPluginManager() conftest_setinitial(conftest, names) d = list(conftest._conftestpath2mod.values()) assert len(d) == len(names) @@ -105,15 +96,15 @@ def test_conftest_global_import(testdir): testdir.makeconftest("x=3") p = testdir.makepyfile(""" import py, pytest - from _pytest.config import Conftest - conf = Conftest() - mod = conf.importconftest(py.path.local("conftest.py")) + from _pytest.config import PytestPluginManager + conf = PytestPluginManager() + mod = conf._importconftest(py.path.local("conftest.py")) assert mod.x == 3 import conftest assert conftest is mod, (conftest, mod) subconf = py.path.local().ensure("sub", "conftest.py") subconf.write("y=4") - mod2 = conf.importconftest(subconf) + mod2 = conf._importconftest(subconf) assert mod != mod2 assert mod2.y == 
4 import conftest @@ -125,27 +116,27 @@ def test_conftest_global_import(testdir): def test_conftestcutdir(testdir): conf = testdir.makeconftest("") p = testdir.mkdir("x") - conftest = Conftest() + conftest = PytestPluginManager() conftest_setinitial(conftest, [testdir.tmpdir], confcutdir=p) - l = conftest.getconftestmodules(p) + l = conftest._getconftestmodules(p) assert len(l) == 0 - l = conftest.getconftestmodules(conf.dirpath()) + l = conftest._getconftestmodules(conf.dirpath()) assert len(l) == 0 assert conf not in conftest._conftestpath2mod # but we can still import a conftest directly - conftest.importconftest(conf) - l = conftest.getconftestmodules(conf.dirpath()) + conftest._importconftest(conf) + l = conftest._getconftestmodules(conf.dirpath()) assert l[0].__file__.startswith(str(conf)) # and all sub paths get updated properly - l = conftest.getconftestmodules(p) + l = conftest._getconftestmodules(p) assert len(l) == 1 assert l[0].__file__.startswith(str(conf)) def test_conftestcutdir_inplace_considered(testdir): conf = testdir.makeconftest("") - conftest = Conftest() + conftest = PytestPluginManager() conftest_setinitial(conftest, [conf.dirpath()], confcutdir=conf.dirpath()) - l = conftest.getconftestmodules(conf.dirpath()) + l = conftest._getconftestmodules(conf.dirpath()) assert len(l) == 1 assert l[0].__file__.startswith(str(conf)) @@ -153,7 +144,7 @@ def test_conftestcutdir_inplace_considered(testdir): def test_setinitial_conftest_subdirs(testdir, name): sub = testdir.mkdir(name) subconftest = sub.ensure("conftest.py") - conftest = Conftest() + conftest = PytestPluginManager() conftest_setinitial(conftest, [sub.dirpath()], confcutdir=testdir.tmpdir) if name not in ('whatever', '.dotdir'): assert subconftest in conftest._conftestpath2mod @@ -173,6 +164,14 @@ def pytest_addoption(parser): result.stdout.fnmatch_lines(["*--xyz*"]) assert 'warning: could not load initial' not in result.stdout.str() +def test_no_conftest(testdir): + 
testdir.makeconftest("assert 0") + result = testdir.runpytest("--noconftest") + assert result.ret == EXIT_NOTESTSCOLLECTED + + result = testdir.runpytest() + assert result.ret == EXIT_USAGEERROR + def test_conftest_existing_resultlog(testdir): x = testdir.mkdir("tests") x.join("conftest.py").write(py.code.Source(""" @@ -200,9 +199,9 @@ def test_conftest_import_order(testdir, monkeypatch): ct2.write("") def impct(p): return p - conftest = Conftest() - monkeypatch.setattr(conftest, 'importconftest', impct) - assert conftest.getconftestmodules(sub) == [ct1, ct2] + conftest = PytestPluginManager() + monkeypatch.setattr(conftest, '_importconftest', impct) + assert conftest._getconftestmodules(sub) == [ct1, ct2] def test_fixture_dependency(testdir, monkeypatch): @@ -348,3 +347,44 @@ def test_parsefactories_relative_node_ids( with dirs[chdir].as_cwd(): reprec = testdir.inline_run(testarg, "-q", "--traceconfig") reprec.assertoutcome(passed=expect_ntests_passed) + + +@pytest.mark.parametrize('confcutdir,passed,error', [ + ('.', 2, 0), + ('src', 1, 1), + (None, 1, 1), +]) +def test_search_conftest_up_to_inifile(testdir, confcutdir, passed, error): + """Test that conftest files are detected only up to a ini file, unless + an explicit --confcutdir option is given. 
+ """ + root = testdir.tmpdir + src = root.join('src').ensure(dir=1) + src.join('pytest.ini').write('[pytest]') + src.join('conftest.py').write(py.code.Source(""" + import pytest + @pytest.fixture + def fix1(): pass + """)) + src.join('test_foo.py').write(py.code.Source(""" + def test_1(fix1): + pass + def test_2(out_of_reach): + pass + """)) + root.join('conftest.py').write(py.code.Source(""" + import pytest + @pytest.fixture + def out_of_reach(): pass + """)) + + args = [str(src)] + if confcutdir: + args = ['--confcutdir=%s' % root.join(confcutdir)] + result = testdir.runpytest(*args) + match = '' + if passed: + match += '*%d passed*' % passed + if error: + match += '*%d error*' % error + result.stdout.fnmatch_lines(match) diff --git a/testing/test_core.py b/testing/test_core.py deleted file mode 100644 index 631e88ae913..00000000000 --- a/testing/test_core.py +++ /dev/null @@ -1,881 +0,0 @@ -import pytest, py, os -from _pytest.core import * # noqa -from _pytest.config import get_plugin_manager - - -class TestBootstrapping: - def test_consider_env_fails_to_import(self, monkeypatch): - pluginmanager = PluginManager() - monkeypatch.setenv('PYTEST_PLUGINS', 'nonexisting', prepend=",") - pytest.raises(ImportError, lambda: pluginmanager.consider_env()) - - def test_preparse_args(self): - pluginmanager = PluginManager() - pytest.raises(ImportError, lambda: - pluginmanager.consider_preparse(["xyz", "-p", "hello123"])) - - def test_plugin_prevent_register(self): - pluginmanager = PluginManager() - pluginmanager.consider_preparse(["xyz", "-p", "no:abc"]) - l1 = pluginmanager.getplugins() - pluginmanager.register(42, name="abc") - l2 = pluginmanager.getplugins() - assert len(l2) == len(l1) - - def test_plugin_prevent_register_unregistered_alredy_registered(self): - pluginmanager = PluginManager() - pluginmanager.register(42, name="abc") - l1 = pluginmanager.getplugins() - assert 42 in l1 - pluginmanager.consider_preparse(["xyz", "-p", "no:abc"]) - l2 = 
pluginmanager.getplugins() - assert 42 not in l2 - - def test_plugin_double_register(self): - pm = PluginManager() - pm.register(42, name="abc") - pytest.raises(ValueError, lambda: pm.register(42, name="abc")) - - def test_plugin_skip(self, testdir, monkeypatch): - p = testdir.makepyfile(skipping1=""" - import pytest - pytest.skip("hello") - """) - p.copy(p.dirpath("skipping2.py")) - monkeypatch.setenv("PYTEST_PLUGINS", "skipping2") - result = testdir.runpytest("-rw", "-p", "skipping1", "--traceconfig") - assert result.ret == 0 - result.stdout.fnmatch_lines([ - "WI1*skipped plugin*skipping1*hello*", - "WI1*skipped plugin*skipping2*hello*", - ]) - - def test_consider_env_plugin_instantiation(self, testdir, monkeypatch): - pluginmanager = PluginManager() - testdir.syspathinsert() - testdir.makepyfile(xy123="#") - monkeypatch.setitem(os.environ, 'PYTEST_PLUGINS', 'xy123') - l1 = len(pluginmanager.getplugins()) - pluginmanager.consider_env() - l2 = len(pluginmanager.getplugins()) - assert l2 == l1 + 1 - assert pluginmanager.getplugin('xy123') - pluginmanager.consider_env() - l3 = len(pluginmanager.getplugins()) - assert l2 == l3 - - def test_consider_setuptools_instantiation(self, monkeypatch): - pkg_resources = pytest.importorskip("pkg_resources") - def my_iter(name): - assert name == "pytest11" - class EntryPoint: - name = "pytest_mytestplugin" - dist = None - def load(self): - class PseudoPlugin: - x = 42 - return PseudoPlugin() - return iter([EntryPoint()]) - - monkeypatch.setattr(pkg_resources, 'iter_entry_points', my_iter) - pluginmanager = PluginManager() - pluginmanager.consider_setuptools_entrypoints() - plugin = pluginmanager.getplugin("mytestplugin") - assert plugin.x == 42 - - def test_consider_setuptools_not_installed(self, monkeypatch): - monkeypatch.setitem(py.std.sys.modules, 'pkg_resources', - py.std.types.ModuleType("pkg_resources")) - pluginmanager = PluginManager() - pluginmanager.consider_setuptools_entrypoints() - # ok, we did not explode - - def 
test_pluginmanager_ENV_startup(self, testdir, monkeypatch): - testdir.makepyfile(pytest_x500="#") - p = testdir.makepyfile(""" - import pytest - def test_hello(pytestconfig): - plugin = pytestconfig.pluginmanager.getplugin('pytest_x500') - assert plugin is not None - """) - monkeypatch.setenv('PYTEST_PLUGINS', 'pytest_x500', prepend=",") - result = testdir.runpytest(p) - assert result.ret == 0 - result.stdout.fnmatch_lines(["*1 passed in*"]) - - def test_import_plugin_importname(self, testdir): - pluginmanager = PluginManager() - pytest.raises(ImportError, 'pluginmanager.import_plugin("qweqwex.y")') - pytest.raises(ImportError, 'pluginmanager.import_plugin("pytest_qweqwx.y")') - - testdir.syspathinsert() - pluginname = "pytest_hello" - testdir.makepyfile(**{pluginname: ""}) - pluginmanager.import_plugin("pytest_hello") - len1 = len(pluginmanager.getplugins()) - pluginmanager.import_plugin("pytest_hello") - len2 = len(pluginmanager.getplugins()) - assert len1 == len2 - plugin1 = pluginmanager.getplugin("pytest_hello") - assert plugin1.__name__.endswith('pytest_hello') - plugin2 = pluginmanager.getplugin("pytest_hello") - assert plugin2 is plugin1 - - def test_import_plugin_dotted_name(self, testdir): - pluginmanager = PluginManager() - pytest.raises(ImportError, 'pluginmanager.import_plugin("qweqwex.y")') - pytest.raises(ImportError, 'pluginmanager.import_plugin("pytest_qweqwex.y")') - - testdir.syspathinsert() - testdir.mkpydir("pkg").join("plug.py").write("x=3") - pluginname = "pkg.plug" - pluginmanager.import_plugin(pluginname) - mod = pluginmanager.getplugin("pkg.plug") - assert mod.x == 3 - - def test_consider_module(self, testdir): - pluginmanager = PluginManager() - testdir.syspathinsert() - testdir.makepyfile(pytest_p1="#") - testdir.makepyfile(pytest_p2="#") - mod = py.std.types.ModuleType("temp") - mod.pytest_plugins = ["pytest_p1", "pytest_p2"] - pluginmanager.consider_module(mod) - assert pluginmanager.getplugin("pytest_p1").__name__ == "pytest_p1" - 
assert pluginmanager.getplugin("pytest_p2").__name__ == "pytest_p2" - - def test_consider_module_import_module(self, testdir): - mod = py.std.types.ModuleType("x") - mod.pytest_plugins = "pytest_a" - aplugin = testdir.makepyfile(pytest_a="#") - pluginmanager = get_plugin_manager() - reprec = testdir.make_hook_recorder(pluginmanager) - #syspath.prepend(aplugin.dirpath()) - py.std.sys.path.insert(0, str(aplugin.dirpath())) - pluginmanager.consider_module(mod) - call = reprec.getcall(pluginmanager.hook.pytest_plugin_registered.name) - assert call.plugin.__name__ == "pytest_a" - - # check that it is not registered twice - pluginmanager.consider_module(mod) - l = reprec.getcalls("pytest_plugin_registered") - assert len(l) == 1 - - def test_config_sets_conftesthandle_onimport(self, testdir): - config = testdir.parseconfig([]) - assert config._conftest._onimport == config._onimportconftest - - def test_consider_conftest_deps(self, testdir): - mod = testdir.makepyfile("pytest_plugins='xyz'").pyimport() - pp = PluginManager() - pytest.raises(ImportError, lambda: pp.consider_conftest(mod)) - - def test_pm(self): - pp = PluginManager() - class A: pass - a1, a2 = A(), A() - pp.register(a1) - assert pp.isregistered(a1) - pp.register(a2, "hello") - assert pp.isregistered(a2) - l = pp.getplugins() - assert a1 in l - assert a2 in l - assert pp.getplugin('hello') == a2 - pp.unregister(a1) - assert not pp.isregistered(a1) - - def test_pm_ordering(self): - pp = PluginManager() - class A: pass - a1, a2 = A(), A() - pp.register(a1) - pp.register(a2, "hello") - l = pp.getplugins() - assert l.index(a1) < l.index(a2) - a3 = A() - pp.register(a3, prepend=True) - l = pp.getplugins() - assert l.index(a3) == 0 - - def test_register_imported_modules(self): - pp = PluginManager() - mod = py.std.types.ModuleType("x.y.pytest_hello") - pp.register(mod) - assert pp.isregistered(mod) - l = pp.getplugins() - assert mod in l - pytest.raises(ValueError, "pp.register(mod)") - pytest.raises(ValueError, 
lambda: pp.register(mod)) - #assert not pp.isregistered(mod2) - assert pp.getplugins() == l - - def test_canonical_import(self, monkeypatch): - mod = py.std.types.ModuleType("pytest_xyz") - monkeypatch.setitem(py.std.sys.modules, 'pytest_xyz', mod) - pp = PluginManager() - pp.import_plugin('pytest_xyz') - assert pp.getplugin('pytest_xyz') == mod - assert pp.isregistered(mod) - - def test_register_mismatch_method(self): - pp = get_plugin_manager() - class hello: - def pytest_gurgel(self): - pass - pytest.raises(Exception, lambda: pp.register(hello())) - - def test_register_mismatch_arg(self): - pp = get_plugin_manager() - class hello: - def pytest_configure(self, asd): - pass - pytest.raises(Exception, lambda: pp.register(hello())) - - def test_register(self): - pm = get_plugin_manager() - class MyPlugin: - pass - my = MyPlugin() - pm.register(my) - assert pm.getplugins() - my2 = MyPlugin() - pm.register(my2) - assert pm.getplugins()[-2:] == [my, my2] - - assert pm.isregistered(my) - assert pm.isregistered(my2) - pm.unregister(my) - assert not pm.isregistered(my) - assert pm.getplugins()[-1:] == [my2] - - def test_listattr(self): - plugins = PluginManager() - class api1: - x = 41 - class api2: - x = 42 - class api3: - x = 43 - plugins.register(api1()) - plugins.register(api2()) - plugins.register(api3()) - l = list(plugins.listattr('x')) - assert l == [41, 42, 43] - - def test_hook_tracing(self): - pm = get_plugin_manager() - saveindent = [] - class api1: - x = 41 - def pytest_plugin_registered(self, plugin): - saveindent.append(pm.trace.root.indent) - raise ValueError(42) - l = [] - pm.set_tracing(l.append) - indent = pm.trace.root.indent - p = api1() - pm.register(p) - - assert pm.trace.root.indent == indent - assert len(l) == 2 - assert 'pytest_plugin_registered' in l[0] - assert 'finish' in l[1] - pytest.raises(ValueError, lambda: pm.register(api1())) - assert pm.trace.root.indent == indent - assert saveindent[0] > indent - -class TestPytestPluginInteractions: - 
- def test_addhooks_conftestplugin(self, testdir): - testdir.makepyfile(newhooks=""" - def pytest_myhook(xyz): - "new hook" - """) - conf = testdir.makeconftest(""" - import sys ; sys.path.insert(0, '.') - import newhooks - def pytest_addhooks(pluginmanager): - pluginmanager.addhooks(newhooks) - def pytest_myhook(xyz): - return xyz + 1 - """) - config = get_plugin_manager().config - config._conftest.importconftest(conf) - print(config.pluginmanager.getplugins()) - res = config.hook.pytest_myhook(xyz=10) - assert res == [11] - - def test_addhooks_nohooks(self, testdir): - testdir.makeconftest(""" - import sys - def pytest_addhooks(pluginmanager): - pluginmanager.addhooks(sys) - """) - res = testdir.runpytest() - assert res.ret != 0 - res.stderr.fnmatch_lines([ - "*did not find*sys*" - ]) - - def test_namespace_early_from_import(self, testdir): - p = testdir.makepyfile(""" - from pytest import Item - from pytest import Item as Item2 - assert Item is Item2 - """) - result = testdir.runpython(p) - assert result.ret == 0 - - def test_do_ext_namespace(self, testdir): - testdir.makeconftest(""" - def pytest_namespace(): - return {'hello': 'world'} - """) - p = testdir.makepyfile(""" - from pytest import hello - import pytest - def test_hello(): - assert hello == "world" - assert 'hello' in pytest.__all__ - """) - reprec = testdir.inline_run(p) - reprec.assertoutcome(passed=1) - - def test_do_option_postinitialize(self, testdir): - config = testdir.parseconfigure() - assert not hasattr(config.option, 'test123') - p = testdir.makepyfile(""" - def pytest_addoption(parser): - parser.addoption('--test123', action="store_true", - default=True) - """) - config._conftest.importconftest(p) - assert config.option.test123 - - def test_configure(self, testdir): - config = testdir.parseconfig() - l = [] - class A: - def pytest_configure(self, config): - l.append(self) - - config.pluginmanager.register(A()) - assert len(l) == 0 - config.do_configure() - assert len(l) == 1 - 
config.pluginmanager.register(A()) # leads to a configured() plugin - assert len(l) == 2 - assert l[0] != l[1] - - config.do_unconfigure() - config.pluginmanager.register(A()) - assert len(l) == 2 - - # lower level API - - def test_listattr(self): - pluginmanager = PluginManager() - class My2: - x = 42 - pluginmanager.register(My2()) - assert not pluginmanager.listattr("hello") - assert pluginmanager.listattr("x") == [42] - - def test_listattr_tryfirst(self): - class P1: - @pytest.mark.tryfirst - def m(self): - return 17 - - class P2: - def m(self): - return 23 - class P3: - def m(self): - return 19 - - pluginmanager = PluginManager() - p1 = P1() - p2 = P2() - p3 = P3() - pluginmanager.register(p1) - pluginmanager.register(p2) - pluginmanager.register(p3) - methods = pluginmanager.listattr('m') - assert methods == [p2.m, p3.m, p1.m] - del P1.m.__dict__['tryfirst'] - pytest.mark.trylast(getattr(P2.m, 'im_func', P2.m)) - methods = pluginmanager.listattr('m') - assert methods == [p2.m, p1.m, p3.m] - - -def test_namespace_has_default_and_env_plugins(testdir): - p = testdir.makepyfile(""" - import pytest - pytest.mark - """) - result = testdir.runpython(p) - assert result.ret == 0 - -def test_varnames(): - def f(x): - i = 3 # noqa - class A: - def f(self, y): - pass - class B(object): - def __call__(self, z): - pass - assert varnames(f) == ("x",) - assert varnames(A().f) == ('y',) - assert varnames(B()) == ('z',) - -def test_varnames_default(): - def f(x, y=3): - pass - assert varnames(f) == ("x",) - -def test_varnames_class(): - class C: - def __init__(self, x): - pass - class D: - pass - assert varnames(C) == ("x",) - assert varnames(D) == () - -class TestMultiCall: - def test_uses_copy_of_methods(self): - l = [lambda: 42] - mc = MultiCall(l, {}) - repr(mc) - l[:] = [] - res = mc.execute() - return res == 42 - - def test_call_passing(self): - class P1: - def m(self, __multicall__, x): - assert len(__multicall__.results) == 1 - assert not __multicall__.methods - return 
17 - - class P2: - def m(self, __multicall__, x): - assert __multicall__.results == [] - assert __multicall__.methods - return 23 - - p1 = P1() - p2 = P2() - multicall = MultiCall([p1.m, p2.m], {'x': 23}) - assert "23" in repr(multicall) - reslist = multicall.execute() - assert len(reslist) == 2 - # ensure reversed order - assert reslist == [23, 17] - - def test_keyword_args(self): - def f(x): - return x + 1 - class A: - def f(self, x, y): - return x + y - multicall = MultiCall([f, A().f], dict(x=23, y=24)) - assert "'x': 23" in repr(multicall) - assert "'y': 24" in repr(multicall) - reslist = multicall.execute() - assert reslist == [24+23, 24] - assert "2 results" in repr(multicall) - - def test_keyword_args_with_defaultargs(self): - def f(x, z=1): - return x + z - reslist = MultiCall([f], dict(x=23, y=24)).execute() - assert reslist == [24] - - def test_tags_call_error(self): - multicall = MultiCall([lambda x: x], {}) - pytest.raises(KeyError, multicall.execute) - - def test_call_subexecute(self): - def m(__multicall__): - subresult = __multicall__.execute() - return subresult + 1 - - def n(): - return 1 - - call = MultiCall([n, m], {}, firstresult=True) - res = call.execute() - assert res == 2 - - def test_call_none_is_no_result(self): - def m1(): - return 1 - def m2(): - return None - res = MultiCall([m1, m2], {}, firstresult=True).execute() - assert res == 1 - res = MultiCall([m1, m2], {}).execute() - assert res == [1] - - def test_hookwrapper(self): - l = [] - def m1(): - l.append("m1 init") - yield None - l.append("m1 finish") - m1.hookwrapper = True - - def m2(): - l.append("m2") - return 2 - res = MultiCall([m2, m1], {}).execute() - assert res == [2] - assert l == ["m1 init", "m2", "m1 finish"] - l[:] = [] - res = MultiCall([m2, m1], {}, firstresult=True).execute() - assert res == 2 - assert l == ["m1 init", "m2", "m1 finish"] - - def test_hookwrapper_order(self): - l = [] - def m1(): - l.append("m1 init") - yield 1 - l.append("m1 finish") - m1.hookwrapper 
= True - - def m2(): - l.append("m2 init") - yield 2 - l.append("m2 finish") - m2.hookwrapper = True - res = MultiCall([m2, m1], {}).execute() - assert res == [] - assert l == ["m1 init", "m2 init", "m2 finish", "m1 finish"] - - def test_listattr_hookwrapper_ordering(self): - class P1: - @pytest.mark.hookwrapper - def m(self): - return 17 - - class P2: - def m(self): - return 23 - - class P3: - @pytest.mark.tryfirst - def m(self): - return 19 - - pluginmanager = PluginManager() - p1 = P1() - p2 = P2() - p3 = P3() - pluginmanager.register(p1) - pluginmanager.register(p2) - pluginmanager.register(p3) - methods = pluginmanager.listattr('m') - assert methods == [p2.m, p3.m, p1.m] - ## listattr keeps a cache and deleting - ## a function attribute requires clearing it - #pluginmanager._listattrcache.clear() - #del P1.m.__dict__['tryfirst'] - - def test_hookwrapper_not_yield(self): - def m1(): - pass - m1.hookwrapper = True - - mc = MultiCall([m1], {}) - with pytest.raises(TypeError): - mc.execute() - - def test_hookwrapper_too_many_yield(self): - def m1(): - yield 1 - yield 2 - m1.hookwrapper = True - - mc = MultiCall([m1], {}) - with pytest.raises(RuntimeError) as ex: - mc.execute() - assert "m1" in str(ex.value) - assert "test_core.py:" in str(ex.value) - - @pytest.mark.parametrize("exc", [ValueError, SystemExit]) - def test_hookwrapper_exception(self, exc): - l = [] - def m1(): - l.append("m1 init") - yield None - l.append("m1 finish") - m1.hookwrapper = True - - def m2(): - raise exc - with pytest.raises(exc): - MultiCall([m2, m1], {}).execute() - assert l == ["m1 init", "m1 finish"] - - -class TestHookRelay: - def test_happypath(self): - class Api: - def hello(self, arg): - "api hook 1" - pm = PluginManager([Api], prefix="he") - hook = pm.hook - assert hasattr(hook, 'hello') - assert repr(hook.hello).find("hello") != -1 - class Plugin: - def hello(self, arg): - return arg + 1 - plugin = Plugin() - pm.register(plugin) - l = hook.hello(arg=3) - assert l == [4] - 
assert not hasattr(hook, 'world') - pm.unregister(plugin) - assert hook.hello(arg=3) == [] - - def test_argmismatch(self): - class Api: - def hello(self, arg): - "api hook 1" - pm = PluginManager(Api, prefix="he") - class Plugin: - def hello(self, argwrong): - return arg + 1 - with pytest.raises(PluginValidationError) as exc: - pm.register(Plugin()) - assert "argwrong" in str(exc.value) - - def test_only_kwargs(self): - pm = PluginManager() - class Api: - def hello(self, arg): - "api hook 1" - mcm = HookRelay(hookspecs=Api, pm=pm, prefix="he") - pytest.raises(TypeError, lambda: mcm.hello(3)) - - def test_firstresult_definition(self): - class Api: - def hello(self, arg): - "api hook 1" - hello.firstresult = True - pm = PluginManager([Api], "he") - class Plugin: - def hello(self, arg): - return arg + 1 - pm.register(Plugin()) - res = pm.hook.hello(arg=3) - assert res == 4 - -class TestTracer: - def test_simple(self): - from _pytest.core import TagTracer - rootlogger = TagTracer() - log = rootlogger.get("pytest") - log("hello") - l = [] - rootlogger.setwriter(l.append) - log("world") - assert len(l) == 1 - assert l[0] == "world [pytest]\n" - sublog = log.get("collection") - sublog("hello") - assert l[1] == "hello [pytest:collection]\n" - - def test_indent(self): - from _pytest.core import TagTracer - rootlogger = TagTracer() - log = rootlogger.get("1") - l = [] - log.root.setwriter(lambda arg: l.append(arg)) - log("hello") - log.root.indent += 1 - log("line1") - log("line2") - log.root.indent += 1 - log("line3") - log("line4") - log.root.indent -= 1 - log("line5") - log.root.indent -= 1 - log("last") - assert len(l) == 7 - names = [x[:x.rfind(' [')] for x in l] - assert names == ['hello', ' line1', ' line2', - ' line3', ' line4', ' line5', 'last'] - - def test_readable_output_dictargs(self): - from _pytest.core import TagTracer - rootlogger = TagTracer() - - out = rootlogger.format_message(['test'], [1]) - assert out == ['1 [test]\n'] - - out2= 
rootlogger.format_message(['test'], ['test', {'a':1}]) - assert out2 ==[ - 'test [test]\n', - ' a: 1\n' - ] - - def test_setprocessor(self): - from _pytest.core import TagTracer - rootlogger = TagTracer() - log = rootlogger.get("1") - log2 = log.get("2") - assert log2.tags == tuple("12") - l = [] - rootlogger.setprocessor(tuple("12"), lambda *args: l.append(args)) - log("not seen") - log2("seen") - assert len(l) == 1 - tags, args = l[0] - assert "1" in tags - assert "2" in tags - assert args == ("seen",) - l2 = [] - rootlogger.setprocessor("1:2", lambda *args: l2.append(args)) - log2("seen") - tags, args = l2[0] - assert args == ("seen",) - - - def test_setmyprocessor(self): - from _pytest.core import TagTracer - rootlogger = TagTracer() - log = rootlogger.get("1") - log2 = log.get("2") - l = [] - log2.setmyprocessor(lambda *args: l.append(args)) - log("not seen") - assert not l - log2(42) - assert len(l) == 1 - tags, args = l[0] - assert "1" in tags - assert "2" in tags - assert args == (42,) - -def test_default_markers(testdir): - result = testdir.runpytest("--markers") - result.stdout.fnmatch_lines([ - "*tryfirst*first*", - "*trylast*last*", - ]) - -def test_importplugin_issue375(testdir): - testdir.makepyfile(qwe="import aaaa") - excinfo = pytest.raises(ImportError, lambda: importplugin("qwe")) - assert "qwe" not in str(excinfo.value) - assert "aaaa" in str(excinfo.value) - -class TestWrapMethod: - def test_basic_happypath(self): - class A: - def f(self): - return "A.f" - - l = [] - def f(self): - l.append(1) - box = yield - assert box.result == "A.f" - l.append(2) - undo = add_method_wrapper(A, f) - - assert A().f() == "A.f" - assert l == [1,2] - undo() - l[:] = [] - assert A().f() == "A.f" - assert l == [] - - def test_no_yield(self): - class A: - def method(self): - return - - def method(self): - if 0: - yield - - add_method_wrapper(A, method) - with pytest.raises(RuntimeError) as excinfo: - A().method() - - assert "method" in str(excinfo.value) - assert 
"did not yield" in str(excinfo.value) - - def test_method_raises(self): - class A: - def error(self, val): - raise ValueError(val) - - l = [] - def error(self, val): - l.append(val) - yield - l.append(None) - - undo = add_method_wrapper(A, error) - - with pytest.raises(ValueError): - A().error(42) - assert l == [42, None] - undo() - l[:] = [] - with pytest.raises(ValueError): - A().error(42) - assert l == [] - - def test_controller_swallows_method_raises(self): - class A: - def error(self, val): - raise ValueError(val) - - def error(self, val): - box = yield - box.force_result(2) - - add_method_wrapper(A, error) - assert A().error(42) == 2 - - def test_reraise_on_controller_StopIteration(self): - class A: - def error(self, val): - raise ValueError(val) - - def error(self, val): - try: - yield - except ValueError: - pass - - add_method_wrapper(A, error) - with pytest.raises(ValueError): - A().error(42) - - @pytest.mark.xfail(reason="if needed later") - def test_modify_call_args(self): - class A: - def error(self, val1, val2): - raise ValueError(val1+val2) - - l = [] - def error(self): - box = yield (1,), {'val2': 2} - assert box.excinfo[1].args == (3,) - l.append(1) - - add_method_wrapper(A, error) - with pytest.raises(ValueError): - A().error() - assert l == [1] diff --git a/testing/test_doctest.py b/testing/test_doctest.py index 2bf6d245047..25a995add82 100644 --- a/testing/test_doctest.py +++ b/testing/test_doctest.py @@ -1,5 +1,7 @@ +import sys from _pytest.doctest import DoctestItem, DoctestModule, DoctestTextfile -import py, pytest +import py +import pytest class TestDoctests: @@ -75,8 +77,6 @@ def another(): assert isinstance(items[0].parent, DoctestModule) assert items[0].parent is items[1].parent - @pytest.mark.xfail('hasattr(sys, "pypy_version_info")', reason= - "pypy leaks one FD") def test_simple_doctestfile(self, testdir): p = testdir.maketxtfile(test_doc=""" >>> x = 1 @@ -370,3 +370,126 @@ def foo(): reprec = testdir.inline_run(p, "--doctest-modules", 
"--junit-xml=junit.xml") reprec.assertoutcome(failed=1) + + def test_doctest_module_session_fixture(self, testdir): + """Test that session fixtures are initialized for doctest modules (#768) + """ + # session fixture which changes some global data, which will + # be accessed by doctests in a module + testdir.makeconftest(""" + import pytest + import sys + + @pytest.yield_fixture(autouse=True, scope='session') + def myfixture(): + assert not hasattr(sys, 'pytest_session_data') + sys.pytest_session_data = 1 + yield + del sys.pytest_session_data + """) + testdir.makepyfile(foo=""" + import sys + + def foo(): + ''' + >>> assert sys.pytest_session_data == 1 + ''' + + def bar(): + ''' + >>> assert sys.pytest_session_data == 1 + ''' + """) + result = testdir.runpytest("--doctest-modules") + result.stdout.fnmatch_lines('*2 passed*') + + @pytest.mark.parametrize('config_mode', ['ini', 'comment']) + def test_allow_unicode(self, testdir, config_mode): + """Test that doctests which output unicode work in all python versions + tested by pytest when the ALLOW_UNICODE option is used (either in + the ini file or by an inline comment). + """ + if config_mode == 'ini': + testdir.makeini(''' + [pytest] + doctest_optionflags = ALLOW_UNICODE + ''') + comment = '' + else: + comment = '#doctest: +ALLOW_UNICODE' + + testdir.maketxtfile(test_doc=""" + >>> b'12'.decode('ascii') {comment} + '12' + """.format(comment=comment)) + testdir.makepyfile(foo=""" + def foo(): + ''' + >>> b'12'.decode('ascii') {comment} + '12' + ''' + """.format(comment=comment)) + reprec = testdir.inline_run("--doctest-modules") + reprec.assertoutcome(passed=2) + + def test_unicode_string(self, testdir): + """Test that doctests which output unicode fail in Python 2 when + the ALLOW_UNICODE option is not used. The same test should pass + in Python 3. 
+ """ + testdir.maketxtfile(test_doc=""" + >>> b'12'.decode('ascii') + '12' + """) + reprec = testdir.inline_run() + passed = int(sys.version_info[0] >= 3) + reprec.assertoutcome(passed=passed, failed=int(not passed)) + + +class TestDocTestSkips: + """ + If all examples in a doctest are skipped due to the SKIP option, then + the tests should be SKIPPED rather than PASSED. (#957) + """ + + @pytest.fixture(params=['text', 'module']) + def makedoctest(self, testdir, request): + def makeit(doctest): + mode = request.param + if mode == 'text': + testdir.maketxtfile(doctest) + else: + assert mode == 'module' + testdir.makepyfile('"""\n%s"""' % doctest) + + return makeit + + def test_one_skipped(self, testdir, makedoctest): + makedoctest(""" + >>> 1 + 1 # doctest: +SKIP + 2 + >>> 2 + 2 + 4 + """) + reprec = testdir.inline_run("--doctest-modules") + reprec.assertoutcome(passed=1) + + def test_one_skipped_failed(self, testdir, makedoctest): + makedoctest(""" + >>> 1 + 1 # doctest: +SKIP + 2 + >>> 2 + 2 + 200 + """) + reprec = testdir.inline_run("--doctest-modules") + reprec.assertoutcome(failed=1) + + def test_all_skipped(self, testdir, makedoctest): + makedoctest(""" + >>> 1 + 1 # doctest: +SKIP + 2 + >>> 2 + 2 # doctest: +SKIP + 200 + """) + reprec = testdir.inline_run("--doctest-modules") + reprec.assertoutcome(skipped=1) diff --git a/testing/test_genscript.py b/testing/test_genscript.py index 1c65fec1462..1260a5a6b26 100644 --- a/testing/test_genscript.py +++ b/testing/test_genscript.py @@ -16,7 +16,6 @@ def __init__(self, request): assert self.script.check() def run(self, anypython, testdir, *args): - testdir.chdir() return testdir._run(anypython, self.script, *args) def test_gen(testdir, anypython, standalone): @@ -28,13 +27,17 @@ def test_gen(testdir, anypython, standalone): pytest.skip("genscript called from python2.7 cannot work " "earlier python versions") result = standalone.run(anypython, testdir, '--version') - assert result.ret == 0 - 
result.stderr.fnmatch_lines([ - "*imported from*mypytest*" - ]) - p = testdir.makepyfile("def test_func(): assert 0") - result = standalone.run(anypython, testdir, p) - assert result.ret != 0 + if result.ret == 2: + result.stderr.fnmatch_lines(["*ERROR: setuptools not installed*"]) + elif result.ret == 0: + result.stderr.fnmatch_lines([ + "*imported from*mypytest*" + ]) + p = testdir.makepyfile("def test_func(): assert 0") + result = standalone.run(anypython, testdir, p) + assert result.ret != 0 + else: + pytest.fail("Unexpected return code") def test_freeze_includes(): diff --git a/testing/test_helpconfig.py b/testing/test_helpconfig.py index 30ce9c9f236..9f8d87b7cb5 100644 --- a/testing/test_helpconfig.py +++ b/testing/test_helpconfig.py @@ -1,3 +1,4 @@ +from _pytest.main import EXIT_NOTESTSCOLLECTED import pytest def test_version(testdir, pytestconfig): @@ -7,7 +8,7 @@ def test_version(testdir, pytestconfig): result.stderr.fnmatch_lines([ '*pytest*%s*imported from*' % (pytest.__version__, ) ]) - if pytestconfig.pluginmanager._plugin_distinfo: + if pytestconfig.pluginmanager.list_plugin_distinfo(): result.stderr.fnmatch_lines([ "*setuptools registered plugins:", "*at*", @@ -38,12 +39,12 @@ def pytest_hello(xyz): def test_hookvalidation_optional(testdir): testdir.makeconftest(""" import pytest - @pytest.mark.optionalhook + @pytest.hookimpl(optionalhook=True) def pytest_hello(xyz): pass """) result = testdir.runpytest() - assert result.ret == 0 + assert result.ret == EXIT_NOTESTSCOLLECTED def test_traceconfig(testdir): result = testdir.runpytest("--traceconfig") @@ -53,15 +54,15 @@ def test_traceconfig(testdir): ]) def test_debug(testdir, monkeypatch): - result = testdir.runpytest("--debug") - assert result.ret == 0 + result = testdir.runpytest_subprocess("--debug") + assert result.ret == EXIT_NOTESTSCOLLECTED p = testdir.tmpdir.join("pytestdebug.log") assert "pytest_sessionstart" in p.read() def test_PYTEST_DEBUG(testdir, monkeypatch): 
monkeypatch.setenv("PYTEST_DEBUG", "1") - result = testdir.runpytest() - assert result.ret == 0 + result = testdir.runpytest_subprocess() + assert result.ret == EXIT_NOTESTSCOLLECTED result.stderr.fnmatch_lines([ "*pytest_plugin_registered*", "*manager*PluginManager*" diff --git a/testing/test_junitxml.py b/testing/test_junitxml.py index d019f87cc05..cb4d0c4440e 100644 --- a/testing/test_junitxml.py +++ b/testing/test_junitxml.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- from xml.dom import minidom +from _pytest.main import EXIT_NOTESTSCOLLECTED import py, sys, os from _pytest.junitxml import LogXML @@ -44,6 +45,10 @@ def test_xpass(): def test_timing_function(self, testdir): testdir.makepyfile(""" import time, pytest + def setup_module(): + time.sleep(0.01) + def teardown_module(): + time.sleep(0.01) def test_sleep(): time.sleep(0.01) """) @@ -51,7 +56,7 @@ def test_sleep(): node = dom.getElementsByTagName("testsuite")[0] tnode = node.getElementsByTagName("testcase")[0] val = tnode.getAttributeNode("time").value - assert float(val) >= 0.001 + assert round(float(val), 2) >= 0.03 def test_setup_error(self, testdir): testdir.makepyfile(""" @@ -66,6 +71,8 @@ def test_function(arg): assert_attr(node, errors=1, tests=0) tnode = node.getElementsByTagName("testcase")[0] assert_attr(tnode, + file="test_setup_error.py", + line="2", classname="test_setup_error", name="test_function") fnode = tnode.getElementsByTagName("error")[0] @@ -84,6 +91,8 @@ def test_skip(): assert_attr(node, skips=1) tnode = node.getElementsByTagName("testcase")[0] assert_attr(tnode, + file="test_skip_contains_name_reason.py", + line="1", classname="test_skip_contains_name_reason", name="test_skip") snode = tnode.getElementsByTagName("skipped")[0] @@ -104,6 +113,8 @@ def test_method(self): assert_attr(node, failures=1) tnode = node.getElementsByTagName("testcase")[0] assert_attr(tnode, + file="test_classname_instance.py", + line="1", classname="test_classname_instance.TestClass", name="test_method") @@ 
-116,6 +127,8 @@ def test_classname_nested_dir(self, testdir): assert_attr(node, failures=1) tnode = node.getElementsByTagName("testcase")[0] assert_attr(tnode, + file=os.path.join("sub", "test_hello.py"), + line="0", classname="sub.test_hello", name="test_func") @@ -147,6 +160,8 @@ def test_fail(): assert_attr(node, failures=1, tests=1) tnode = node.getElementsByTagName("testcase")[0] assert_attr(tnode, + file="test_failure_function.py", + line="1", classname="test_failure_function", name="test_fail") fnode = tnode.getElementsByTagName("failure")[0] @@ -189,6 +204,8 @@ def test_func(arg1): tnode = node.getElementsByTagName("testcase")[index] assert_attr(tnode, + file="test_failure_escape.py", + line="1", classname="test_failure_escape", name="test_func[%s]" % char) sysout = tnode.getElementsByTagName('system-out')[0] @@ -210,10 +227,14 @@ def test_hello(self): assert_attr(node, failures=1, tests=2) tnode = node.getElementsByTagName("testcase")[0] assert_attr(tnode, + file="test_junit_prefixing.py", + line="0", classname="xyz.test_junit_prefixing", name="test_func") tnode = node.getElementsByTagName("testcase")[1] assert_attr(tnode, + file="test_junit_prefixing.py", + line="3", classname="xyz.test_junit_prefixing." 
"TestHello", name="test_hello") @@ -230,6 +251,8 @@ def test_xfail(): assert_attr(node, skips=1, tests=0) tnode = node.getElementsByTagName("testcase")[0] assert_attr(tnode, + file="test_xfailure_function.py", + line="1", classname="test_xfailure_function", name="test_xfail") fnode = tnode.getElementsByTagName("skipped")[0] @@ -249,6 +272,8 @@ def test_xpass(): assert_attr(node, skips=1, tests=0) tnode = node.getElementsByTagName("testcase")[0] assert_attr(tnode, + file="test_xfailure_xpass.py", + line="1", classname="test_xfailure_xpass", name="test_xpass") fnode = tnode.getElementsByTagName("skipped")[0] @@ -263,8 +288,10 @@ def test_collect_error(self, testdir): assert_attr(node, errors=1, tests=0) tnode = node.getElementsByTagName("testcase")[0] assert_attr(tnode, + file="test_collect_error.py", #classname="test_collect_error", name="test_collect_error") + assert tnode.getAttributeNode("line") is None fnode = tnode.getElementsByTagName("error")[0] assert_attr(fnode, message="collection failure") assert "SyntaxError" in fnode.toxml() @@ -272,13 +299,15 @@ def test_collect_error(self, testdir): def test_collect_skipped(self, testdir): testdir.makepyfile("import pytest; pytest.skip('xyz')") result, dom = runandparse(testdir) - assert not result.ret + assert result.ret == EXIT_NOTESTSCOLLECTED node = dom.getElementsByTagName("testsuite")[0] assert_attr(node, skips=1, tests=0) tnode = node.getElementsByTagName("testcase")[0] assert_attr(tnode, + file="test_collect_skipped.py", #classname="test_collect_error", name="test_collect_skipped") + assert tnode.getAttributeNode("line") is None # py.test doesn't give us a line here. fnode = tnode.getElementsByTagName("skipped")[0] assert_attr(fnode, message="collection skipped") @@ -506,6 +535,7 @@ class Report(BaseReport): longrepr = ustr sections = [] nodeid = "something" + location = 'tests/filename.py', 42, 'TestClass.method' report = Report() # hopefully this is not too brittle ... 
@@ -523,4 +553,13 @@ class Report(BaseReport): log.append_skipped(report) log.pytest_sessionfinish() - +def test_record_property(testdir): + testdir.makepyfile(""" + def test_record(record_xml_property): + record_xml_property("foo", "<1"); + """) + result, dom = runandparse(testdir, '-rw') + node = dom.getElementsByTagName("testsuite")[0] + tnode = node.getElementsByTagName("testcase")[0] + assert_attr(tnode, foo="<1") + result.stdout.fnmatch_lines('*C3*test_record_property.py*experimental*') diff --git a/testing/test_mark.py b/testing/test_mark.py index 3527deb1e9f..1aa33618308 100644 --- a/testing/test_mark.py +++ b/testing/test_mark.py @@ -1,3 +1,5 @@ +import os + import py, pytest from _pytest.mark import MarkGenerator as Mark @@ -84,6 +86,22 @@ def g(): assert g.some.kwargs['reason2'] == "456" +def test_marked_class_run_twice(testdir, request): + """Test fails file is run twice that contains marked class. + See issue#683. + """ + py_file = testdir.makepyfile(""" + import pytest + @pytest.mark.parametrize('abc', [1, 2, 3]) + class Test1(object): + def test_1(self, abc): + assert abc in [1, 2, 3] + """) + file_name = os.path.basename(py_file.strpath) + rec = testdir.inline_run(file_name, file_name) + rec.assertoutcome(passed=6) + + def test_ini_markers(testdir): testdir.makeini(""" [pytest] @@ -565,7 +583,7 @@ def test_2(self): """) testdir.makepyfile(conftest=""" import pytest - @pytest.mark.hookwrapper + @pytest.hookimpl(hookwrapper=True) def pytest_pycollect_makeitem(name): outcome = yield if name == "TestClass": diff --git a/testing/test_monkeypatch.py b/testing/test_monkeypatch.py index 99f77a64629..690aee55664 100644 --- a/testing/test_monkeypatch.py +++ b/testing/test_monkeypatch.py @@ -62,6 +62,11 @@ def test_unknown_attr(self, monkeypatch): pytest.raises(pytest.fail.Exception, lambda: monkeypatch.setattr("os.path.qweqwe", None)) + def test_unknown_attr_non_raising(self, monkeypatch): + # https://github.com/pytest-dev/pytest/issues/746 
+ monkeypatch.setattr('os.path.qweqwe', 42, raising=False) + assert os.path.qweqwe == 42 + def test_delattr(self, monkeypatch): monkeypatch.delattr("os.path.abspath") assert not hasattr(os.path, "abspath") diff --git a/testing/test_nose.py b/testing/test_nose.py index fac7a5d93e7..6260aae4788 100644 --- a/testing/test_nose.py +++ b/testing/test_nose.py @@ -19,9 +19,7 @@ def test_world(): test_hello.teardown = lambda: l.append(2) """) result = testdir.runpytest(p, '-p', 'nose') - result.stdout.fnmatch_lines([ - "*2 passed*" - ]) + result.assert_outcomes(passed=2) def test_setup_func_with_setup_decorator(): @@ -66,9 +64,7 @@ def test_world(): """) result = testdir.runpytest(p, '-p', 'nose') - result.stdout.fnmatch_lines([ - "*2 passed*" - ]) + result.assert_outcomes(passed=2) def test_nose_setup_func_failure(testdir): @@ -302,7 +298,7 @@ def test_fun(self): pass """) result = testdir.runpytest() - result.stdout.fnmatch_lines("*1 passed*") + result.assert_outcomes(passed=1) @pytest.mark.skipif("sys.version_info < (2,6)") def test_setup_teardown_linking_issue265(testdir): @@ -327,8 +323,8 @@ def teardown(self): """Undoes the setup.""" raise Exception("should not call teardown for skipped tests") ''') - reprec = testdir.inline_run() - reprec.assertoutcome(passed=1, skipped=1) + reprec = testdir.runpytest() + reprec.assert_outcomes(passed=1, skipped=1) def test_SkipTest_during_collection(testdir): @@ -339,9 +335,7 @@ def test_failing(): assert False """) result = testdir.runpytest(p) - outcome = result.parseoutcomes() - outcome.pop('seconds') - assert outcome == dict(skipped=1) + result.assert_outcomes(skipped=1) def test_SkipTest_in_test(testdir): @@ -353,3 +347,49 @@ def test_skipping(): """) reprec = testdir.inline_run() reprec.assertoutcome(skipped=1) + +def test_istest_function_decorator(testdir): + p = testdir.makepyfile(""" + import nose.tools + @nose.tools.istest + def not_test_prefix(): + pass + """) + result = testdir.runpytest(p) + 
result.assert_outcomes(passed=1) + +def test_nottest_function_decorator(testdir): + testdir.makepyfile(""" + import nose.tools + @nose.tools.nottest + def test_prefix(): + pass + """) + reprec = testdir.inline_run() + assert not reprec.getfailedcollections() + calls = reprec.getreports("pytest_runtest_logreport") + assert not calls + +def test_istest_class_decorator(testdir): + p = testdir.makepyfile(""" + import nose.tools + @nose.tools.istest + class NotTestPrefix: + def test_method(self): + pass + """) + result = testdir.runpytest(p) + result.assert_outcomes(passed=1) + +def test_nottest_class_decorator(testdir): + testdir.makepyfile(""" + import nose.tools + @nose.tools.nottest + class TestPrefix: + def test_method(self): + pass + """) + reprec = testdir.inline_run() + assert not reprec.getfailedcollections() + calls = reprec.getreports("pytest_runtest_logreport") + assert not calls diff --git a/testing/test_parseopt.py b/testing/test_parseopt.py index 4b3a7147577..15b28225c5a 100644 --- a/testing/test_parseopt.py +++ b/testing/test_parseopt.py @@ -107,6 +107,15 @@ def test_parse_known_args(self, parser): parser.addoption("--hello", action="store_true") ns = parser.parse_known_args(["x", "--y", "--hello", "this"]) assert ns.hello + assert ns.file_or_dir == ['x'] + + def test_parse_known_and_unknown_args(self, parser): + parser.addoption("--hello", action="store_true") + ns, unknown = parser.parse_known_and_unknown_args(["x", "--y", + "--hello", "this"]) + assert ns.hello + assert ns.file_or_dir == ['x'] + assert unknown == ['--y', 'this'] def test_parse_will_set_default(self, parser): parser.addoption("--hello", dest="hello", default="x", action="store") diff --git a/testing/test_pdb.py b/testing/test_pdb.py index f0cf2b3333f..a2fd4d43da8 100644 --- a/testing/test_pdb.py +++ b/testing/test_pdb.py @@ -2,6 +2,13 @@ import py import sys +def runpdb_and_get_report(testdir, source): + p = testdir.makepyfile(source) + result = testdir.runpytest_inprocess("--pdb", p) 
+ reports = result.reprec.getreports("pytest_runtest_logreport") + assert len(reports) == 3, reports # setup/call/teardown + return reports[1] + class TestPDB: def pytest_funcarg__pdblist(self, request): @@ -14,7 +21,7 @@ def mypdb(*args): return pdblist def test_pdb_on_fail(self, testdir, pdblist): - rep = testdir.inline_runsource1('--pdb', """ + rep = runpdb_and_get_report(testdir, """ def test_func(): assert 0 """) @@ -24,7 +31,7 @@ def test_func(): assert tb[-1].name == "test_func" def test_pdb_on_xfail(self, testdir, pdblist): - rep = testdir.inline_runsource1('--pdb', """ + rep = runpdb_and_get_report(testdir, """ import pytest @pytest.mark.xfail def test_func(): @@ -34,7 +41,7 @@ def test_func(): assert not pdblist def test_pdb_on_skip(self, testdir, pdblist): - rep = testdir.inline_runsource1('--pdb', """ + rep = runpdb_and_get_report(testdir, """ import pytest def test_func(): pytest.skip("hello") @@ -43,7 +50,7 @@ def test_func(): assert len(pdblist) == 0 def test_pdb_on_BdbQuit(self, testdir, pdblist): - rep = testdir.inline_runsource1('--pdb', """ + rep = runpdb_and_get_report(testdir, """ import bdb def test_func(): raise bdb.BdbQuit @@ -260,7 +267,7 @@ def test_foo(a): def test_pdb_collection_failure_is_shown(self, testdir): p1 = testdir.makepyfile("""xxx """) - result = testdir.runpytest("--pdb", p1) + result = testdir.runpytest_subprocess("--pdb", p1) result.stdout.fnmatch_lines([ "*NameError*xxx*", "*1 error*", diff --git a/testing/test_pluginmanager.py b/testing/test_pluginmanager.py new file mode 100644 index 00000000000..c499381001c --- /dev/null +++ b/testing/test_pluginmanager.py @@ -0,0 +1,319 @@ +import pytest +import py +import os + +from _pytest.config import get_config, PytestPluginManager +from _pytest.main import EXIT_NOTESTSCOLLECTED + +@pytest.fixture +def pytestpm(): + return PytestPluginManager() + +class TestPytestPluginInteractions: + def test_addhooks_conftestplugin(self, testdir): + testdir.makepyfile(newhooks=""" + def 
pytest_myhook(xyz): + "new hook" + """) + conf = testdir.makeconftest(""" + import sys ; sys.path.insert(0, '.') + import newhooks + def pytest_addhooks(pluginmanager): + pluginmanager.addhooks(newhooks) + def pytest_myhook(xyz): + return xyz + 1 + """) + config = get_config() + pm = config.pluginmanager + pm.hook.pytest_addhooks.call_historic( + kwargs=dict(pluginmanager=config.pluginmanager)) + config.pluginmanager._importconftest(conf) + #print(config.pluginmanager.get_plugins()) + res = config.hook.pytest_myhook(xyz=10) + assert res == [11] + + def test_addhooks_nohooks(self, testdir): + testdir.makeconftest(""" + import sys + def pytest_addhooks(pluginmanager): + pluginmanager.addhooks(sys) + """) + res = testdir.runpytest() + assert res.ret != 0 + res.stderr.fnmatch_lines([ + "*did not find*sys*" + ]) + + def test_namespace_early_from_import(self, testdir): + p = testdir.makepyfile(""" + from pytest import Item + from pytest import Item as Item2 + assert Item is Item2 + """) + result = testdir.runpython(p) + assert result.ret == 0 + + def test_do_ext_namespace(self, testdir): + testdir.makeconftest(""" + def pytest_namespace(): + return {'hello': 'world'} + """) + p = testdir.makepyfile(""" + from pytest import hello + import pytest + def test_hello(): + assert hello == "world" + assert 'hello' in pytest.__all__ + """) + reprec = testdir.inline_run(p) + reprec.assertoutcome(passed=1) + + def test_do_option_postinitialize(self, testdir): + config = testdir.parseconfigure() + assert not hasattr(config.option, 'test123') + p = testdir.makepyfile(""" + def pytest_addoption(parser): + parser.addoption('--test123', action="store_true", + default=True) + """) + config.pluginmanager._importconftest(p) + assert config.option.test123 + + def test_configure(self, testdir): + config = testdir.parseconfig() + l = [] + class A: + def pytest_configure(self, config): + l.append(self) + + config.pluginmanager.register(A()) + assert len(l) == 0 + config._do_configure() + 
assert len(l) == 1 + config.pluginmanager.register(A()) # leads to a configured() plugin + assert len(l) == 2 + assert l[0] != l[1] + + config._ensure_unconfigure() + config.pluginmanager.register(A()) + assert len(l) == 2 + + def test_hook_tracing(self): + pytestpm = get_config().pluginmanager # fully initialized with plugins + saveindent = [] + class api1: + def pytest_plugin_registered(self): + saveindent.append(pytestpm.trace.root.indent) + class api2: + def pytest_plugin_registered(self): + saveindent.append(pytestpm.trace.root.indent) + raise ValueError() + l = [] + pytestpm.trace.root.setwriter(l.append) + undo = pytestpm.enable_tracing() + try: + indent = pytestpm.trace.root.indent + p = api1() + pytestpm.register(p) + assert pytestpm.trace.root.indent == indent + assert len(l) >= 2 + assert 'pytest_plugin_registered' in l[0] + assert 'finish' in l[1] + + l[:] = [] + with pytest.raises(ValueError): + pytestpm.register(api2()) + assert pytestpm.trace.root.indent == indent + assert saveindent[0] > indent + finally: + undo() + + def test_warn_on_deprecated_multicall(self, pytestpm): + warnings = [] + + class get_warnings: + def pytest_logwarning(self, message): + warnings.append(message) + + class Plugin: + def pytest_configure(self, __multicall__): + pass + + pytestpm.register(get_warnings()) + before = list(warnings) + pytestpm.register(Plugin()) + assert len(warnings) == len(before) + 1 + assert "deprecated" in warnings[-1] + + +def test_namespace_has_default_and_env_plugins(testdir): + p = testdir.makepyfile(""" + import pytest + pytest.mark + """) + result = testdir.runpython(p) + assert result.ret == 0 + +def test_default_markers(testdir): + result = testdir.runpytest("--markers") + result.stdout.fnmatch_lines([ + "*tryfirst*first*", + "*trylast*last*", + ]) + +def test_importplugin_issue375(testdir, pytestpm): + testdir.syspathinsert(testdir.tmpdir) + testdir.makepyfile(qwe="import aaaa") + with pytest.raises(ImportError) as excinfo: + 
pytestpm.import_plugin("qwe") + assert "qwe" not in str(excinfo.value) + assert "aaaa" in str(excinfo.value) + + +class TestPytestPluginManager: + def test_register_imported_modules(self): + pm = PytestPluginManager() + mod = py.std.types.ModuleType("x.y.pytest_hello") + pm.register(mod) + assert pm.is_registered(mod) + l = pm.get_plugins() + assert mod in l + pytest.raises(ValueError, "pm.register(mod)") + pytest.raises(ValueError, lambda: pm.register(mod)) + #assert not pm.is_registered(mod2) + assert pm.get_plugins() == l + + def test_canonical_import(self, monkeypatch): + mod = py.std.types.ModuleType("pytest_xyz") + monkeypatch.setitem(py.std.sys.modules, 'pytest_xyz', mod) + pm = PytestPluginManager() + pm.import_plugin('pytest_xyz') + assert pm.get_plugin('pytest_xyz') == mod + assert pm.is_registered(mod) + + def test_consider_module(self, testdir, pytestpm): + testdir.syspathinsert() + testdir.makepyfile(pytest_p1="#") + testdir.makepyfile(pytest_p2="#") + mod = py.std.types.ModuleType("temp") + mod.pytest_plugins = ["pytest_p1", "pytest_p2"] + pytestpm.consider_module(mod) + assert pytestpm.get_plugin("pytest_p1").__name__ == "pytest_p1" + assert pytestpm.get_plugin("pytest_p2").__name__ == "pytest_p2" + + def test_consider_module_import_module(self, testdir): + pytestpm = get_config().pluginmanager + mod = py.std.types.ModuleType("x") + mod.pytest_plugins = "pytest_a" + aplugin = testdir.makepyfile(pytest_a="#") + reprec = testdir.make_hook_recorder(pytestpm) + #syspath.prepend(aplugin.dirpath()) + py.std.sys.path.insert(0, str(aplugin.dirpath())) + pytestpm.consider_module(mod) + call = reprec.getcall(pytestpm.hook.pytest_plugin_registered.name) + assert call.plugin.__name__ == "pytest_a" + + # check that it is not registered twice + pytestpm.consider_module(mod) + l = reprec.getcalls("pytest_plugin_registered") + assert len(l) == 1 + + def test_consider_env_fails_to_import(self, monkeypatch, pytestpm): + monkeypatch.setenv('PYTEST_PLUGINS', 
'nonexisting', prepend=",") + with pytest.raises(ImportError): + pytestpm.consider_env() + + def test_plugin_skip(self, testdir, monkeypatch): + p = testdir.makepyfile(skipping1=""" + import pytest + pytest.skip("hello") + """) + p.copy(p.dirpath("skipping2.py")) + monkeypatch.setenv("PYTEST_PLUGINS", "skipping2") + result = testdir.runpytest("-rw", "-p", "skipping1", syspathinsert=True) + assert result.ret == EXIT_NOTESTSCOLLECTED + result.stdout.fnmatch_lines([ + "WI1*skipped plugin*skipping1*hello*", + "WI1*skipped plugin*skipping2*hello*", + ]) + + def test_consider_env_plugin_instantiation(self, testdir, monkeypatch, pytestpm): + testdir.syspathinsert() + testdir.makepyfile(xy123="#") + monkeypatch.setitem(os.environ, 'PYTEST_PLUGINS', 'xy123') + l1 = len(pytestpm.get_plugins()) + pytestpm.consider_env() + l2 = len(pytestpm.get_plugins()) + assert l2 == l1 + 1 + assert pytestpm.get_plugin('xy123') + pytestpm.consider_env() + l3 = len(pytestpm.get_plugins()) + assert l2 == l3 + + def test_pluginmanager_ENV_startup(self, testdir, monkeypatch): + testdir.makepyfile(pytest_x500="#") + p = testdir.makepyfile(""" + import pytest + def test_hello(pytestconfig): + plugin = pytestconfig.pluginmanager.get_plugin('pytest_x500') + assert plugin is not None + """) + monkeypatch.setenv('PYTEST_PLUGINS', 'pytest_x500', prepend=",") + result = testdir.runpytest(p, syspathinsert=True) + assert result.ret == 0 + result.stdout.fnmatch_lines(["*1 passed*"]) + + def test_import_plugin_importname(self, testdir, pytestpm): + pytest.raises(ImportError, 'pytestpm.import_plugin("qweqwex.y")') + pytest.raises(ImportError, 'pytestpm.import_plugin("pytest_qweqwx.y")') + + testdir.syspathinsert() + pluginname = "pytest_hello" + testdir.makepyfile(**{pluginname: ""}) + pytestpm.import_plugin("pytest_hello") + len1 = len(pytestpm.get_plugins()) + pytestpm.import_plugin("pytest_hello") + len2 = len(pytestpm.get_plugins()) + assert len1 == len2 + plugin1 = pytestpm.get_plugin("pytest_hello") + 
assert plugin1.__name__.endswith('pytest_hello') + plugin2 = pytestpm.get_plugin("pytest_hello") + assert plugin2 is plugin1 + + def test_import_plugin_dotted_name(self, testdir, pytestpm): + pytest.raises(ImportError, 'pytestpm.import_plugin("qweqwex.y")') + pytest.raises(ImportError, 'pytestpm.import_plugin("pytest_qweqwex.y")') + + testdir.syspathinsert() + testdir.mkpydir("pkg").join("plug.py").write("x=3") + pluginname = "pkg.plug" + pytestpm.import_plugin(pluginname) + mod = pytestpm.get_plugin("pkg.plug") + assert mod.x == 3 + + def test_consider_conftest_deps(self, testdir, pytestpm): + mod = testdir.makepyfile("pytest_plugins='xyz'").pyimport() + with pytest.raises(ImportError): + pytestpm.consider_conftest(mod) + + +class TestPytestPluginManagerBootstrapming: + def test_preparse_args(self, pytestpm): + pytest.raises(ImportError, lambda: + pytestpm.consider_preparse(["xyz", "-p", "hello123"])) + + def test_plugin_prevent_register(self, pytestpm): + pytestpm.consider_preparse(["xyz", "-p", "no:abc"]) + l1 = pytestpm.get_plugins() + pytestpm.register(42, name="abc") + l2 = pytestpm.get_plugins() + assert len(l2) == len(l1) + assert 42 not in l2 + + def test_plugin_prevent_register_unregistered_alredy_registered(self, pytestpm): + pytestpm.register(42, name="abc") + l1 = pytestpm.get_plugins() + assert 42 in l1 + pytestpm.consider_preparse(["xyz", "-p", "no:abc"]) + l2 = pytestpm.get_plugins() + assert 42 not in l2 diff --git a/testing/test_pytester.py b/testing/test_pytester.py index ac57f2c873f..65660afdfe3 100644 --- a/testing/test_pytester.py +++ b/testing/test_pytester.py @@ -1,7 +1,9 @@ import pytest import os from _pytest.pytester import HookRecorder -from _pytest.core import PluginManager +from _pytest.config import PytestPluginManager +from _pytest.main import EXIT_OK, EXIT_TESTSFAILED + def test_make_hook_recorder(testdir): item = testdir.getitem("def test_func(): pass") @@ -62,14 +64,12 @@ def test_parseconfig(testdir): def 
test_testdir_runs_with_plugin(testdir): testdir.makepyfile(""" - pytest_plugins = "pytest_pytester" + pytest_plugins = "pytester" def test_hello(testdir): assert 1 """) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - "*1 passed*" - ]) + result.assert_outcomes(passed=1) def make_holder(): @@ -91,8 +91,8 @@ def pytest_xyz_noarg(): @pytest.mark.parametrize("holder", make_holder()) def test_hookrecorder_basic(holder): - pm = PluginManager() - pm.hook._addhooks(holder, "pytest_") + pm = PytestPluginManager() + pm.addhooks(holder) rec = HookRecorder(pm) pm.hook.pytest_xyz(arg=123) call = rec.popcall("pytest_xyz") @@ -112,12 +112,11 @@ def test_makepyfile_unicode(testdir): unichr = chr testdir.makepyfile(unichr(0xfffd)) -def test_inprocess_plugins(testdir): - class Plugin(object): - configured = False - def pytest_configure(self, config): - self.configured = True - plugin = Plugin() - testdir.inprocess_run([], [plugin]) - - assert plugin.configured +def test_inline_run_clean_modules(testdir): + test_mod = testdir.makepyfile("def test_foo(): assert True") + result = testdir.inline_run(str(test_mod)) + assert result.ret == EXIT_OK + # rewrite module, now test should fail if module was re-imported + test_mod.write("def test_foo(): assert False") + result2 = testdir.inline_run(str(test_mod)) + assert result2.ret == EXIT_TESTSFAILED diff --git a/testing/test_recwarn.py b/testing/test_recwarn.py index 65989cb3c89..644b09ef754 100644 --- a/testing/test_recwarn.py +++ b/testing/test_recwarn.py @@ -1,28 +1,11 @@ -import py, pytest +import warnings +import py +import pytest from _pytest.recwarn import WarningsRecorder -def test_WarningRecorder(recwarn): - showwarning = py.std.warnings.showwarning - rec = WarningsRecorder() - assert py.std.warnings.showwarning != showwarning - assert not rec.list - py.std.warnings.warn_explicit("hello", UserWarning, "xyz", 13) - assert len(rec.list) == 1 - py.std.warnings.warn(DeprecationWarning("hello")) - assert len(rec.list) == 2 - 
warn = rec.pop() - assert str(warn.message) == "hello" - l = rec.list - rec.clear() - assert len(rec.list) == 0 - assert l is rec.list - pytest.raises(AssertionError, "rec.pop()") - rec.finalize() - assert showwarning == py.std.warnings.showwarning def test_recwarn_functional(testdir): reprec = testdir.inline_runsource(""" - pytest_plugins = 'pytest_recwarn', import warnings oldwarn = warnings.showwarning def test_method(recwarn): @@ -36,6 +19,50 @@ def test_finalized(): res = reprec.countoutcomes() assert tuple(res) == (2, 0, 0), res + +class TestWarningsRecorderChecker(object): + def test_recording(self, recwarn): + showwarning = py.std.warnings.showwarning + rec = WarningsRecorder() + with rec: + assert py.std.warnings.showwarning != showwarning + assert not rec.list + py.std.warnings.warn_explicit("hello", UserWarning, "xyz", 13) + assert len(rec.list) == 1 + py.std.warnings.warn(DeprecationWarning("hello")) + assert len(rec.list) == 2 + warn = rec.pop() + assert str(warn.message) == "hello" + l = rec.list + rec.clear() + assert len(rec.list) == 0 + assert l is rec.list + pytest.raises(AssertionError, "rec.pop()") + + assert showwarning == py.std.warnings.showwarning + + def test_typechecking(self): + from _pytest.recwarn import WarningsChecker + with pytest.raises(TypeError): + WarningsChecker(5) + with pytest.raises(TypeError): + WarningsChecker(('hi', RuntimeWarning)) + with pytest.raises(TypeError): + WarningsChecker([DeprecationWarning, RuntimeWarning]) + + def test_invalid_enter_exit(self): + # wrap this test in WarningsRecorder to ensure warning state gets reset + with WarningsRecorder(): + with pytest.raises(RuntimeError): + rec = WarningsRecorder() + rec.__exit__(None, None, None) # can't exit before entering + + with pytest.raises(RuntimeError): + rec = WarningsRecorder() + with rec: + with rec: + pass # can't enter twice + # # ============ test pytest.deprecated_call() ============== # @@ -51,35 +78,107 @@ def dep_explicit(i): 
py.std.warnings.warn_explicit("dep_explicit", category=DeprecationWarning, filename="hello", lineno=3) -def test_deprecated_call_raises(): - excinfo = pytest.raises(AssertionError, - "pytest.deprecated_call(dep, 3)") - assert str(excinfo).find("did not produce") != -1 - -def test_deprecated_call(): - pytest.deprecated_call(dep, 0) - -def test_deprecated_call_ret(): - ret = pytest.deprecated_call(dep, 0) - assert ret == 42 - -def test_deprecated_call_preserves(): - onceregistry = py.std.warnings.onceregistry.copy() - filters = py.std.warnings.filters[:] - warn = py.std.warnings.warn - warn_explicit = py.std.warnings.warn_explicit - test_deprecated_call_raises() - test_deprecated_call() - assert onceregistry == py.std.warnings.onceregistry - assert filters == py.std.warnings.filters - assert warn is py.std.warnings.warn - assert warn_explicit is py.std.warnings.warn_explicit - -def test_deprecated_explicit_call_raises(): - pytest.raises(AssertionError, - "pytest.deprecated_call(dep_explicit, 3)") - -def test_deprecated_explicit_call(): - pytest.deprecated_call(dep_explicit, 0) - pytest.deprecated_call(dep_explicit, 0) +class TestDeprecatedCall(object): + def test_deprecated_call_raises(self): + excinfo = pytest.raises(AssertionError, + "pytest.deprecated_call(dep, 3)") + assert str(excinfo).find("did not produce") != -1 + + def test_deprecated_call(self): + pytest.deprecated_call(dep, 0) + + def test_deprecated_call_ret(self): + ret = pytest.deprecated_call(dep, 0) + assert ret == 42 + + def test_deprecated_call_preserves(self): + onceregistry = py.std.warnings.onceregistry.copy() + filters = py.std.warnings.filters[:] + warn = py.std.warnings.warn + warn_explicit = py.std.warnings.warn_explicit + self.test_deprecated_call_raises() + self.test_deprecated_call() + assert onceregistry == py.std.warnings.onceregistry + assert filters == py.std.warnings.filters + assert warn is py.std.warnings.warn + assert warn_explicit is py.std.warnings.warn_explicit + + def 
test_deprecated_explicit_call_raises(self): + pytest.raises(AssertionError, + "pytest.deprecated_call(dep_explicit, 3)") + + def test_deprecated_explicit_call(self): + pytest.deprecated_call(dep_explicit, 0) + pytest.deprecated_call(dep_explicit, 0) + + +class TestWarns(object): + def test_strings(self): + # different messages, b/c Python suppresses multiple identical warnings + source1 = "warnings.warn('w1', RuntimeWarning)" + source2 = "warnings.warn('w2', RuntimeWarning)" + source3 = "warnings.warn('w3', RuntimeWarning)" + pytest.warns(RuntimeWarning, source1) + pytest.raises(pytest.fail.Exception, + lambda: pytest.warns(UserWarning, source2)) + pytest.warns(RuntimeWarning, source3) + + def test_function(self): + pytest.warns(SyntaxWarning, + lambda msg: warnings.warn(msg, SyntaxWarning), "syntax") + + def test_warning_tuple(self): + pytest.warns((RuntimeWarning, SyntaxWarning), + lambda: warnings.warn('w1', RuntimeWarning)) + pytest.warns((RuntimeWarning, SyntaxWarning), + lambda: warnings.warn('w2', SyntaxWarning)) + pytest.raises(pytest.fail.Exception, + lambda: pytest.warns( + (RuntimeWarning, SyntaxWarning), + lambda: warnings.warn('w3', UserWarning))) + + def test_as_contextmanager(self): + with pytest.warns(RuntimeWarning): + warnings.warn("runtime", RuntimeWarning) + + with pytest.raises(pytest.fail.Exception): + with pytest.warns(RuntimeWarning): + warnings.warn("user", UserWarning) + + with pytest.raises(pytest.fail.Exception): + with pytest.warns(UserWarning): + warnings.warn("runtime", RuntimeWarning) + + with pytest.warns(UserWarning): + warnings.warn("user", UserWarning) + + def test_record(self): + with pytest.warns(UserWarning) as record: + warnings.warn("user", UserWarning) + + assert len(record) == 1 + assert str(record[0].message) == "user" + + def test_record_only(self): + with pytest.warns(None) as record: + warnings.warn("user", UserWarning) + warnings.warn("runtime", RuntimeWarning) + + assert len(record) == 2 + assert 
str(record[0].message) == "user" + assert str(record[1].message) == "runtime" + + def test_double_test(self, testdir): + """If a test is run again, the warning should still be raised""" + testdir.makepyfile(''' + import pytest + import warnings + + @pytest.mark.parametrize('run', [1, 2]) + def test(run): + with pytest.warns(RuntimeWarning): + warnings.warn("runtime", RuntimeWarning) + ''') + result = testdir.runpytest() + result.stdout.fnmatch_lines(['*2 passed in*']) diff --git a/testing/test_runner.py b/testing/test_runner.py index 167ddc57bc4..3641ab8caf0 100644 --- a/testing/test_runner.py +++ b/testing/test_runner.py @@ -293,8 +293,8 @@ class TestExecutionForked(BaseFunctionalTests): def getrunner(self): # XXX re-arrange this test to live in pytest-xdist - xplugin = pytest.importorskip("xdist.plugin") - return xplugin.forked_run_report + boxed = pytest.importorskip("xdist.boxed") + return boxed.forked_run_report def test_suicide(self, testdir): reports = testdir.runitem(""" @@ -431,6 +431,27 @@ def teardown_function(function): ]) assert 'def teardown_function' not in result.stdout.str() + +def test_pytest_no_tests_collected_exit_status(testdir): + result = testdir.runpytest() + result.stdout.fnmatch_lines('*collected 0 items*') + assert result.ret == main.EXIT_NOTESTSCOLLECTED + + testdir.makepyfile(test_foo=""" + def test_foo(): + assert 1 + """) + result = testdir.runpytest() + result.stdout.fnmatch_lines('*collected 1 items*') + result.stdout.fnmatch_lines('*1 passed*') + assert result.ret == main.EXIT_OK + + result = testdir.runpytest('-k nonmatch') + result.stdout.fnmatch_lines('*collected 1 items*') + result.stdout.fnmatch_lines('*1 deselected*') + assert result.ret == main.EXIT_NOTESTSCOLLECTED + + def test_exception_printing_skip(): try: pytest.skip("hello") diff --git a/testing/test_session.py b/testing/test_session.py index 4b38c7efd69..76f804b4f99 100644 --- a/testing/test_session.py +++ b/testing/test_session.py @@ -1,5 +1,7 @@ import pytest +from 
_pytest.main import EXIT_NOTESTSCOLLECTED + class SessionTests: def test_basic_testitem_events(self, testdir): tfile = testdir.makepyfile(""" @@ -203,7 +205,6 @@ def test_minus_x_import_error(self, testdir): def test_plugin_specify(testdir): - testdir.chdir() pytest.raises(ImportError, """ testdir.parseconfig("-p", "nqweotexistent") """) @@ -214,8 +215,8 @@ def test_plugin_specify(testdir): def test_plugin_already_exists(testdir): config = testdir.parseconfig("-p", "terminal") assert config.option.plugins == ['terminal'] - config.do_configure() - config.do_unconfigure() + config._do_configure() + config._ensure_unconfigure() def test_exclude(testdir): hellodir = testdir.mkdir("hello") @@ -240,4 +241,4 @@ def pytest_sessionfinish(): """) res = testdir.runpytest("--collect-only") - assert res.ret == 0 + assert res.ret == EXIT_NOTESTSCOLLECTED diff --git a/testing/test_skipping.py b/testing/test_skipping.py index 6e827cb46bd..1048c9455fb 100644 --- a/testing/test_skipping.py +++ b/testing/test_skipping.py @@ -549,6 +549,43 @@ def test_foo(): 'ERROR*test_foo*', ]) +def test_reportchars_all(testdir): + testdir.makepyfile(""" + import pytest + def test_1(): + assert 0 + @pytest.mark.xfail + def test_2(): + assert 0 + @pytest.mark.xfail + def test_3(): + pass + def test_4(): + pytest.skip("four") + """) + result = testdir.runpytest("-ra") + result.stdout.fnmatch_lines([ + "FAIL*test_1*", + "SKIP*four*", + "XFAIL*test_2*", + "XPASS*test_3*", + ]) + +def test_reportchars_all_error(testdir): + testdir.makepyfile( + conftest=""" + def pytest_runtest_teardown(): + assert 0 + """, + test_simple=""" + def test_foo(): + pass + """) + result = testdir.runpytest('-ra') + result.stdout.fnmatch_lines([ + 'ERROR*test_foo*', + ]) + @pytest.mark.xfail("hasattr(sys, 'pypy_version_info')") def test_errors_in_xfail_skip_expressions(testdir): testdir.makepyfile(""" diff --git a/testing/test_terminal.py b/testing/test_terminal.py index afb79d00ccd..81fdfd60b59 100644 --- 
a/testing/test_terminal.py +++ b/testing/test_terminal.py @@ -1,16 +1,24 @@ """ terminal reporting of the full testing process. """ -import pytest, py +import collections +import pytest +import py +import pluggy import sys +from _pytest.main import EXIT_NOTESTSCOLLECTED from _pytest.terminal import TerminalReporter, repr_pythonversion, getreportopt +from _pytest.terminal import build_summary_stats_line, _plugin_nameversions from _pytest import runner def basic_run_report(item): runner.call_and_report(item, "setup", log=False) return runner.call_and_report(item, "call", log=False) +DistInfo = collections.namedtuple('DistInfo', ['project_name', 'version']) + + class Option: def __init__(self, verbose=False, fulltrace=False): self.verbose = verbose @@ -37,6 +45,21 @@ def pytest_generate_tests(metafunc): funcargs={'option': Option(fulltrace=True)}) +@pytest.mark.parametrize('input,expected', [ + ([DistInfo(project_name='test', version=1)], ['test-1']), + ([DistInfo(project_name='pytest-test', version=1)], ['test-1']), + ([ + DistInfo(project_name='test', version=1), + DistInfo(project_name='test', version=1) + ], ['test-1']), +], ids=['normal', 'prefix-strip', 'deduplicate']) + +def test_plugin_nameversion(input, expected): + pluginlist = [(None, x) for x in input] + result = _plugin_nameversions(pluginlist) + assert result == expected + + class TestTerminal: def test_pass_skip_fail(self, testdir, option): testdir.makepyfile(""" @@ -130,8 +153,8 @@ class TestMore(BaseTests): ]) def test_itemreport_directclasses_not_shown_as_subclasses(self, testdir): - a = testdir.mkpydir("a") - a.join("test_hello.py").write(py.code.Source(""" + a = testdir.mkpydir("a123") + a.join("test_hello123.py").write(py.code.Source(""" class TestClass: def test_method(self): pass @@ -139,7 +162,7 @@ def test_method(self): result = testdir.runpytest("-v") assert result.ret == 0 result.stdout.fnmatch_lines([ - "*a/test_hello.py*PASS*", + "*a123/test_hello123.py*PASS*", ]) assert " <- " not in 
result.stdout.str() @@ -153,7 +176,7 @@ def test_interrupt_me(): raise KeyboardInterrupt # simulating the user """) - result = testdir.runpytest(*option.args) + result = testdir.runpytest(*option.args, no_reraise_ctrlc=True) result.stdout.fnmatch_lines([ " def test_foobar():", "> assert 0", @@ -176,7 +199,7 @@ def test_foobar(): pass """) - result = testdir.runpytest() + result = testdir.runpytest(no_reraise_ctrlc=True) assert result.ret == 2 result.stdout.fnmatch_lines(['*KeyboardInterrupt*']) @@ -408,13 +431,13 @@ def test_passes(): verinfo = ".".join(map(str, py.std.sys.version_info[:3])) result.stdout.fnmatch_lines([ "*===== test session starts ====*", - "platform %s -- Python %s* -- py-%s -- pytest-%s" % ( + "platform %s -- Python %s*pytest-%s*py-%s*pluggy-%s" % ( py.std.sys.platform, verinfo, - py.__version__, pytest.__version__), + pytest.__version__, py.__version__, pluggy.__version__), "*test_header_trailer_info.py .", - "=* 1 passed in *.[0-9][0-9] seconds *=", + "=* 1 passed*in *.[0-9][0-9] seconds *=", ]) - if pytest.config.pluginmanager._plugin_distinfo: + if pytest.config.pluginmanager.list_plugin_distinfo(): result.stdout.fnmatch_lines([ "plugins: *", ]) @@ -457,7 +480,9 @@ def check(x): ]) assert result.ret == 1 - pytestconfig.pluginmanager.skipifmissing("xdist") + if not pytestconfig.pluginmanager.get_plugin("xdist"): + pytest.skip("xdist plugin not installed") + result = testdir.runpytest(p1, '-v', '-n 1') result.stdout.fnmatch_lines([ "*FAIL*test_verbose_reporting.py::test_fail*", @@ -572,7 +597,7 @@ def test_traceconfig(testdir, monkeypatch): result.stdout.fnmatch_lines([ "*active plugins*" ]) - assert result.ret == 0 + assert result.ret == EXIT_NOTESTSCOLLECTED class TestGenericReporting: @@ -714,3 +739,68 @@ def pytest_terminal_summary(terminalreporter): *==== hello ====* world """) + +@pytest.mark.parametrize("exp_color, exp_line, stats_arg", [ + # The method under test only cares about the length of each + # dict value, not the actual 
contents, so tuples of anything + # suffice + + # Important statuses -- the highest priority of these always wins + ("red", "1 failed", {"failed": (1,)}), + ("red", "1 failed, 1 passed", {"failed": (1,), "passed": (1,)}), + + ("red", "1 error", {"error": (1,)}), + ("red", "1 passed, 1 error", {"error": (1,), "passed": (1,)}), + + # (a status that's not known to the code) + ("yellow", "1 weird", {"weird": (1,)}), + ("yellow", "1 passed, 1 weird", {"weird": (1,), "passed": (1,)}), + + ("yellow", "1 pytest-warnings", {"warnings": (1,)}), + ("yellow", "1 passed, 1 pytest-warnings", {"warnings": (1,), + "passed": (1,)}), + + ("green", "5 passed", {"passed": (1,2,3,4,5)}), + + + # "Boring" statuses. These have no effect on the color of the summary + # line. Thus, if *every* test has a boring status, the summary line stays + # at its default color, i.e. yellow, to warn the user that the test run + # produced no useful information + ("yellow", "1 skipped", {"skipped": (1,)}), + ("green", "1 passed, 1 skipped", {"skipped": (1,), "passed": (1,)}), + + ("yellow", "1 deselected", {"deselected": (1,)}), + ("green", "1 passed, 1 deselected", {"deselected": (1,), "passed": (1,)}), + + ("yellow", "1 xfailed", {"xfailed": (1,)}), + ("green", "1 passed, 1 xfailed", {"xfailed": (1,), "passed": (1,)}), + + ("yellow", "1 xpassed", {"xpassed": (1,)}), + ("green", "1 passed, 1 xpassed", {"xpassed": (1,), "passed": (1,)}), + + # Likewise if no tests were found at all + ("yellow", "", {}), + + # Test the empty-key special case + ("yellow", "", {"": (1,)}), + ("green", "1 passed", {"": (1,), "passed": (1,)}), + + + # A couple more complex combinations + ("red", "1 failed, 2 passed, 3 xfailed", + {"passed": (1,2), "failed": (1,), "xfailed": (1,2,3)}), + + ("green", "1 passed, 2 skipped, 3 deselected, 2 xfailed", + {"passed": (1,), + "skipped": (1,2), + "deselected": (1,2,3), + "xfailed": (1,2)}), +]) +def test_summary_stats(exp_line, exp_color, stats_arg): + print("Based on stats: %s" % 
stats_arg) + print("Expect summary: \"%s\"; with color \"%s\"" % (exp_line, exp_color)) + (line, color) = build_summary_stats_line(stats_arg) + print("Actually got: \"%s\"; with color \"%s\"" % (line, color)) + assert line == exp_line + assert color == exp_color diff --git a/testing/test_tmpdir.py b/testing/test_tmpdir.py index e6c87dd5bc2..05b24fc596d 100644 --- a/testing/test_tmpdir.py +++ b/testing/test_tmpdir.py @@ -1,7 +1,7 @@ import py import pytest -from _pytest.tmpdir import tmpdir, TempdirHandler +from _pytest.tmpdir import tmpdir def test_funcarg(testdir): testdir.makepyfile(""" @@ -10,19 +10,19 @@ def pytest_generate_tests(metafunc): metafunc.addcall(id='b') def test_func(tmpdir): pass """) + from _pytest.tmpdir import TempdirFactory reprec = testdir.inline_run() calls = reprec.getcalls("pytest_runtest_setup") item = calls[0].item - # pytest_unconfigure has deleted the TempdirHandler already config = item.config - config._tmpdirhandler = TempdirHandler(config) + tmpdirhandler = TempdirFactory(config) item._initrequest() - p = tmpdir(item._request) + p = tmpdir(item._request, tmpdirhandler) assert p.check() bn = p.basename.strip("0123456789") assert bn.endswith("test_func_a_") item.name = "qwe/\\abc" - p = tmpdir(item._request) + p = tmpdir(item._request, tmpdirhandler) assert p.check() bn = p.basename.strip("0123456789") assert bn == "qwe__abc" @@ -36,9 +36,10 @@ def test_ensuretemp(recwarn): class TestTempdirHandler: def test_mktemp(self, testdir): + from _pytest.tmpdir import TempdirFactory config = testdir.parseconfig() config.option.basetemp = testdir.mkdir("hello") - t = TempdirHandler(config) + t = TempdirFactory(config) tmp = t.mktemp("world") assert tmp.relto(t.getbasetemp()) == "world0" tmp = t.mktemp("this") @@ -49,17 +50,19 @@ def test_mktemp(self, testdir): class TestConfigTmpdir: def test_getbasetemp_custom_removes_old(self, testdir): - p = testdir.tmpdir.join("xyz") - config = testdir.parseconfigure("--basetemp=xyz") - b = 
config._tmpdirhandler.getbasetemp() - assert b == p - h = b.ensure("hello") - config._tmpdirhandler.getbasetemp() - assert h.check() - config = testdir.parseconfigure("--basetemp=xyz") - b2 = config._tmpdirhandler.getbasetemp() - assert b2.check() - assert not h.check() + mytemp = testdir.tmpdir.join("xyz") + p = testdir.makepyfile(""" + def test_1(tmpdir): + pass + """) + testdir.runpytest(p, '--basetemp=%s' % mytemp) + mytemp.check() + mytemp.ensure("hello") + + testdir.runpytest(p, '--basetemp=%s' % mytemp) + mytemp.check() + assert not mytemp.join("hello").check() + def test_basetemp(testdir): mytemp = testdir.tmpdir.mkdir("mytemp") @@ -92,6 +95,7 @@ def test_1(tmpdir): result = testdir.runpytest("-s", p, '--basetemp=%s/bt' % linktemp) assert not result.ret + def test_tmpdir_too_long_on_parametrization(testdir): testdir.makepyfile(""" import pytest @@ -101,3 +105,16 @@ def test_some(arg, tmpdir): """) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) + + +def test_tmpdir_factory(testdir): + testdir.makepyfile(""" + import pytest + @pytest.fixture(scope='session') + def session_dir(tmpdir_factory): + return tmpdir_factory.mktemp('data', numbered=False) + def test_some(session_dir): + session_dir.isdir() + """) + reprec = testdir.inline_run() + reprec.assertoutcome(passed=1) \ No newline at end of file diff --git a/testing/test_unittest.py b/testing/test_unittest.py index 9e3962aadb1..aa055f89c79 100644 --- a/testing/test_unittest.py +++ b/testing/test_unittest.py @@ -1,9 +1,9 @@ +from _pytest.main import EXIT_NOTESTSCOLLECTED import pytest def test_simple_unittest(testdir): testpath = testdir.makepyfile(""" import unittest - pytest_plugins = "pytest_unittest" class MyTestCase(unittest.TestCase): def testpassing(self): self.assertEquals('foo', 'foo') @@ -17,7 +17,6 @@ def test_failing(self): def test_runTest_method(testdir): testdir.makepyfile(""" import unittest - pytest_plugins = "pytest_unittest" class MyTestCaseWithRunTest(unittest.TestCase): def 
runTest(self): self.assertEquals('foo', 'foo') @@ -43,7 +42,7 @@ def __getattr__(self, tag): E = _E() """) result = testdir.runpytest(testpath) - assert result.ret == 0 + assert result.ret == EXIT_NOTESTSCOLLECTED def test_setup(testdir): testpath = testdir.makepyfile(""" @@ -574,7 +573,7 @@ class Test(unittest.TestCase): """) result = testdir.runpytest() assert "TypeError" not in result.stdout.str() - assert result.ret == 0 + assert result.ret == EXIT_NOTESTSCOLLECTED def test_unittest_typerror_traceback(testdir): testdir.makepyfile(""" diff --git a/tox.ini b/tox.ini index be2d9baa0bf..73ecdfb4501 100644 --- a/tox.ini +++ b/tox.ini @@ -19,6 +19,15 @@ deps= nose mock<1.1 # last supported version for py26 +[testenv:py27-subprocess] +changedir=. +basepython=python2.7 +deps=pytest-xdist>=1.13 + mock + nose +commands= + py.test -n3 -rfsxX --runpytest=subprocess {posargs:testing} + [testenv:genscript] commands= py.test --genscript=pytest1 @@ -28,7 +37,7 @@ deps = pytest-flakes>=0.2 commands = py.test --flakes -m flakes _pytest testing [testenv:py27-xdist] -deps=pytest-xdist +deps=pytest-xdist>=1.13 mock nose commands= @@ -37,7 +46,7 @@ commands= [testenv:py34-xdist] deps={[testenv:py27-xdist]deps} commands= - py.test -n3 -rfsxX testing + py.test -n3 -rfsxX {posargs:testing} [testenv:py27-pexpect] changedir=testing @@ -54,7 +63,7 @@ commands= py.test -rfsxX test_pdb.py test_terminal.py test_unittest.py [testenv:py27-nobyte] -deps=pytest-xdist +deps=pytest-xdist>=1.13 distribute=true setenv= PYTHONDONTWRITEBYTECODE=1 @@ -68,7 +77,7 @@ commands= [testenv:py34-trial] # py34-trial does not work -platform=linux|darwin +platform=linux|darwin deps={[testenv:py27-trial]deps} commands= py.test -rsxf {posargs:testing/test_unittest.py} @@ -115,25 +124,26 @@ commands= {envpython} install_cx_freeze.py {envpython} runtests_setup.py build --build-exe build {envpython} tox_run.py - + [testenv:coveralls] -basepython = python3.4 -changedir=testing +passenv = TRAVIS TRAVIS_JOB_ID 
TRAVIS_BRANCH +usedevelop=True +basepython=python3.4 +changedir=. deps = {[testenv]deps} coveralls commands= - coverage run --source=_pytest {envdir}/bin/py.test + coverage run --source=_pytest {envdir}/bin/py.test testing coverage report -m coveralls -passenv=COVERALLS_REPO_TOKEN [pytest] minversion=2.0 plugins=pytester #--pyargs --doctest-modules --ignore=.tox -addopts= -rxsX +addopts= -rxsX -p pytester --ignore=testing/cx_freeze rsyncdirs=tox.ini pytest.py _pytest testing python_files=test_*.py *_test.py testing/*/*.py python_classes=Test Acceptance