Commits

holger krekel committed bf8509a Merge

merge heads

Files changed (77)

 8b8e7c25a13cf863f01b2dd955978285ae9daf6a 1.3.1
 3bff44b188a7ec1af328d977b9d39b6757bb38df 1.3.2
 c59d3fa8681a5b5966b8375b16fccd64a3a8dbeb 1.3.3
+79ef6377705184c55633d456832eea318fedcf61 1.3.4
+79ef6377705184c55633d456832eea318fedcf61 1.3.4
+90fffd35373e9f125af233f78b19416f0938d841 1.3.4
+
+Changes between 1.3.4 and 1.4.0.dev0
+==================================================
+
+- introduce (customizable) assertion failure representations (Floris Bruynooghe)
+- major refactoring of internal collection handling
+- greatly reduce py.test core code, shifting function/python testing to its own plugin
+- fix issue88 (finding custom test nodes from command line arg)
+
+Changes between 1.3.3 and 1.3.4
+==================================================
+
+- fix issue111: improve install documentation for windows
+- fix issue119: fix custom collectability of __init__.py as a module
+- fix issue116: --doctestmodules work with __init__.py files as well
+- fix issue115: unify internal exception passthrough/catching/GeneratorExit
+- fix issue118: new --tb=native for presenting cpython-standard exceptions
+
 Changes between 1.3.2 and 1.3.3
 ==================================================
 
+checks / deprecations for next release
+---------------------------------------------------------------
+tags: bug 1.4 core xdist
+
+* reportinfo -> location in hooks and items
+* check oejskit plugin compatibility
+* terminal reporting - dot-printing
+* some simple profiling 
+
 refine session initialization / fix custom collect crash
 ---------------------------------------------------------------
 tags: bug 1.4 core xdist

doc/announce/release-1.3.4.txt

+py.test/pylib 1.3.4: fixes and new native traceback option
+===========================================================================
+
+pylib/py.test 1.3.4 is a minor maintenance release mostly containing bug fixes
+and a new "--tb=native" traceback option to show "normal" Python standard
+tracebacks instead of the py.test enhanced tracebacks.  See below for more
+change info and http://pytest.org for more general information on features
+and configuration of the testing tool.
+
+Thanks to the issue reporters and generally to Ronny Pfannschmidt for help.
+
+cheers,
+holger krekel
+
+Changes between 1.3.3 and 1.3.4
+==================================================
+
+- fix issue111: improve install documentation for windows
+- fix issue119: fix custom collectability of __init__.py as a module
+- fix issue116: --doctestmodules work with __init__.py files as well
+- fix issue115: unify internal exception passthrough/catching/GeneratorExit
+- fix issue118: new --tb=native for presenting cpython-standard exceptions
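
For illustration, the new option is passed like any other traceback style
on the command line; a sketch (the test file name is arbitrary)::

    py.test --tb=native test_module.py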

doc/example/assertion/failure_demo.py

     module.foo()
 
 
+class TestSpecialisedExplanations(object):
+    def test_eq_text(self):
+        assert 'spam' == 'eggs'
+
+    def test_eq_similar_text(self):
+        assert 'foo 1 bar' == 'foo 2 bar'
+
+    def test_eq_multiline_text(self):
+        assert 'foo\nspam\nbar' == 'foo\neggs\nbar'
+
+    def test_eq_long_text(self):
+        a = '1'*100 + 'a' + '2'*100
+        b = '1'*100 + 'b' + '2'*100
+        assert a == b
+
+    def test_eq_long_text_multiline(self):
+        a = '1\n'*100 + 'a' + '2\n'*100
+        b = '1\n'*100 + 'b' + '2\n'*100
+        assert a == b
+
+    def test_eq_list(self):
+        assert [0, 1, 2] == [0, 1, 3]
+
+    def test_eq_list_long(self):
+        a = [0]*100 + [1] + [3]*100
+        b = [0]*100 + [2] + [3]*100
+        assert a == b
+
+    def test_eq_dict(self):
+        assert {'a': 0, 'b': 1} == {'a': 0, 'b': 2}
+
+    def test_eq_set(self):
+        assert set([0, 10, 11, 12]) == set([0, 20, 21])
+
+    def test_in_list(self):
+        assert 1 in [0, 2, 3, 4, 5]
+
+
 def globf(x):
     return x+1
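
The demo tests above are intended to fail; running them through py.test
shows the new specialised assertion explanations, e.g. (a sketch)::

    py.test doc/example/assertion/failure_demo.py -k test_eq_text
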
 .. _`bin`: bin.html
 
 
-Best practise: install tool and dependencies virtually
+.. _`easy_install`:
+
+Installation using easy_install
+===================================================
+
+Both `Distribute`_ and setuptools_ provide the ``easy_install``
+installation tool.  With it you can type into a command line window::
+
+    easy_install -U py
+
+to install the latest release of the py lib and py.test.  The ``-U`` switch
+will trigger an upgrade if you already have an older version installed.
+Note that setuptools works fine with Python2 interpreters, while `Distribute`_
+additionally works with Python3 and also avoids some issues on Windows.
+
+Known issues:
+
+- **Windows**: If ``easy_install`` or ``py.test`` are not found,
+  see `Python for Windows`_ for preparing your environment for
+  running command line tools.  You may alternatively
+  use an `ActivePython install`_ which makes command line tools
+  automatically available under Windows.
+
+- **Jython2.5.1 on Windows XP**: `Jython does not create command line launchers`_
+  so ``py.test`` will not work correctly.  You may install py.test on
+  CPython, generate a standalone script with ``py.test --genscript=mytest``
+  and then use ``jython mytest`` to run your tests under Jython.
+
+- **On Linux**: If ``easy_install`` fails because it needs to run
+  as the superuser, you are trying to install things globally
+  and need to put ``sudo`` in front of the command.
+
+.. _`ActivePython install`: http://www.activestate.com/activepython/downloads
+.. _`Jython does not create command line launchers`: http://bugs.jython.org/issue1491
+
+
+.. _quickstart: test/quickstart.html
+
+
+Recommendation: install tool and dependencies virtually
 ===========================================================
 
 It is recommended to work with virtual environments
 (as opposed to system-wide "global" environments), which make for a more
 reproducible and reliable test environment.
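
A minimal sketch of such a virtual installation using `virtualenv`_
(the ``env`` directory name is arbitrary)::

    virtualenv env
    source env/bin/activate
    pip install -U py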
 
-
 .. _`virtualenv`: http://pypi.python.org/pypi/virtualenv
 .. _`buildout`: http://www.buildout.org/
 .. _pip: http://pypi.python.org/pypi/pip
-.. _`easy_install`:
-
-using easy_install (from setuptools or Distribute)
-===================================================
-
-Both `Distribute`_ and setuptools_ provide the ``easy_install``
-installation tool.  While setuptools should work ok with
-Python2 interpreters, `Distribute`_ also works with Python3
-and it avoids some issues on Windows.  In both cases you
-can open a command line window and then type::
-
-    easy_install -U py
-
-to install the latest release of the py lib and py.test.  The ``-U`` switch
-will trigger an upgrade if you already have an older version installed.
-
-If you now type::
-
-    py.test --version
-
-you should see the version number and the import location of the tool.
-Maybe you want to head on with the `quickstart`_ now?
-
-.. _quickstart: test/quickstart.html
 
 .. _standalone:
 
 and ask them to send you the resulting URL.  The resulting script has
 all core features and runs unchanged under Python2 and Python3 interpreters.
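
A sketch of generating and running such a standalone script (the file
name is arbitrary)::

    py.test --genscript=runtests.py
    python runtests.py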
 
-Troubleshooting / known issues
-===============================
-
-.. _`Jython does not create command line launchers`: http://bugs.jython.org/issue1491
-
-**Jython2.5.1 on XP**: `Jython does not create command line launchers`_
-so ``py.test`` will not work correctly.  You may install py.test on
-CPython and type ``py.test --genscript=mytest`` and then use
-``jython mytest`` to run py.test for your tests to run in Jython.
-
-**On Linux**: If ``easy_install`` fails because it needs to run
-as the superuser you are trying to install things globally
-and need to put ``sudo`` in front of the command.
-
-**On Windows**: If "easy_install" or "py.test" are not found
-please see here: `How do i run a Python program under Windows?`_
-
-.. _`How do i run a Python program under Windows?`: http://www.python.org/doc/faq/windows/#how-do-i-run-a-python-program-under-windows
+.. _`Python for Windows`: http://www.imladris.com/Scripts/PythonForWindows.html
 
 .. _mercurial: http://mercurial.selenic.com/wiki/
 .. _`Distribute`:

doc/test/plugin/capturelog.txt

 Installation
 ------------
 
-You can install the `pytest-capturelog pypi`_ package
+You can install the `pytest-capturelog pypi`_ package 
 with pip::
 
-    pip install pytest-capturelog
+    pip install pytest-capturelog 
 
 or with easy install::
 

doc/test/plugin/cov.txt

 .. contents::
   :local:
 
-This plugin produces coverage reports using the coverage package.  It
-supports centralised testing and distributed testing in both load and
-each modes.
+This plugin produces coverage reports.  It supports centralised testing and distributed testing in
+both load and each modes.  It also supports coverage of subprocesses.
 
-All features offered by the coverage package should be available,
-either through this plugin or through coverage's own config file.
+All features offered by the coverage package should be available, either through pytest-cov or
+through coverage's config file.
 
 
 Installation
 ------------
 
-The `pytest-cov pypi`_ package may be installed / uninstalled with pip::
+The `pytest-cov`_ package may be installed with pip or easy_install::
 
     pip install pytest-cov
+    easy_install pytest-cov
+
+.. _`pytest-cov`: http://pypi.python.org/pypi/pytest-cov/
+
+
+Uninstallation
+--------------
+
+Uninstalling packages is supported by pip::
+
     pip uninstall pytest-cov
 
-Alternatively easy_install can be used::
+However easy_install does not provide an uninstall facility.
 
-    easy_install pytest-cov
+.. IMPORTANT::
 
-.. _`pytest-cov pypi`: http://pypi.python.org/pypi/pytest-cov/
+    Ensure that you manually delete the init_cov_core.pth file in your site-packages directory.
+
+    This file starts coverage collection of subprocesses if appropriate during site initialisation
+    at python startup.
 
 
 Usage
 Centralised Testing
 ~~~~~~~~~~~~~~~~~~~
 
+Centralised testing will report on the combined coverage of the main process and all of its
+subprocesses.
+
 Running centralised testing::
 
     py.test --cov myproj tests/
 Shows a terminal report::
 
     -------------------- coverage: platform linux2, python 2.6.4-final-0 ---------------------
-    Name                 Stmts   Exec  Cover   Missing
-    --------------------------------------------------
-    myproj/__init__          2      2   100%
-    myproj/myproj          257    244    94%   24-26, 99, 149, 233-236, 297-298, 369-370
-    myproj/feature4286      94     87    92%   183-188, 197
-    --------------------------------------------------
-    TOTAL                  353    333    94%
+    Name                 Stmts   Miss  Cover
+    ----------------------------------------
+    myproj/__init__          2      0   100%
+    myproj/myproj          257     13    94%
+    myproj/feature4286      94      7    92%
+    ----------------------------------------
+    TOTAL                  353     20    94%
 
 
-Distributed Testing
-~~~~~~~~~~~~~~~~~~~
+Distributed Testing: Load
+~~~~~~~~~~~~~~~~~~~~~~~~~
 
-Distributed testing with dist mode set to load::
+Distributed testing with dist mode set to load will report on the combined coverage of all slaves.
+The slaves may be spread out over any number of hosts and each slave may be located anywhere on the
+file system.  Each slave will have its subprocesses measured.
+
+Running distributed testing with dist mode set to load::
 
     py.test --cov myproj -n 2 tests/
 
-The results from the slaves will be combined like so::
+Shows a terminal report::
 
     -------------------- coverage: platform linux2, python 2.6.4-final-0 ---------------------
-    Name                 Stmts   Exec  Cover   Missing
-    --------------------------------------------------
-    myproj/__init__          2      2   100%
-    myproj/myproj          257    244    94%   24-26, 99, 149, 233-236, 297-298, 369-370
-    myproj/feature4286      94     87    92%   183-188, 197
-    --------------------------------------------------
-    TOTAL                  353    333    94%
+    Name                 Stmts   Miss  Cover
+    ----------------------------------------
+    myproj/__init__          2      0   100%
+    myproj/myproj          257     13    94%
+    myproj/feature4286      94      7    92%
+    ----------------------------------------
+    TOTAL                  353     20    94%
 
 
-Distributed testing in each mode::
+Again but spread over different hosts and different directories::
 
-    py.test --cov myproj --dist=each
-            --tx=popen//python=/usr/local/python265/bin/python
-            --tx=popen//python=/usr/local/python27b1/bin/python
+    py.test --cov myproj --dist load
+            --tx ssh=memedough@host1//chdir=testenv1
+            --tx ssh=memedough@host2//chdir=/tmp/testenv2//python=/tmp/env1/bin/python
+            --rsyncdir myproj --rsyncdir tests --rsync examples
             tests/
 
-Will produce a report for each slave::
+Shows a terminal report::
 
-    -------------------- coverage: platform linux2, python 2.6.5-final-0 ---------------------
-    Name                 Stmts   Exec  Cover   Missing
-    --------------------------------------------------
-    myproj/__init__          2      2   100%
-    myproj/myproj          257    244    94%   24-26, 99, 149, 233-236, 297-298, 369-370
-    myproj/feature4286      94     87    92%   183-188, 197
-    --------------------------------------------------
-    TOTAL                  353    333    94%
-    --------------------- coverage: platform linux2, python 2.7.0-beta-1 ---------------------
-    Name                 Stmts   Exec  Cover   Missing
-    --------------------------------------------------
-    myproj/__init__          2      2   100%
-    myproj/myproj          257    244    94%   24-26, 99, 149, 233-236, 297-298, 369-370
-    myproj/feature4286      94     87    92%   183-188, 197
-    --------------------------------------------------
-    TOTAL                  353    333    94%
+    -------------------- coverage: platform linux2, python 2.6.4-final-0 ---------------------
+    Name                 Stmts   Miss  Cover
+    ----------------------------------------
+    myproj/__init__          2      0   100%
+    myproj/myproj          257     13    94%
+    myproj/feature4286      94      7    92%
+    ----------------------------------------
+    TOTAL                  353     20    94%
 
 
-Distributed testing in each mode can also produce a single combined
-report.  This is useful to get coverage information spanning things
-such as all python versions::
+Distributed Testing: Each
+~~~~~~~~~~~~~~~~~~~~~~~~~
 
-    py.test --cov myproj --cov-combine-each --dist=each
-            --tx=popen//python=/usr/local/python265/bin/python
-            --tx=popen//python=/usr/local/python27b1/bin/python
+Distributed testing with dist mode set to each will report on the combined coverage of all slaves.
+Since each slave runs all tests, this allows generating a combined coverage report for multiple
+environments.
+
+Running distributed testing with dist mode set to each::
+
+    py.test --cov myproj --dist each
+            --tx popen//chdir=/tmp/testenv3//python=/usr/local/python27/bin/python
+            --tx ssh=memedough@host2//chdir=/tmp/testenv4//python=/tmp/env2/bin/python
+            --rsyncdir myproj --rsyncdir tests --rsync examples
             tests/
 
-Which looks like::
+Shows a terminal report::
 
     ---------------------------------------- coverage ----------------------------------------
                               platform linux2, python 2.6.5-final-0
-                               platform linux2, python 2.7.0-beta-1
-    Name                 Stmts   Exec  Cover   Missing
-    --------------------------------------------------
-    myproj/__init__          2      2   100%
-    myproj/myproj          257    244    94%   24-26, 99, 149, 233-236, 297-298, 369-370
-    myproj/feature4286      94     87    92%   183-188, 197
-    --------------------------------------------------
-    TOTAL                  353    333    94%
+                              platform linux2, python 2.7.0-final-0
+    Name                 Stmts   Miss  Cover
+    ----------------------------------------
+    myproj/__init__          2      0   100%
+    myproj/myproj          257     13    94%
+    myproj/feature4286      94      7    92%
+    ----------------------------------------
+    TOTAL                  353     20    94%
 
 
 Reporting
 ---------
 
-By default a terminal report is output.  This report can be disabled
-if desired, such as when results are going to a continuous integration
-system and the terminal output won't be seen.
+It is possible to generate any combination of the reports for a single test run.
 
-In addition and without rerunning tests it is possible to generate
-annotated source code, a html report and an xml report.
+The available reports are terminal (with or without missing line numbers shown), HTML, XML and
+annotated source code.
 
-The directories for annotated source code and html reports can be
-specified as can the file name for the xml report.
+The terminal report without line numbers (default)::
 
-Since testing often takes a non trivial amount of time at the end of
-testing any / all of the reports may be generated.
+    py.test --cov-report term --cov myproj tests/
+
+    -------------------- coverage: platform linux2, python 2.6.4-final-0 ---------------------
+    Name                 Stmts   Miss  Cover
+    ----------------------------------------
+    myproj/__init__          2      0   100%
+    myproj/myproj          257     13    94%
+    myproj/feature4286      94      7    92%
+    ----------------------------------------
+    TOTAL                  353     20    94%
+
+
+The terminal report with line numbers::
+
+    py.test --cov-report term-missing --cov myproj tests/
+
+    -------------------- coverage: platform linux2, python 2.6.4-final-0 ---------------------
+    Name                 Stmts   Miss  Cover   Missing
+    --------------------------------------------------
+    myproj/__init__          2      0   100%
+    myproj/myproj          257     13    94%   24-26, 99, 149, 233-236, 297-298, 369-370
+    myproj/feature4286      94      7    92%   183-188, 197
+    --------------------------------------------------
+    TOTAL                  353     20    94%
+
+
+The remaining three reports output to files without showing anything on the terminal (useful
+when the output is going to a continuous integration server)::
+
+    py.test --cov-report html --cov-report xml --cov-report annotate --cov myproj tests/
 
 
 Coverage Data File
 ------------------
 
-During testing there may be many data files with coverage data.  These
-will have unique suffixes and will be combined at the end of testing.
+The data file is erased at the beginning of testing to ensure clean data for each test run.
 
-Upon completion, for --dist=load (and also for --dist=each when the
---cov-combine-each option is used) there will only be one data file.
-
-For --dist=each there may be many data files where each one will have
-the platform / python version info appended to the name.
-
-These data files are left at the end of testing so that it is possible
-to use normal coverage tools to examine them.
-
-At the beginning of testing any data files that are about to be used
-will first be erased so ensure the data is clean for each test run.
-
-It is possible to set the name of the data file.  If needed the
-platform / python version will be appended automatically to this name.
-
-
-Coverage Config File
---------------------
-
-Coverage by default will read its own config file.  An alternative
-file name may be specified or reading config can be disabled entirely.
-
-Care has been taken to ensure that the coverage env vars and config
-file options work the same under this plugin as they do under coverage
-itself.
-
-Since options may be specified in different ways the order of
-precedence between pytest-cov and coverage from highest to lowest is:
-
-1. pytest command line
-2. pytest env var
-3. pytest conftest
-4. coverage env var
-5. coverage config file
-6. coverage default
+The data file is left at the end of testing so that it is possible to use normal coverage tools to
+examine it.
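
Since this is an ordinary coverage data file, a sketch of examining it
afterwards with the ``coverage`` command line tool (assuming the default
``.coverage`` file name)::

    coverage report -m
    coverage html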
 
 
 Limitations
 -----------
 
-For distributed testing the slaves must have the pytest-cov package
-installed.  This is needed since the plugin must be registered through
-setuptools / distribute for pytest to start the plugin on the slave.
+For distributed testing the slaves must have the pytest-cov package installed.  This is needed since
+the plugin must be registered through setuptools / distribute for pytest to start the plugin on the
+slave.
+
+For subprocess measurement the environment variables must be passed from the main process to the
+subprocess.  The Python used by the subprocess must have pytest-cov installed.  The subprocess must
+do normal site initialisation so that the environment variables can be detected and coverage
+started.
 
 
 Acknowledgements
 
 Holger Krekel for pytest with its distributed testing support.
 
-Ned Batchelder for coverage and its ability to combine the coverage
-results of parallel runs.
+Ned Batchelder for coverage and its ability to combine the coverage results of parallel runs.
 
-Whilst this plugin has been built fresh from the ground up to support
-distributed testing it has been influenced by the work done on
-pytest-coverage (Ross Lawley, James Mills, Holger Krekel) and
-nose-cover (Jason Pellerin) which are other coverage plugins for
-pytest and nose respectively.
+Whilst this plugin has been built fresh from the ground up to support distributed testing it has
+been influenced by the work done on pytest-coverage (Ross Lawley, James Mills, Holger Krekel) and
+nose-cover (Jason Pellerin) which are other coverage plugins for pytest and nose respectively.
 
 No doubt others have contributed to these tools as well.
 
 --------------------
 
 
-``--cov-on``
-    enable coverage, only needed if not specifying any --cov options
-``--cov=package``
-    collect coverage for the specified package (multi-allowed)
-``--cov-no-terminal``
-    disable printing a report on the terminal
-``--cov-annotate``
-    generate an annotated source code report
-``--cov-html``
-    generate a html report
-``--cov-xml``
-    generate an xml report
-``--cov-annotate-dir=dir``
-    directory for the annotate report, default: %default
-``--cov-html-dir=dir``
-    directory for the html report, default: coverage_html
-``--cov-xml-file=path``
-    file for the xml report, default: coverage.xml
-``--cov-data-file=path``
-    file containing coverage data, default: .coverage
-``--cov-combine-each``
-    for dist=each mode produce a single combined report
-``--cov-branch``
-    enable branch coverage
-``--cov-pylib``
-    enable python library coverage
-``--cov-timid``
-    enable slower and simpler tracing
-``--cov-no-missing-lines``
-    disable showing missing lines, only relevant to the terminal report
-``--cov-no-missing-files``
-    disable showing message about missing source files
-``--cov-omit=prefix1,prefix2,...``
-    ignore files with these prefixes
-``--cov-no-config``
-    disable coverage reading its config file
-``--cov-config-file=path``
-    config file for coverage, default: %default
+``--cov=path``
+    measure coverage for filesystem path (multi-allowed)
+``--cov-report=type``
+    type of report to generate: term, term-missing, annotate, html, xml (multi-allowed)
+``--cov-config=path``
+    config file for coverage, default: .coveragerc
 
 .. include:: links.txt

doc/test/plugin/coverage.txt

 .. contents::
   :local:
 
-Original code by Ross Lawley.
+Note: Original code by Ross Lawley. 
 
-Requires Ned Batchelder's excellent coverage:
-http://nedbatchelder.com/code/coverage/
+Install
+--------------
+
+Use pip to (un)install::
+
+    pip install pytest-coverage 
+    pip uninstall pytest-coverage 
+
+or alternatively use easy_install to install::
+
+    easy_install pytest-coverage 
+
+
+Usage 
+-------------
+
+To get full test coverage reports for a particular package, type::
+
+    py.test --cover-report=report
 
 command line options
 --------------------
     html: Directory for html output.
                     report: Output a text report.
                     annotate: Annotate your source code for which lines were executed and which were not.
+                    xml: Output an xml report compatible with the cobertura plugin for hudson.
 ``--cover-directory=DIRECTORY``
     Directory for the reports (html / annotate results) defaults to ./coverage
+``--cover-xml-file=XML_FILE``
+    File for the xml report defaults to ./coverage.xml
 ``--cover-show-missing``
     Show missing files
 ``--cover-ignore-errors=IGNORE_ERRORS``

doc/test/plugin/figleaf.txt

 .. contents::
   :local:
 
+Install
+---------------
+
+To install the plugin, issue::
+
+    easy_install pytest-figleaf  # or
+    pip install pytest-figleaf   
+
+and if you are using pip you can also uninstall::
+
+    pip uninstall pytest-figleaf
+
+
 Usage
 ---------------
 
-after pip or easy_install mediated installation of ``pytest-figleaf`` you can type::
+After installation you can simply type::
 
     py.test --figleaf [...]
 
 to enable figleaf coverage in your test run.  A default ".figleaf" data file
-and "html" directory will be created.  You can use ``--fig-data``
-and ``fig-html`` to modify the paths.
+and "html" directory will be created.  You can use command line options
+to control where data and html files are created.
 
 command line options
 --------------------

doc/test/plugin/helpconfig.txt

 ``--traceconfig``
     trace considerations of conftest.py files.
 ``--nomagic``
-    don't reinterpret asserts, no traceback cutting.
+    don't reinterpret asserts, no traceback cutting. 
 ``--debug``
     generate and show internal debugging information.
 ``--help-config``

doc/test/plugin/hookspec.txt

     """
     hook specifications for py.test plugins
     """
-
+    
     # -------------------------------------------------------------------------
     # Command line and configuration
     # -------------------------------------------------------------------------
-
+    
     def pytest_namespace():
         "return dict of name->object which will get stored at py.test. namespace"
-
+    
     def pytest_addoption(parser):
         "add optparse-style options via parser.addoption."
-
+    
     def pytest_addhooks(pluginmanager):
         "add hooks via pluginmanager.registerhooks(module)"
-
+    
     def pytest_configure(config):
         """ called after command line options have been parsed.
             and all plugins and initial conftest files been loaded.
         """
-
+    
     def pytest_unconfigure(config):
         """ called before test process is exited.  """
-
+    
     # -------------------------------------------------------------------------
     # collection hooks
     # -------------------------------------------------------------------------
-
+    
     def pytest_ignore_collect(path, config):
         """ return true value to prevent considering this path for collection.
         This hook is consulted for all files and directories prior to considering
         collection hooks.
         """
     pytest_ignore_collect.firstresult = True
-
+    
     def pytest_collect_directory(path, parent):
         """ return Collection node or None for the given path. """
     pytest_collect_directory.firstresult = True
-
+    
     def pytest_collect_file(path, parent):
         """ return Collection node or None for the given path. """
-
+    
     def pytest_collectstart(collector):
         """ collector starts collecting. """
-
+    
     def pytest_collectreport(report):
         """ collector finished collecting. """
-
+    
     def pytest_deselected(items):
         """ called for test items deselected by keyword. """
-
+    
     def pytest_make_collect_report(collector):
         """ perform a collection and return a collection. """
     pytest_make_collect_report.firstresult = True
-
+    
     # XXX rename to item_collected()?  meaning in distribution context?
     def pytest_itemstart(item, node=None):
         """ test item gets collected. """
-
+    
     # -------------------------------------------------------------------------
     # Python test function related hooks
     # -------------------------------------------------------------------------
-
+    
     def pytest_pycollect_makemodule(path, parent):
         """ return a Module collector or None for the given path.
         This hook will be called for each matching test module path.
         create test modules for files that do not match as a test module.
         """
     pytest_pycollect_makemodule.firstresult = True
-
+    
     def pytest_pycollect_makeitem(collector, name, obj):
         """ return custom item/collector for a python object in a module, or None.  """
     pytest_pycollect_makeitem.firstresult = True
-
+    
     def pytest_pyfunc_call(pyfuncitem):
         """ call underlying test function. """
     pytest_pyfunc_call.firstresult = True
-
+    
     def pytest_generate_tests(metafunc):
         """ generate (multiple) parametrized calls to a test function."""
-
+    
     # -------------------------------------------------------------------------
     # generic runtest related hooks
     # -------------------------------------------------------------------------
-
+    
     def pytest_runtest_protocol(item):
         """ implement fixture, run and report about the given test item. """
     pytest_runtest_protocol.firstresult = True
-
+    
     def pytest_runtest_setup(item):
         """ called before pytest_runtest_call(). """
-
+    
     def pytest_runtest_call(item):
         """ execute test item. """
-
+    
     def pytest_runtest_teardown(item):
         """ called after pytest_runtest_call(). """
-
+    
     def pytest_runtest_makereport(item, call):
         """ make a test report for the given item and call outcome. """
     pytest_runtest_makereport.firstresult = True
-
+    
     def pytest_runtest_logreport(report):
         """ process item test report. """
-
+    
     # special handling for final teardown - somewhat internal for now
     def pytest__teardown_final(session):
         """ called before test session finishes. """
     pytest__teardown_final.firstresult = True
-
+    
     def pytest__teardown_final_logerror(report):
         """ called if runtest_teardown_final failed. """
-
+    
     # -------------------------------------------------------------------------
     # test session related hooks
     # -------------------------------------------------------------------------
-
+    
     def pytest_sessionstart(session):
         """ before session.main() is called. """
-
+    
     def pytest_sessionfinish(session, exitstatus):
         """ whole test run finishes. """
-
+    
     # -------------------------------------------------------------------------
     # hooks for influencing reporting (invoked from pytest_terminal)
     # -------------------------------------------------------------------------
-
+    
     def pytest_report_header(config):
         """ return a string to be displayed as header info for terminal reporting."""
-
+    
     def pytest_report_teststatus(report):
         """ return result-category, shortletter and verbose word for reporting."""
     pytest_report_teststatus.firstresult = True
-
+    
     def pytest_terminal_summary(terminalreporter):
         """ add additional section in terminal summary reporting. """
-
+    
     def pytest_report_iteminfo(item):
         """ return (fspath, lineno, name) for the item.
             the information is used for result display and to sort tests
         """
     pytest_report_iteminfo.firstresult = True
-
+    
     # -------------------------------------------------------------------------
     # doctest hooks
     # -------------------------------------------------------------------------
-
+    
     def pytest_doctest_prepare_content(content):
         """ return processed content for a given doctest"""
     pytest_doctest_prepare_content.firstresult = True
-
-
+    
+    
     # -------------------------------------------------------------------------
     # error handling and internal debugging hooks
     # -------------------------------------------------------------------------
-
+    
     def pytest_plugin_registered(plugin, manager):
         """ a new py lib plugin got registered. """
-
+    
     def pytest_plugin_unregistered(plugin):
         """ a py lib plugin got unregistered. """
-
+    
     def pytest_internalerror(excrepr):
         """ called for internal errors. """
-
+    
     def pytest_keyboard_interrupt(excinfo):
         """ called for keyboard interrupt. """
-
+    
     def pytest_trace(category, msg):
         """ called for debug info. """
 
 
 .. sourcecode:: python
 
-
+    
     def pytest_gwmanage_newgateway(gateway, platinfo):
         """ called on new raw gateway creation. """
-
+    
     def pytest_gwmanage_rsyncstart(source, gateways):
         """ called before rsyncing a directory to remote gateways takes place. """
-
+    
     def pytest_gwmanage_rsyncfinish(source, gateways):
         """ called after rsyncing a directory to remote gateways takes place. """
-
+    
     def pytest_configure_node(node):
         """ configure node information before it gets instantiated. """
-
+    
     def pytest_testnodeready(node):
         """ Test Node is ready to operate. """
-
+    
     def pytest_testnodedown(node, error):
         """ Test Node is down. """
-
+    
     def pytest_rescheduleitems(items):
         """ reschedule Items from a node that went down. """
 

doc/test/plugin/links.txt

 .. _`helpconfig`: helpconfig.html
-.. _`pytest_recwarn.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.2/py/_plugin/pytest_recwarn.py
+.. _`pytest_recwarn.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_recwarn.py
 .. _`unittest`: unittest.html
-.. _`pytest_monkeypatch.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.2/py/_plugin/pytest_monkeypatch.py
-.. _`pytest_genscript.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.2/py/_plugin/pytest_genscript.py
+.. _`pytest_monkeypatch.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_monkeypatch.py
+.. _`pytest_genscript.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_genscript.py
 .. _`pastebin`: pastebin.html
 .. _`skipping`: skipping.html
 .. _`genscript`: genscript.html
 .. _`plugins`: index.html
 .. _`mark`: mark.html
 .. _`tmpdir`: tmpdir.html
-.. _`pytest_doctest.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.2/py/_plugin/pytest_doctest.py
+.. _`pytest_doctest.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_doctest.py
 .. _`capture`: capture.html
-.. _`pytest_nose.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.2/py/_plugin/pytest_nose.py
-.. _`pytest_restdoc.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.2/py/_plugin/pytest_restdoc.py
+.. _`pytest_nose.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_nose.py
+.. _`pytest_restdoc.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_restdoc.py
 .. _`restdoc`: restdoc.html
 .. _`xdist`: xdist.html
-.. _`pytest_pastebin.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.2/py/_plugin/pytest_pastebin.py
-.. _`pytest_tmpdir.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.2/py/_plugin/pytest_tmpdir.py
+.. _`pytest_pastebin.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_pastebin.py
+.. _`pytest_tmpdir.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_tmpdir.py
 .. _`terminal`: terminal.html
-.. _`pytest_hooklog.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.2/py/_plugin/pytest_hooklog.py
+.. _`pytest_hooklog.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_hooklog.py
 .. _`capturelog`: capturelog.html
 .. _`junitxml`: junitxml.html
-.. _`pytest_skipping.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.2/py/_plugin/pytest_skipping.py
+.. _`pytest_skipping.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_skipping.py
 .. _`checkout the py.test development version`: ../../install.html#checkout
-.. _`pytest_helpconfig.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.2/py/_plugin/pytest_helpconfig.py
+.. _`pytest_helpconfig.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_helpconfig.py
 .. _`oejskit`: oejskit.html
 .. _`doctest`: doctest.html
-.. _`pytest_mark.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.2/py/_plugin/pytest_mark.py
+.. _`pytest_mark.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_mark.py
 .. _`get in contact`: ../../contact.html
-.. _`pytest_capture.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.2/py/_plugin/pytest_capture.py
+.. _`pytest_capture.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_capture.py
 .. _`figleaf`: figleaf.html
 .. _`customize`: ../customize.html
 .. _`hooklog`: hooklog.html
-.. _`pytest_terminal.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.2/py/_plugin/pytest_terminal.py
+.. _`pytest_terminal.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_terminal.py
 .. _`recwarn`: recwarn.html
-.. _`pytest_pdb.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.2/py/_plugin/pytest_pdb.py
+.. _`pytest_pdb.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_pdb.py
 .. _`monkeypatch`: monkeypatch.html
 .. _`coverage`: coverage.html
 .. _`resultlog`: resultlog.html
 .. _`cov`: cov.html
-.. _`pytest_junitxml.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.2/py/_plugin/pytest_junitxml.py
+.. _`pytest_junitxml.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_junitxml.py
 .. _`django`: django.html
-.. _`pytest_unittest.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.2/py/_plugin/pytest_unittest.py
+.. _`pytest_unittest.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_unittest.py
 .. _`nose`: nose.html
-.. _`pytest_resultlog.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.2/py/_plugin/pytest_resultlog.py
+.. _`pytest_resultlog.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_resultlog.py
 .. _`pdb`: pdb.html

doc/test/plugin/xdist.txt

     box each test run in a separate process (unix)
 ``--dist=distmode``
     set mode for distributing tests to exec environments.
-
+    
     each: send each test to each available environment.
-
+    
     load: send each test to an available environment.
-
+    
     (default) no: run tests in-process, don't distribute.
 ``--tx=xspec``
     add a test execution environment. some examples: --tx popen//python=python2.5 --tx socket=192.168.1.102:8888 --tx ssh=user@codespeak.net//chdir=testcache
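
Combining the options above into a full invocation, a sketch::

    py.test --dist=load --tx popen//python=python2.5 --tx ssh=user@codespeak.net//chdir=testcache tests/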
 
 (c) Holger Krekel and others, 2004-2010
 """
-__version__ = version = "1.3.4a1"
+__version__ = version = "1.4.0a1"
 
 import py.apipkg
 
             'Directory' : '._test.collect:Directory',
             'File'      : '._test.collect:File',
             'Item'      : '._test.collect:Item',
-            'Module'    : '._test.pycollect:Module',
-            'Class'     : '._test.pycollect:Class',
-            'Instance'  : '._test.pycollect:Instance',
-            'Generator' : '._test.pycollect:Generator',
-            'Function'  : '._test.pycollect:Function',
-            '_fillfuncargs' : '._test.funcargs:fillfuncargs',
         },
         'cmdline': {
-            'main' : '._test.cmdline:main', # backward compat
+            'main' : '._test.session:main', # backward compat
         },
     },
 
         '_AssertionError'   : '._code.assertion:AssertionError',
         '_reinterpret_old'  : '._code.assertion:reinterpret_old',
         '_reinterpret'      : '._code.assertion:reinterpret',
+        '_reprcompare'      : '._code.assertion:_reprcompare',
     },
 
     # backports and additions of builtins
         'frozenset'      : '._builtin:frozenset',
         'BaseException'  : '._builtin:BaseException',
         'GeneratorExit'  : '._builtin:GeneratorExit',
+        '_sysex'         : '._builtin:_sysex',
         'print_'         : '._builtin:print_',
         '_reraise'       : '._builtin:_reraise',
         '_tryimport'     : '._builtin:_tryimport',
         pass
     GeneratorExit.__module__ = 'exceptions'
 
+_sysex = (KeyboardInterrupt, SystemExit, MemoryError, GeneratorExit)
+
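The tuple supports the re-raise pattern used throughout the py lib; a
sketch (``do_work`` is a hypothetical callable)::

    import py

    def call_safely(do_work):
        try:
            return do_work()
        except py.builtin._sysex:
            raise               # never swallow system-exiting exceptions
        except Exception:
            return None         # degrade gracefully on ordinary errors
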
 if sys.version_info >= (3, 0):
     exec ("print_ = print ; exec_=exec")
     import builtins

py/_code/_assertionnew.py

 
 
 class DebugInterpreter(ast.NodeVisitor):
-    """Interpret AST nodes to gleam useful debugging information."""
+    """Interpret AST nodes to gleam useful debugging information. """
 
     def __init__(self, frame):
         self.frame = frame
     def visit_Compare(self, comp):
         left = comp.left
         left_explanation, left_result = self.visit(left)
-        got_result = False
         for op, next_op in zip(comp.ops, comp.comparators):
-            if got_result and not result:
-                break
             next_explanation, next_result = self.visit(next_op)
             op_symbol = operator_map[op.__class__]
             explanation = "%s %s %s" % (left_explanation, op_symbol,
                                          __exprinfo_right=next_result)
             except Exception:
                 raise Failure(explanation)
-            else:
-                got_result = True
+            if not result:
+                break
             left_explanation, left_result = next_explanation, next_result
+
+        rcomp = py.code._reprcompare
+        if rcomp:
+            res = rcomp(op_symbol, left_result, next_result)
+            if res:
+                explanation = res
         return explanation, result
 
     def visit_BoolOp(self, boolop):

py/_code/_assertionold.py

 from compiler import parse, ast, pycodegen
 from py._code.assertion import BuiltinAssertionError, _format_explanation
 
-passthroughex = (KeyboardInterrupt, SystemExit, MemoryError)
+passthroughex = py.builtin._sysex
 
 class Failure:
     def __init__(self, node):

py/_code/assertion.py

 
 BuiltinAssertionError = py.builtin.builtins.AssertionError
 
+_reprcompare = None # if set, will be called by assert reinterp for comparison ops
 
 def _format_explanation(explanation):
-    # uck!  See CallFunc for where \n{ and \n} escape sequences are used
+    """This formats an explanation
+
+    Normally all embedded newlines are escaped, however there are
+    three exceptions: \n{, \n} and \n~.  The first two are intended to
+    cover nested explanations, see function and attribute explanations
+    for examples (.visit_Call(), visit_Attribute()).  The last one is
+    for when one explanation needs to span multiple lines, e.g. when
+    displaying diffs.
+    """
     raw_lines = (explanation or '').split('\n')
-    # escape newlines not followed by { and }
+    # escape newlines not followed by {, } and ~
     lines = [raw_lines[0]]
     for l in raw_lines[1:]:
-        if l.startswith('{') or l.startswith('}'):
+        if l.startswith('{') or l.startswith('}') or l.startswith('~'):
             lines.append(l)
         else:
             lines[-1] += '\\n' + l
             stackcnt[-1] += 1
             stackcnt.append(0)
             result.append(' +' + '  '*(len(stack)-1) + s + line[1:])
-        else:
+        elif line.startswith('}'):
             assert line.startswith('}')
             stack.pop()
             stackcnt.pop()
             result[stack[-1]] += line[1:]
+        else:
+            assert line.startswith('~')
+            result.append('  '*len(stack) + line[1:])
     assert len(stack) == 1
     return '\n'.join(result)
 
 
 class AssertionError(BuiltinAssertionError):
-
     def __init__(self, *args):
         BuiltinAssertionError.__init__(self, *args)
         if args:
             try:
                 self.msg = str(args[0])
-            except (KeyboardInterrupt, SystemExit):
+            except py.builtin._sysex:
                 raise
             except:
                 self.msg = "<[broken __repr__] %s at %0xd>" %(
         """
         try:
             return self.frame.eval("__tracebackhide__")
-        except (SystemExit, KeyboardInterrupt):
+        except py.builtin._sysex:
             raise
         except:
             return False
             abspath=False, tbfilter=True, funcargs=False):
         """ return str()able representation of this exception info.
             showlocals: show locals per traceback entry
-            style: long|short|no traceback style
+            style: long|short|no|native traceback style
             tbfilter: hide entries (where __tracebackhide__ is true)
         """
+        if style == 'native':
+            import traceback
+            return ''.join(traceback.format_exception(
+                self.type,
+                self.value,
+                self.traceback[0]._rawentry,
+                ))
+
         fmt = FormattedExcinfo(showlocals=showlocals, style=style,
             abspath=abspath, tbfilter=tbfilter, funcargs=funcargs)
         return fmt.repr_excinfo(self)
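
A sketch of requesting the new native style on a captured exception::

    import py

    try:
        1 / 0
    except ZeroDivisionError:
        excinfo = py.code.ExceptionInfo()
    print (excinfo.getrepr(style='native'))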

py/_code/source.py

 def findsource(obj):
     try:
         sourcelines, lineno = py.std.inspect.findsource(obj)
-    except (KeyboardInterrupt, SystemExit):
+    except py.builtin._sysex:
         raise
     except:
         return None, None

py/_io/saferepr.py

 
 reprlib = py.builtin._tryimport('repr', 'reprlib')
 
-sysex = (KeyboardInterrupt, MemoryError, SystemExit)
-
 class SafeRepr(reprlib.Repr):
     """ subclass of repr.Repr that limits the resulting size of repr()
         and includes information on exceptions raised during the call.
         try:
             # Try the vanilla repr and make sure that the result is a string
             s = call(x, *args)
-        except sysex:
+        except py.builtin._sysex:
             raise
         except:
             cls, e, tb = sys.exc_info()

py/_io/terminalwriter.py

 def get_terminal_width():
     try:
         height, width = _getdimensions()
-    except (SystemExit, KeyboardInterrupt):
+    except py.builtin._sysex:
         raise
     except:
         # FALLBACK

py/_path/local.py

                 pkg = __import__(pkgpath.basename, None, None, [])
                 names = self.new(ext='').relto(pkgpath.dirpath())
                 names = names.split(self.sep)
+                if names and names[-1] == "__init__":
+                    names.pop()
                 modname = ".".join(names)
             else:
                 # no package scope, still make it possible
             elif modfile.endswith('$py.class'):
                 modfile = modfile[:-9] + '.py'
             if modfile.endswith("__init__.py"):
-                modfile = modfile[:-12]
+                if self.basename != "__init__.py":
+                    modfile = modfile[:-12]
             if not self.samefile(modfile):
                 raise self.ImportMismatchError(modname, modfile, self)
             return mod

py/_plugin/hookspec.py

         and all plugins and initial conftest files been loaded.
     """
 
+def pytest_cmdline_main(config):
+    """ called for performing the main (cmdline) action. """
+pytest_cmdline_main.firstresult = True
+
+def pytest_runtest_mainloop(session):
+    """ called for performing the main runtest loop (after collection. """
+pytest_runtest_mainloop.firstresult = True
+
 def pytest_unconfigure(config):
     """ called before test process is exited.  """
 
 # collection hooks
 # -------------------------------------------------------------------------
 
+def pytest_perform_collection(session):
+    """ perform the collection protocol for the given session. """
+pytest_perform_collection.firstresult = True
+
+def pytest_collection_modifyitems(config, items):
+    """ called to allow filtering and selecting of test items (inplace). """
+
+def pytest_log_finishcollection(collection):
+    """ called after collection has finished. """
+
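A sketch of implementing the new modifyitems hook in a ``conftest.py``;
the "smoke" naming convention is hypothetical::

    def pytest_collection_modifyitems(config, items):
        # reorder in place: run tests with "smoke" in their name first
        items.sort(key=lambda item: 0 if "smoke" in item.name else 1)
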
 def pytest_ignore_collect(path, config):
     """ return true value to prevent considering this path for collection.
     This hook is consulted for all files and directories prior to considering
 def pytest_collect_file(path, parent):
     """ return Collection node or None for the given path. """
 
+# logging hooks for collection
 def pytest_collectstart(collector):
     """ collector starts collecting. """
 
+def pytest_log_itemcollect(item):
+    """ we just collected a test item. """
+
 def pytest_collectreport(report):
     """ collector finished collecting. """
 
     """ perform a collection and return a collection. """
 pytest_make_collect_report.firstresult = True
 
-# XXX rename to item_collected()?  meaning in distribution context?
-def pytest_itemstart(item, node=None):
-    """ test item gets collected. """
-
 # -------------------------------------------------------------------------
 # Python test function related hooks
 # -------------------------------------------------------------------------
 # -------------------------------------------------------------------------
 # generic runtest related hooks
 # -------------------------------------------------------------------------
+def pytest_itemstart(item, node=None):
+    """ (deprecated, use pytest_runtest_logstart). """
 
 def pytest_runtest_protocol(item):
     """ implement fixture, run and report about the given test item. """
 pytest_runtest_protocol.firstresult = True
 
+def pytest_runtest_logstart(nodeid, location, fspath):
+    """ signal the start of a test run. """
+
 def pytest_runtest_setup(item):
     """ called before pytest_runtest_call(). """
 
     """ called before test session finishes. """
 pytest__teardown_final.firstresult = True
 
-def pytest__teardown_final_logerror(report):
+def pytest__teardown_final_logerror(report, session):
     """ called if runtest_teardown_final failed. """
 
 # -------------------------------------------------------------------------
 def pytest_sessionfinish(session, exitstatus):
     """ whole test run finishes. """
 
+
+# -------------------------------------------------------------------------
+# hooks for customising the assert methods
+# -------------------------------------------------------------------------
+
+def pytest_assertrepr_compare(config, op, left, right):
+    """return explanation for comparisons in failing assert expressions.
+
+    Return None for no custom explanation, otherwise return a list
+    of strings.  The strings will be joined by newlines but any newlines
+    *in* a string will be escaped.  Note that all but the first line will
+    be indented slightly; the intention is for the first line to be a summary.
+    """
+
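A minimal sketch of implementing this hook in a ``conftest.py``; the
``Money`` class is hypothetical::

    class Money(object):
        def __init__(self, amount):
            self.amount = amount
        def __eq__(self, other):
            return self.amount == other.amount

    def pytest_assertrepr_compare(config, op, left, right):
        if isinstance(left, Money) and isinstance(right, Money) and op == '==':
            return ['Comparing Money instances:',
                    '   amounts: %s != %s' % (left.amount, right.amount)]
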
 # -------------------------------------------------------------------------
 # hooks for influencing reporting (invoked from pytest_terminal)
 # -------------------------------------------------------------------------
     """ add additional section in terminal summary reporting. """
 
 def pytest_report_iteminfo(item):
-    """ return (fspath, lineno, name) for the item.
-        the information is used for result display and to sort tests
+    """ return (fspath, lineno, domainpath) location info for the item.
+        the information is used for result display and to sort tests.
+        fspath,lineno: file and linenumber of source of item definition.
+        domainpath: custom id - e.g. for python: dotted import address
     """
 pytest_report_iteminfo.firstresult = True
 

py/_plugin/pytest__pytest.py

                 l.append(call)
         return l
 
+    def contains(self, entries):
+        from py.builtin import print_
+        i = 0
+        entries = list(entries)
+        backlocals = py.std.sys._getframe(1).f_locals 
+        while entries:
+            name, check = entries.pop(0)
+            for ind, call in enumerate(self.calls[i:]):
+                if call._name == name:
+                    print_("NAMEMATCH", name, call)
+                    if eval(check, backlocals, call.__dict__):
+                        print_("CHECKERMATCH", repr(check), "->", call)
+                    else:
+                        print_("NOCHECKERMATCH", repr(check), "-", call)
+                        continue
+                    i += ind + 1
+                    break
+                print_("NONAMEMATCH", name, "with", call)
+            else:
+                raise AssertionError("could not find %r in %r" %(
+                    name, self.calls[i:]))
+
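A sketch of how a test might drive this helper; ``rec`` is assumed to be
a hook recorder instance and ``p`` a path bound in the caller's frame,
since the check expressions are evaluated against the caller's locals::

    rec.contains([
        ("pytest_collectstart", "collector.fspath == p"),
        ("pytest_collectreport", "report.collector.fspath == p"),
    ])
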
     def popcall(self, name):
         for i, call in enumerate(self.calls):
             if call._name == name:

py/_plugin/pytest_assertion.py

         help="disable python assert expression reinterpretation."),
 
 def pytest_configure(config):
+    # Below, py.code._reprcompare is set to a callback which invokes
+    # the pytest_assertrepr_compare hook; py._code._assertionnew calls
+    # that callback to obtain customised comparison explanations as
+    # part of the DebugInterpreter.
     if not config.getvalue("noassert") and not config.getvalue("nomagic"):
         warn_about_missing_assertion()
         config._oldassertion = py.builtin.builtins.AssertionError
+        config._oldbinrepr = py.code._reprcompare
         py.builtin.builtins.AssertionError = py.code._AssertionError
+        def callbinrepr(op, left, right):
+            hook_result = config.hook.pytest_assertrepr_compare(
+                config=config, op=op, left=left, right=right)
+            for new_expl in hook_result:
+                if new_expl:
+                    return '\n~'.join(new_expl)
+        py.code._reprcompare = callbinrepr
 
 def pytest_unconfigure(config):
     if hasattr(config, '_oldassertion'):
         py.builtin.builtins.AssertionError = config._oldassertion
+        py.code._reprcompare = config._oldbinrepr
         del config._oldassertion
+        del config._oldbinrepr
 
 def warn_about_missing_assertion():
     try:
     else:
         py.std.warnings.warn("Assertions are turned off!"
                              " (are you using python -O?)")
+
+
+# Provide basestring in python3
+try:
+    basestring = basestring
+except NameError:
+    basestring = str
+
+
+def pytest_assertrepr_compare(op, left, right):
+    """return specialised explanations for some operators/operands"""
+    left_repr = py.io.saferepr(left, maxsize=30)
+    right_repr = py.io.saferepr(right, maxsize=30)
+    summary = '%s %s %s' % (left_repr, op, right_repr)
+
+    issequence = lambda x: isinstance(x, (list, tuple))
+    istext = lambda x: isinstance(x, basestring)
+    isdict = lambda x: isinstance(x, dict)
+    isset = lambda x: isinstance(x, set)
+
+    explanation = None
+    if op == '==':
+        if istext(left) and istext(right):
+            explanation = _diff_text(left, right)
+        elif issequence(left) and issequence(right):
+            explanation = _compare_eq_sequence(left, right)
+        elif isset(left) and isset(right):
+            explanation = _compare_eq_set(left, right)
+        elif isdict(left) and isdict(right):
+            explanation = _diff_text(py.std.pprint.pformat(left),
+                                     py.std.pprint.pformat(right))
+    elif op == 'in':
+        pass                    # XXX
+
+    if not explanation:
+        return None
+
+    # Don't include pageloads of data, should be configurable
+    if len(''.join(explanation)) > 80*8:
+        explanation = ['Detailed information too verbose, truncated']
+
+    return [summary] + explanation
+
+
+def _diff_text(left, right):
+    """Return the explanation for the diff between text
+
+    This will skip leading and trailing characters which are
+    identical to keep the diff minimal.
+    """
+    explanation = []
+    for i in range(min(len(left), len(right))):
+        if left[i] != right[i]:
+            break
+    if i > 42:
+        i -= 10                 # Provide some context
+        explanation = ['Skipping %s identical '
+                       'leading characters in diff' % i]
+        left = left[i:]
+        right = right[i:]
+    if len(left) == len(right):
+        for i in range(len(left)):
+            if left[-i] != right[-i]:
+                break
+        if i > 42:
+            i -= 10     # Provide some context
+            explanation += ['Skipping %s identical '
+                            'trailing characters in diff' % i]
+            left = left[:-i]
+            right = right[:-i]
+    explanation += [line.strip('\n')
+                    for line in py.std.difflib.ndiff(left.splitlines(),
+                                                     right.splitlines())]
+    return explanation
+
+
+def _compare_eq_sequence(left, right):
+    explanation = []
+    for i in range(min(len(left), len(right))):
+        if left[i] != right[i]:
+            explanation += ['First differing item %s: %s != %s' %
+                            (i, left[i], right[i])]
+            break
+    if len(left) > len(right):
+        explanation += ['Left contains more items, '
+                        'first extra item: %s' % left[len(right)]]
+    elif len(left) < len(right):
+        explanation += ['Right contains more items, '
+                        'first extra item: %s' % right[len(left)]]
+    return explanation + _diff_text(py.std.pprint.pformat(left),
+                                    py.std.pprint.pformat(right))
+
+
+def _compare_eq_set(left, right):
+    explanation = []
+    diff_left = left - right
+    diff_right = right - left
+    if diff_left:
+        explanation.append('Extra items in the left set:')
+        for item in diff_left:
+            explanation.append(py.io.saferepr(item))
+    if diff_right:
+        explanation.append('Extra items in the right set:')
+        for item in diff_right:
+            explanation.append(py.io.saferepr(item))
+    return explanation
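
The same hook is open to third-party code: a project can implement
pytest_assertrepr_compare in its conftest.py to explain comparisons of its own
types, falling back to the default explanation by returning None. A minimal
sketch, assuming a hypothetical Point class with x/y attributes:

    # conftest.py -- illustrative sketch only
    def pytest_assertrepr_compare(op, left, right):
        from mymodule import Point   # hypothetical domain class
        if op == '==' and isinstance(left, Point) and isinstance(right, Point):
            return ['Comparing Point instances:',
                    '   x: %s != %s' % (left.x, right.x),
                    '   y: %s != %s' % (left.y, right.y)]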

py/_plugin/pytest_default.py

 import sys
 import py
 
-def pytest_pyfunc_call(__multicall__, pyfuncitem):
-    if not __multicall__.execute():
-        testfunction = pyfuncitem.obj
-        if pyfuncitem._isyieldedfunction():
-            testfunction(*pyfuncitem._args)
-        else:
-            funcargs = pyfuncitem.funcargs
-            testfunction(**funcargs)
+def pytest_cmdline_main(config):
+    from py._test.session import Session
+    return Session(config).main()
 
-def pytest_collect_file(path, parent):
-    ext = path.ext
-    pb = path.purebasename
-    if pb.startswith("test_") or pb.endswith("_test") or \
-       path in parent.config._argfspaths:
-        if ext == ".py":
-            return parent.ihook.pytest_pycollect_makemodule(
-                path=path, parent=parent)
+def pytest_perform_collection(session):
+    collection = session.collection
+    assert not hasattr(collection, 'items')
+    hook = session.config.hook
+    collection.items = items = collection.perform_collect()
+    hook.pytest_collection_modifyitems(config=session.config, items=items)
+    hook.pytest_log_finishcollection(collection=collection)
+    return True
 
-def pytest_pycollect_makemodule(path, parent):
-    return parent.Module(path, parent)
-
-def pytest_funcarg__pytestconfig(request):
-    """ the pytest config object with access to command line opts."""
-    return request.config
+def pytest_runtest_mainloop(session):
+    if session.config.option.collectonly:
+        return True
+    for item in session.collection.items:
+        item.config.hook.pytest_runtest_protocol(item=item)
+        if session.shouldstop:
+            raise session.Interrupted(session.shouldstop)
+    return True
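
The pytest_collection_modifyitems hook called from pytest_perform_collection
above is also the natural extension point for conftest files that want to
filter or reorder tests before this loop runs (pytest_keyword below uses it
for -k selection). A minimal sketch, with an arbitrary reordering for
illustration:

    # conftest.py -- illustrative sketch only
    def pytest_collection_modifyitems(config, items):
        # mutate the list in place; the session iterates over it afterwards
        items.reverse()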
 
 def pytest_ignore_collect(path, config):
     ignore_paths = config.getconftest_pathlist("collect_ignore", path=path)
     excludeopt = config.getvalue("ignore")
     if excludeopt:
         ignore_paths.extend([py.path.local(x) for x in excludeopt])
     return path in ignore_paths
-    # XXX more refined would be:
-    if ignore_paths:
-        for p in ignore_paths:
-            if path == p or path.relto(p):
-                return True
-
 
 def pytest_collect_directory(path, parent):
     # XXX reconsider the following comment
     # define Directory(dir) already
     if not parent.recfilter(path): # by default special ".cvs", ...
         # check if cmdline specified this dir or a subdir directly
-        for arg in parent.config._argfspaths:
+        for arg in parent.collection._argfspaths:
             if path == arg or arg.relto(path):
                 break
         else:
     group._addoption('--maxfail', metavar="num",
                action="store", type="int", dest="maxfail", default=0,
                help="exit after first num failures or errors.")
-    group._addoption('-k',
-        action="store", dest="keyword", default='',
-        help="only run test items matching the given "
-             "space separated keywords.  precede a keyword with '-' to negate. "
-             "Terminate the expression with ':' to treat a match as a signal "
-             "to run all subsequent tests. ")
 
     group = parser.getgroup("collect", "collection")
     group.addoption('--collectonly',
                help="base temporary directory for this test run.")
 
 def pytest_configure(config):
-    setsession(config)
     # compat
     if config.getvalue("exitfirst"):
         config.option.maxfail = 1
 
-def setsession(config):
-    val = config.getvalue
-    if val("collectonly"):
-        from py._test.session import Session
-        config.setsessionclass(Session)
-
-# pycollect related hooks and code, should move to pytest_pycollect.py
-
-def pytest_pycollect_makeitem(__multicall__, collector, name, obj):
-    res = __multicall__.execute()
-    if res is not None:
-        return res
-    if collector._istestclasscandidate(name, obj):
-        res = collector._deprecated_join(name)
-        if res is not None:
-            return res
-        return collector.Class(name, parent=collector)
-    elif collector.funcnamefilter(name) and hasattr(obj, '__call__'):
-        res = collector._deprecated_join(name)
-        if res is not None:
-            return res
-        if is_generator(obj):
-            # XXX deprecation warning
-            return collector.Generator(name, parent=collector)
-        else:
-            return collector._genfunctions(name, obj)
-
-def is_generator(func):
-    try:
-        return py.code.getrawcode(func).co_flags & 32 # generator function
-    except AttributeError: # builtin functions have no bytecode
-        # assume them to not be generators
-        return False

py/_plugin/pytest_doctest.py

         else:
             return super(DoctestItem, self).repr_failure(excinfo)
 
+    def reportinfo(self):
+        return self.fspath, None, "[doctest]"
+
 class DoctestTextfile(DoctestItem):
     def runtest(self):
         if not self._deprecated_testexecution():

py/_plugin/pytest_genscript.py

         dest="genscript", metavar="path",
         help="create standalone py.test script at given target path.")
 
-def pytest_configure(config):
+def pytest_cmdline_main(config):
     genscript = config.getvalue("genscript")
     if genscript:
         import py
         pybasedir = py.path.local(py.__file__).dirpath().dirpath()
         genscript = py.path.local(genscript)
         main(pybasedir, outfile=genscript, infile=infile)
-        raise SystemExit(0)
+        return 0
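
Instead of raising SystemExit from pytest_configure, the plugin now implements
pytest_cmdline_main; judging from this change and the pytest_helpconfig one
below, a non-None return value is used as the process exit status. A
third-party plugin could follow the same pattern (sketch; the option name is
hypothetical):

    def pytest_cmdline_main(config):
        if config.getvalue("myplugin_report"):   # hypothetical option
            print("plugin report")               # stand-in for the real work
            return 0                             # becomes the exit status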
 
 def main(pybasedir, outfile, infile):
     import base64

py/_plugin/pytest_helpconfig.py

             help="show available conftest.py and ENV-variable names.")
 
 
-def pytest_configure(__multicall__, config):
+def pytest_cmdline_main(config):
     if config.option.version:
         p = py.path.local(py.__file__).dirpath()
         sys.stderr.write("This is py.test version %s, imported from %s\n" %
             (py.__version__, p))
-        sys.exit(0)
-    if not config.option.helpconfig:
-        return
-    __multicall__.execute()
+        return 0
+    elif config.option.helpconfig:
+        config.pluginmanager.do_configure(config)
+        showpluginhelp(config)
+        return 0
+
+def showpluginhelp(config):
     options = []
     for group in config._parser._groups:
         options.extend(group.options)
             help,
             )
         tw.line(line[:tw.fullwidth])
-
     tw.sep("-")
-    sys.exit(0)
 
 conftest_options = (
     ('pytest_plugins', 'list of plugin names to load'),

py/_plugin/pytest_junitxml.py

         self._durations = {}
 
     def _opentestcase(self, report):
-        if hasattr(report, 'item'):
-            node = report.item
-        else:
-            node = report.collector
-        d = {'time': self._durations.pop(node, "0")}
-        names = [x.replace(".py", "") for x in node.listnames() if x != "()"]
+        names = tuple(report.nodenames)  # hashable, matches the key stored in pytest_runtest_call
+        d = {'time': self._durations.pop(names, "0")}
+        names = [x.replace(".py", "") for x in names if x != "()"]
         classnames = names[:-1]
         if self.prefix:
             classnames.insert(0, self.prefix)
             self.append_skipped(report)
 
     def pytest_runtest_call(self, item, __multicall__):
+        names = tuple(item.listnames())
         start = time.time()
         try:
             return __multicall__.execute()
         finally:
-            self._durations[item] = time.time() - start
+            self._durations[names] = time.time() - start
 
     def pytest_collectreport(self, report):
         if not report.passed:

py/_plugin/pytest_keyword.py

+
+def pytest_addoption(parser):
+    group = parser.getgroup("general")
+    group._addoption('-k',
+        action="store", dest="keyword", default='',
+        help="only run test items matching the given "
+             "space separated keywords.  precede a keyword with '-' to negate. "
+             "Terminate the expression with ':' to treat a match as a signal "
+             "to run all subsequent tests. ")
+
+def pytest_collection_modifyitems(items, config):
+    keywordexpr = config.option.keyword
+    if not keywordexpr:
+        return
+    selectuntil = False
+    if keywordexpr[-1] == ":":
+        selectuntil = True
+        keywordexpr = keywordexpr[:-1]
+
+    remaining = []
+    deselected = []
+    for colitem in items:
+        if keywordexpr and skipbykeyword(colitem, keywordexpr):
+            deselected.append(colitem)
+        else:
+            remaining.append(colitem)
+            if selectuntil:
+                keywordexpr = None
+
+    if deselected:
+        config.hook.pytest_deselected(items=deselected)
+        items[:] = remaining
+
+def skipbykeyword(colitem, keywordexpr):
+    """ return True if they given keyword expression means to
+        skip this collector/item.
+    """
+    if not keywordexpr:
+        return
+    chain = colitem.listchain()
+    for key in filter(None, keywordexpr.split()):
+        eor = key[:1] == '-'
+        if eor:
+            key = key[1:]
+        if not (eor ^ matchonekeyword(key, chain)):
+            return True
+
+def matchonekeyword(key, chain):
+    elems = key.split(".")
+    # XXX O(n^2), does anyone care?
+    chain = [item.keywords for item in chain if item.keywords]
+    for start, _ in enumerate(chain):
+        if start + len(elems) > len(chain):
+            return False
+        for num, elem in enumerate(elems):
+            ok = False  # reset per element, before the inner loop runs
+            for keyword in chain[num + start]:
+                if elem in keyword:
+                    ok = True
+                    break
+            if not ok:
+                break
+        if num == len(elems) - 1 and ok:
+            return True
+    return False
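
In command line terms, the matching implemented above means roughly:

    py.test -k webtest          # only items with 'webtest' in their keyword chain
    py.test -k "-webtest"       # everything except those items
    py.test -k "test_one:"      # run test_one and all tests collected after it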

py/_plugin/pytest_monkeypatch.py

 .. _`monkeypatch blog post`: http://tetamap.wordpress.com/2009/03/03/monkeypatching-in-unit-tests-done-right/
 """
 
-import py, os, sys
+import os, sys
 
 def pytest_funcarg__monkeypatch(request):
     """The returned ``monkeypatch`` funcarg provides these

py/_plugin/pytest_pytester.py

 import re
 import inspect
 import time
+from fnmatch import fnmatch
 from py._test.config import Config as pytestConfig
 from py.builtin import print_
 
 def pytest_addoption(parser):
     group = parser.getgroup("pylib")
-    group.addoption('--tools-on-path',
-           action="store_true", dest="toolsonpath", default=False,
+    group.addoption('--no-tools-on-path',
+           action="store_true", dest="notoolsonpath", default=False,
            help=("discover tools on PATH instead of going through py.cmdline.")
     )
 
     def __repr__(self):
         return "<TmpTestdir %r>" % (self.tmpdir,)
 
-    def Config(self, topdir=None):
-        if topdir is None:
-            topdir = self.tmpdir.dirpath()
-        return pytestConfig(topdir=topdir)