Commits

Ned Batchelder  committed 744a7e9

Initial coverage.py 3.0 beta 1

  • Participants

Comments (0)

Files changed (28)

+# lint Python modules using external checkers.
+# 
+# This is the main checker controlling the other ones and the reports
+# generation. It is itself both a raw checker and an astng checker in order
+# to:
+# * handle message activation / deactivation at the module level
+# * handle some basic but necessary stats'data (number of classes, methods...)
+# 
+[MASTER]
+
+# Specify a configuration file.
+#rcfile=
+
+# Python code to execute, usually for sys.path manipulation such as
+# pygtk.require().
+#init-hook=
+
+# Profiled execution.
+profile=no
+
+# Add <file or directory> to the black list. It should be a base name, not a
+# path. You may set this option multiple times.
+ignore=CVS
+
+# Pickle collected data for later comparisons.
+persistent=yes
+
+# Set the cache size for astng objects.
+cache-size=500
+
+# List of plugins (as comma separated values of python modules names) to load,
+# usually to register additional checkers.
+load-plugins=
+
+
+[MESSAGES CONTROL]
+
+# Enable only checker(s) with the given id(s). This option conflicts with the
+# disable-checker option
+#enable-checker=
+
+# Enable all checker(s) except those with the given id(s). This option
+# conflicts with the enable-checker option
+#disable-checker=
+
+# Enable all messages in the listed categories.
+#enable-msg-cat=
+
+# Disable all messages in the listed categories.
+#disable-msg-cat=
+
+# Enable the message(s) with the given id(s).
+#enable-msg=
+
+# Disable the message(s) with the given id(s).
+# Messages that are just silly:
+#   I0011:106: Locally disabling E1101
+#   W0603: 28:call_singleton_method: Using the global statement
+#   W0142: 31:call_singleton_method: Used * or ** magic
+#   C0323:311:coverage.report: Operator not followed by a space
+# Messages that may be silly:
+#   R0201: 42:Tracer.stop: Method could be a function
+# Messages that are noisy for now, eventually maybe we'll turn them on:
+#   C0111:169:coverage.analyze_morf: Missing docstring
+#   C0103:256:coverage.morf_filename: Invalid name "f" (should match [a-z_][a-z0-9_]{2,30}$)
+disable-msg=I0011,W0603,W0142,C0323, R0201, C0111,C0103
+
+[REPORTS]
+
+# set the output format. Available formats are text, parseable, colorized, msvs
+# (visual studio) and html
+output-format=text
+
+# Include message's id in output
+include-ids=yes
+
+# Put messages in a separate file for each module / package specified on the
+# command line instead of printing them on stdout. Reports (if any) will be
+# written in a file name "pylint_global.[txt|html]".
+files-output=no
+
+# Tells whether to display a full report or only the messages
+reports=no
+
+# Python expression which should return a note less than 10 (10 is the highest
+# note). You have access to the variables error, warning, refactor, convention,
+# and statement, which respectively contain the number of errors / warnings /
+# refactoring / convention messages and the total number of statements
+# analyzed. This is used by the global evaluation report (R0004).
+evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
+
+# Add a comment according to your evaluation note. This is used by the global
+# evaluation report (R0004).
+comment=no
+
+# Enable the report(s) with the given id(s).
+#enable-report=
+
+# Disable the report(s) with the given id(s).
+#disable-report=
+
+
+# checks for :
+# * doc strings
+# * modules / classes / functions / methods / arguments / variables name
+# * number of arguments, local variables, branches, returns and statements in
+# functions, methods
+# * required module attributes
+# * dangerous default values as arguments
+# * redefinition of function / method / class
+# * uses of the global statement
+# 
+[BASIC]
+
+# Required attributes for module, separated by a comma
+required-attributes=
+
+# Regular expression which should only match functions or classes name which do
+# not require a docstring
+no-docstring-rgx=__.*__
+
+# Regular expression which should only match correct module names
+module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
+
+# Regular expression which should only match correct module level names
+const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
+
+# Regular expression which should only match correct class names
+class-rgx=[A-Z_][a-zA-Z0-9]+$
+
+# Regular expression which should only match correct function names
+function-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match correct method names
+method-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match correct instance attribute names
+attr-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match correct argument names
+argument-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match correct variable names
+variable-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match correct list comprehension /
+# generator expression variable names
+inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
+
+# Good variable names which should always be accepted, separated by a comma
+good-names=i,j,k,ex,Run,_
+
+# Bad variable names which should always be refused, separated by a comma
+bad-names=foo,bar,baz,toto,tutu,tata
+
+# List of builtins function names that should not be used, separated by a comma
+bad-functions=
+
+
+# try to find bugs in the code using type inference
+# 
+[TYPECHECK]
+
+# Tells whether missing members accessed in mixin class should be ignored. A
+# mixin class is detected if its name ends with "mixin" (case insensitive).
+ignore-mixin-members=yes
+
+# List of classes names for which member attributes should not be checked
+# (useful for classes with attributes dynamically set).
+ignored-classes=SQLObject
+
+# When zope mode is activated, consider the acquired-members option to ignore
+# access to some undefined attributes.
+zope=no
+
+# List of members usually obtained through zope's acquisition mechanism and
+# which so shouldn't trigger E0201 when accessed (need zope=yes to be considered).
+acquired-members=REQUEST,acl_users,aq_parent
+
+
+# checks for
+# * unused variables / imports
+# * undefined variables
+# * redefinition of variable from builtins or from an outer scope
+# * use of variable before assignment
+# 
+[VARIABLES]
+
+# Tells whether we should check for unused import in __init__ files.
+init-import=no
+
+# A regular expression matching names used for dummy variables (i.e. not used).
+dummy-variables-rgx=_|dummy|unused|.*_unused
+
+# List of additional names supposed to be defined in builtins. Remember that
+# you should avoid to define new builtins when possible.
+additional-builtins=
+
+
+# checks for :
+# * methods without self as first argument
+# * overridden methods signature
+# * access only to existent members via self
+# * attributes not defined in the __init__ method
+# * supported interfaces implementation
+# * unreachable code
+# 
+[CLASSES]
+
+# List of interface methods to ignore, separated by a comma. This is used for
+# instance to not check methods defines in Zope's Interface base class.
+ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by
+
+# List of method names used to declare (i.e. assign) instance attributes.
+defining-attr-methods=__init__,__new__,setUp
+
+
+# checks for sign of poor/misdesign:
+# * number of methods, attributes, local variables...
+# * size, complexity of functions, methods
+# 
+[DESIGN]
+
+# Maximum number of arguments for function / method
+max-args=15
+
+# Maximum number of locals for function / method body
+max-locals=50
+
+# Maximum number of return / yield for function / method body
+max-returns=20
+
+# Maximum number of branch for function / method body
+max-branchs=50
+
+# Maximum number of statements in function / method body
+max-statements=150
+
+# Maximum number of parents for a class (see R0901).
+max-parents=7
+
+# Maximum number of attributes for a class (see R0902).
+max-attributes=40
+
+# Minimum number of public methods for a class (see R0903).
+min-public-methods=1
+
+# Maximum number of public methods for a class (see R0904).
+max-public-methods=500
+
+
+# checks for
+# * external modules dependencies
+# * relative / wildcard imports
+# * cyclic imports
+# * uses of deprecated modules
+# 
+[IMPORTS]
+
+# Deprecated modules which should not be used, separated by a comma
+deprecated-modules=regsub,string,TERMIOS,Bastion,rexec
+
+# Create a graph of every (i.e. internal and external) dependencies in the
+# given file (report R0402 must not be disabled)
+import-graph=
+
+# Create a graph of external dependencies in the given file (report R0402 must
+# not be disabled)
+ext-import-graph=
+
+# Create a graph of internal dependencies in the given file (report R0402 must
+# not be disabled)
+int-import-graph=
+
+
+# checks for :
+# * unauthorized constructions
+# * strict indentation
+# * line length
+# * use of <> instead of !=
+# 
+[FORMAT]
+
+# Maximum number of characters on a single line.
+max-line-length=80
+
+# Maximum number of lines in a module
+max-module-lines=1000
+
+# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
+# tab).
+indent-string='    '
+
+
+# checks for:
+# * warning notes in the code like FIXME, XXX
+# * PEP 263: source code with non ascii character but no encoding declaration
+# 
+[MISCELLANEOUS]
+
+# List of note tags to take in consideration, separated by a comma.
+notes=FIXME,XXX,TODO
+
+
+# checks for similarities and duplicated code. This computation may be
+# memory / CPU intensive, so you should disable it if you experience
+# problems.
+# 
+[SIMILARITIES]
+
+# Minimum lines number of a similarity.
+min-similarity-lines=4
+
+# Ignore comments when computing similarities.
+ignore-comments=yes
+
+# Ignore docstrings when computing similarities.
+ignore-docstrings=yes
+------------------------------
+CHANGE HISTORY for coverage.py
+------------------------------
+
+Version 3.0b1
+-------------
+
+Major overhaul.
+
+- Coverage.py is now a package rather than a module, so the name is a bit of a
+  misnomer, since there is no longer a file named coverage.py.  Functionality
+  has been split into classes.
+
+- The trace function is implemented in C for speed.
+
+- Executable lines are identified by reading the line number tables in the
+  compiled code, removing a great deal of complicated analysis code.
+  
+- The singleton coverage object is only created if the module-level functions
+  are used.  This maintains the old interface while allowing better
+  programmatic use of coverage.py.
+  
+- Minimum supported Python version is 2.3.
+
+
+Version 2.85, 14 September 2008
+-------------------------------
+
+- Add support for finding source files in eggs. Don't check for
+  morf's being instances of ModuleType, instead use duck typing so that
+  pseudo-modules can participate. Thanks, Imri Goldberg.
+  
+- Use os.realpath as part of the fixing of filenames so that symlinks won't
+  confuse things. Thanks, Patrick Mezard.
+
+
+Version 2.80, 25 May 2008
+-------------------------
+
+- Open files in rU mode to avoid line ending craziness. Thanks, Edward Loper.
+
+
+Version 2.78, 30 September 2007
+-------------------------------
+
+- Don't try to predict whether a file is Python source based on the extension.
+  Extensionless files are often Python scripts. Instead, simply parse the file
+  and catch the syntax errors. Hat tip to Ben Finney.
+
+
+Version 2.77, 29 July 2007
+--------------------------
+
+- Better packaging.
+
+
+Version 2.76, 23 July 2007
+--------------------------
+
+- Now Python 2.5 is *really* fully supported: the body of the new with
+  statement is counted as executable.
+
+
+Version 2.75, 22 July 2007
+--------------------------
+
+- Python 2.5 now fully supported. The method of dealing with multi-line
+  statements is now less sensitive to the exact line that Python reports during
+  execution. Pass statements are handled specially so that their disappearance
+  during execution won't throw off the measurement.
+
+
+Version 2.7, 21 July 2007
+-------------------------
+
+- "#pragma: nocover" is excluded by default.
+
+- Properly ignore docstrings and other constant expressions that appear in the
+  middle of a function, a problem reported by Tim Leslie.
+
+- coverage.erase() shouldn't clobber the exclude regex. Change how parallel
+  mode is invoked, and fix erase() so that it erases the cache when called
+  programmatically.
+
+- In reports, ignore code executed from strings, since we can't do anything
+  useful with it anyway.
+  
+- Better file handling on Linux, thanks Guillaume Chazarain.
+
+- Better shell support on Windows, thanks Noel O'Boyle.
+
+- Python 2.2 support maintained, thanks Catherine Proulx.
+
+- Minor changes to avoid lint warnings.
+
+
+Version 2.6, 23 August 2006
+---------------------------
+
+- Applied Joseph Tate's patch for function decorators.
+
+- Applied Sigve Tjora and Mark van der Wal's fixes for argument handling.
+
+- Applied Geoff Bache's parallel mode patch.
+
+- Refactorings to improve testability. Fixes to command-line logic for parallel
+  mode and collect.
+
+
+Version 2.5, 4 December 2005
+----------------------------
+
+- Call threading.settrace so that all threads are measured. Thanks Martin
+  Fuzzey.
+  
+- Add a file argument to report so that reports can be captured to a different
+  destination.
+
+- coverage.py can now measure itself.
+
+- Adapted Greg Rogers' patch for using relative filenames, and sorting and
+  omitting files to report on.
+
+
+Version 2.2, 31 December 2004
+-----------------------------
+
+- Allow for keyword arguments in the module global functions. Thanks, Allen.
+
+
+Version 2.1, 14 December 2004
+-----------------------------
+
+- Return 'analysis' to its original behavior and add 'analysis2'. Add a global
+  for 'annotate', and factor it, adding 'annotate_file'.
+
+
+Version 2.0, 12 December 2004
+-----------------------------
+
+Significant code changes.
+
+- Finding executable statements has been rewritten so that docstrings and
+  other quirks of Python execution aren't mistakenly identified as missing
+  lines.
+
+- Lines can be excluded from consideration, even entire suites of lines.
+
+- The filesystem cache of covered lines can be disabled programmatically.
+
+- Modernized the code.
+
+
+Earlier History
+---------------
+
+2001-12-04 GDR Created.
+
+2001-12-06 GDR Added command-line interface and source code annotation.
+
+2001-12-09 GDR Moved design and interface to separate documents.
+
+2001-12-10 GDR Open cache file as binary on Windows. Allow simultaneous -e and
+-x, or -a and -r.
+
+2001-12-12 GDR Added command-line help. Cache analysis so that it only needs to
+be done once when you specify -a and -r.
+
+2001-12-13 GDR Improved speed while recording. Portable between Python 1.5.2
+and 2.1.1.
+
+2002-01-03 GDR Module-level functions work correctly.
+
+2002-01-07 GDR Update sys.path when running a file with the -x option, so that
+it matches the value the program would get if it were run on its own.
+# For some reason, building the egg includes everything!
+exclude *.* *
+include coverage.egg-info/*.*
+include coverage/*.py
+include ez_setup.py
+include setup.py
+include README.txt
+# Makefile for utility work on coverage.py
+
+default:
+	@echo "* No default action *"
+
+TEST_ZIP = test/zipmods.zip
+
+clean:
+	-rm -rf build
+	-rm -rf dist
+	-rm -rf coverage.egg-info
+	-rm -f *.pyd */*.pyd 
+	-rm -f *.pyc */*.pyc */*/*.pyc */*/*/*.pyc
+	-rm -f *.pyo */*.pyo */*/*.pyo */*/*/*.pyo
+	-rm -f *.bak */*.bak */*/*.bak */*/*/*.bak
+	-rm -f MANIFEST
+	-rm -f .coverage .coverage.*
+	-rm -f $(TEST_ZIP)
+
+lint: clean
+	python -x /Python25/Scripts/pylint.bat --rcfile=.pylintrc coverage
+	python /Python25/Lib/tabnanny.py coverage
+	python checkeol.py
+
+tests: $(TEST_ZIP)
+	python test_coverage.py
+
+$(TEST_ZIP): test/covmodzip1.py
+	zip -j $@ $+
+
+coverage:
+	python coverage_coverage.py
+
+WEBHOME = c:/ned/web/stellated/pages/code/modules
+
+publish: kit
+	cp coverage.py $(WEBHOME)
+	cp test_coverage.py $(WEBHOME)
+	cp coverage_coverage.py $(WEBHOME)
+	cp doc/coverage.px $(WEBHOME)
+	cp dist/coverage*.tar.gz $(WEBHOME)
+
+kit:
+	python setup.py sdist --formats=gztar
+	python setup.py bdist_wininst
+
+pypi:
+	python setup.py register
+
+install:
+	python setup.py install
+
+devinst:
+	python setup.py develop
+
+uninstall:
+	-rm -rf $(PYHOME)/lib/site-packages/coverage*
+	-rm -rf $(PYHOME)/scripts/coverage*
+Coverage: code coverage testing for Python
+
+Coverage.py is a Python module that measures code coverage during test execution.
+It uses the code analysis tools and tracing hooks provided in the Python standard
+library to determine which lines are executable, and which have been executed.
+
+For more information, see http://nedbatchelder.com/code/modules/coverage.html
+Coverage TODO
+
+* v3.0 beta
+
+- Windows kit.
+    - Why doesn't setup.py install work? procmon to the rescue?
+- Try installation on Ubuntu.
+- Proper project layout.
+- Code moved to Google code.
+- Investigate package over module installation.
+
+
+* BUGS
+
++ Threading is broken: C and Python trace fns called differently?
+
+
+* Speed
+
++ C extension collector
+- Ignore certain modules
+- Tricky swapping of collector like figleaf, pycov, et al.
+- Seems like there should be a faster way to manage all the line number sets in
+    CodeAnalyzer.raw_analyze.
+- If tracing, canonical_filename_cache overlaps with should_trace_cache.  Skip
+    canonical_filename_cache. Maybe it isn't even worth it...
+
+* Accuracy
+
+- Record magic number of module to ensure code hasn't changed
+- Record version of coverage data file, so we can update what's stored there.
+- Record options in coverage data file, so multiple runs are certain to make
+    sense together.
+- Do I still need the lines in annotate_file that deal specially with "else"?
+
+* Power
+
+- API for getting coverage data.
+- Instruction tracing instead of line tracing.
+- Path tracing (how does this even work?)
+- Branch coverage
+- Count execution of lines
+- Track callers of functions (ala std module trace)
+- Method/Class/Module coverage reporting.
+
+* Convenience
+
+- Why can't you specify execute (-x) and report (-r) in the same invocation?
+    Maybe just because -x needs the rest of the command line?
+- How will coverage.py package install over coverage.py module?
+- Support 2.3 - 3.0?
+    http://pythonology.blogspot.com/2009/02/making-code-run-on-python-20-through-30.html
+
+* Beauty
+
+- HTML report
+- Syntax coloring in HTML report
+- Dynamic effects in HTML report
+- Footer in reports pointing to coverage home page.
+
+* Community
+
+- New docs, rather than pointing to Gareth's
+    - Min version is 2.3.
+    - Distinction between ignore (files not to trace), exclude (lines not to trace),
+        and omit (files not to report)
+    - Changes from coverage 2.x:
+        - Bare "except:" lines now count as executable code.
+        - Double function decorators: all decorator lines count as executable code.
++ Be sure --help text is complete (-i is missing).
+- Host the project somewhere with a real bug tracker, google code I guess.
+- Point discussion to TIP
+- PEP 8 compliance?
+
+* Modernization
+
++ Decide on minimum supported version
+    + 2.3
+    + Get rid of the basestring protection
+    + Use enumerate
+    + Use sets instead of dicts
+- Get rid of the recursive nonsense.
+- Docstrings.
+- Remove huge document-style comments.
++ Remove singleton
+    + Initialization of instance variables in the class.
+- Better names:
+    + self.cache -> self.cache_filename -> CoverageData.filename
+    + self.usecache -> CoverageData.use_file
+- More classes:
+    - Module munging
+    + Coverage data files
+- Why are some imports at the top of the file, and some in functions?
++ Get rid of sys.exitfunc use.
++ True and False (with no backward adaptation: the constants are new in 2.2.1)
++ Get rid of compiler module
+    + In analyzing code
+    + In test_coverage.py
+- Style:
+    + lineno
+    + filename
+
+* Correctness
+
+- What does -p (parallel mode) mean with -e (erase data)?
+
+* Tests
+
+- Tests about the .coverage file.
+- Tests about the --long-form of arguments.
+- Tests about overriding the .coverage filename.
+- Tests about parallel mode.
++ Tests about assigning a multi-line string.
+- Tests about tricky docstrings.
+- Coverage test coverage.py!
+- Tests that tracing stops after calling stop()
+- More intensive thread testing.
+call \ned\bin\switchpy 23
+python setup.py sdist --formats=gztar
+python setup.py bdist_wininst
+call \ned\bin\switchpy 24
+python setup.py sdist --formats=gztar
+python setup.py bdist_wininst
+call \ned\bin\switchpy 25
+python setup.py sdist --formats=gztar
+python setup.py bdist_wininst
+call \ned\bin\switchpy 26
+python setup.py sdist --formats=gztar
+python setup.py bdist_wininst

File alltests.cmd

+call \ned\bin\switchpy 23
+python setup.py develop
+python test_coverage.py
+call \ned\bin\switchpy 24
+python setup.py develop
+python test_coverage.py
+call \ned\bin\switchpy 25
+python setup.py develop
+python test_coverage.py
+call \ned\bin\switchpy 26
+python setup.py develop
+python test_coverage.py
+# Check files for incorrect newlines
+
+import fnmatch, os
+
+def check_file(fname):
+    for n, line in enumerate(open(fname, "rb")):
+        if "\r" in line:
+            print "%s@%d: CR found" % (fname, n)
+            return
+
+def check_files(root, patterns):
+    for root, dirs, files in os.walk(root):
+        for f in files:
+            fname = os.path.join(root, f)
+            for p in patterns:
+                if fnmatch.fnmatch(fname, p):
+                    check_file(fname)
+                    break
+        if '.svn' in dirs:
+            dirs.remove('.svn')
+
+check_files("coverage", ["*.py"])
+check_files("test", ["*.py"])
+check_file("setup.py")

File coverage/__init__.py

+"""Code coverage measurement for Python.
+
+Ned Batchelder
+http://nedbatchelder.com/code/modules/coverage.html
+
+"""
+
+__version__ = "3.0b1"    # see detailed history in CHANGES
+
+import sys
+
+from coverage.control import coverage
+from coverage.data import CoverageData
+from coverage.cmdline import main, CoverageScript
+from coverage.misc import CoverageException
+
+
+# Module-level functions.  The original API to this module was based on
+# functions defined directly in the module, with a singleton of the coverage()
+# class.  This design hampered programmability.  Here we define the top-level
+# functions to create the singleton when they are first called.
+
+# Singleton object for use with module-level functions.  The singleton is
+# created as needed when one of the module-level functions is called.
+the_coverage = None
+
+def call_singleton_method(name, args, kwargs):
+    """Invoke method `name` on the shared singleton, creating it on first use."""
+    global the_coverage
+    if not the_coverage:
+        the_coverage = coverage()
+    return getattr(the_coverage, name)(*args, **kwargs)
+
+# Names of the singleton methods to re-expose as module-level functions,
+# preserving the original coverage.py module API.
+mod_funcs = """
+    use_cache start stop erase begin_recursive end_recursive exclude
+    analysis analysis2 report annotate annotate_file
+    """
+
+coverage_module = sys.modules[__name__]
+
+for func_name in mod_funcs.split():
+    # Have to define a function here to make a closure so the function name
+    # is locked in.  A lambda closing over the loop variable directly would
+    # late-bind, making every function forward to the last name in the list.
+    def func(name):
+        return lambda *a, **kw: call_singleton_method(name, a, kw)
+    setattr(coverage_module, func_name, func(func_name))
+
+
+# COPYRIGHT AND LICENSE
+#
+# Copyright 2001 Gareth Rees.  All rights reserved.
+# Copyright 2004-2009 Ned Batchelder.  All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the
+#    distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+# DAMAGE.

File coverage/analyzer.py

+"""Code analysis for coverage.py"""
+
+import re, token, tokenize, types
+import cStringIO as StringIO
+
+from coverage.misc import nice_pair, CoverageException
+
+
+# Python version compatibility
+try:
+    set()       # new in 2.4
+except NameError:
+    # Python 2.3: no builtin set, so substitute the sets module's Set.
+    import sets
+    set = sets.Set      # pylint: disable-msg=W0622
+    
+
+class CodeAnalyzer:
+    """Analyze code to find executable lines, excluded lines, etc."""
+    
+    def __init__(self, show_tokens=False):
+        # If true, raw_analyze prints each token as it is processed.
+        self.show_tokens = show_tokens
+
+        # The text lines of the analyzed code.
+        self.lines = None
+
+        # The line numbers of excluded lines of code.
+        self.excluded = set()
+        
+        # The line numbers of docstring lines.
+        self.docstrings = set()
+        
+        # A dict mapping line numbers to (lo,hi) for multi-line statements.
+        self.multiline = {}
+        
+        # The line numbers that start statements.
+        self.statement_starts = set()
+
+    def find_statement_starts(self, code):
+        """Find the starts of statements in compiled code.
+    
+        Uses co_lnotab described in Python/compile.c to find line numbers that
+        start statements, adding them to `self.statement_starts`.
+    
+        """
+        # Adapted from dis.py in the standard library.
+        # co_lnotab is a packed byte string of (byte_incr, line_incr) pairs:
+        # even offsets hold bytecode deltas, odd offsets hold line deltas.
+        byte_increments = [ord(c) for c in code.co_lnotab[0::2]]
+        line_increments = [ord(c) for c in code.co_lnotab[1::2]]
+    
+        last_line_num = None
+        line_num = code.co_firstlineno
+        for byte_incr, line_incr in zip(byte_increments, line_increments):
+            if byte_incr:
+                # A nonzero bytecode delta means code was emitted for the
+                # current line; record it once.
+                if line_num != last_line_num:
+                    self.statement_starts.add(line_num)
+                    last_line_num = line_num
+            line_num += line_incr
+        # Don't forget the line for the final run of bytecode.
+        if line_num != last_line_num:
+            self.statement_starts.add(line_num)
+
+    def find_statements(self, code):
+        """Find the statements in `code`.
+        
+        Update `self.statement_starts`, a set of line numbers that start
+        statements.  Recurses into all code objects reachable from `code`.
+        
+        """
+        # Adapted from trace.py in the standard library.
+
+        # Get all of the lineno information from this code.
+        self.find_statement_starts(code)
+    
+        # Check the constants for references to other code objects.
+        for c in code.co_consts:
+            if isinstance(c, types.CodeType):
+                # Found another code object, so recurse into it.
+                self.find_statements(c)
+
+    def raw_analyze(self, text=None, filename=None, exclude=None):
+        """Analyze `text` to find the interesting facts about its lines.
+        
+        A handful of member fields are updated.
+        
+        """
+        # Read the source from `filename` if no text was given directly.
+        if not text:
+            sourcef = open(filename, 'rU')
+            text = sourcef.read()
+            sourcef.close()
+        text = text.replace('\r\n', '\n')
+        self.lines = text.split('\n')
+
+        # Find lines which match an exclusion pattern.
+        if exclude:
+            re_exclude = re.compile(exclude)
+            for i, ltext in enumerate(self.lines):
+                if re_exclude.search(ltext):
+                    self.excluded.add(i+1)
+    
+        # Tokenize, to find excluded suites, to find docstrings, and to find
+        # multi-line statements.
+        # State for the scan: current indent depth, the depth at which an
+        # exclusion started, whether we are inside an excluded suite, the
+        # previous token type, and the first line of the current statement.
+        indent = 0
+        exclude_indent = 0
+        excluding = False
+        prev_toktype = token.INDENT
+        first_line = None
+
+        tokgen = tokenize.generate_tokens(StringIO.StringIO(text).readline)
+        for toktype, ttext, (slineno, _), (elineno, _), ltext in tokgen:
+            if self.show_tokens:
+                print "%10s %5s %-20r %r" % (
+                    tokenize.tok_name.get(toktype, toktype),
+                    nice_pair((slineno, elineno)), ttext, ltext
+                    )
+            if toktype == token.INDENT:
+                indent += 1
+            elif toktype == token.DEDENT:
+                indent -= 1
+            elif toktype == token.OP and ttext == ':':
+                if not excluding and elineno in self.excluded:
+                    # Start excluding a suite.  We trigger off of the colon
+                    # token so that the #pragma comment will be recognized on
+                    # the same line as the colon.
+                    exclude_indent = indent
+                    excluding = True
+            elif toktype == token.STRING and prev_toktype == token.INDENT:
+                # Strings that are first on an indented line are docstrings.
+                # (a trick from trace.py in the stdlib.)
+                for i in xrange(slineno, elineno+1):
+                    self.docstrings.add(i)
+            elif toktype == token.NEWLINE:
+                # token.NEWLINE ends a logical line (unlike tokenize.NL,
+                # which is a non-logical line break).
+                if first_line is not None and elineno != first_line:
+                    # We're at the end of a line, and we've ended on a
+                    # different line than the first line of the statement,
+                    # so record a multi-line range.
+                    rng = (first_line, elineno)
+                    for l in xrange(first_line, elineno+1):
+                        self.multiline[l] = rng
+                first_line = None
+                
+            if ttext.strip() and toktype != tokenize.COMMENT:
+                # A non-whitespace token.
+                if first_line is None:
+                    # The token is not whitespace, and is the first in a
+                    # statement.
+                    first_line = slineno
+                    # Check whether to end an excluded suite.
+                    if excluding and indent <= exclude_indent:
+                        excluding = False
+                    if excluding:
+                        self.excluded.add(elineno)
+                        
+            prev_toktype = toktype
+
+        # Find the starts of the executable statements.
+        filename = filename or "<code>"
+        try:
+            # Python 2.3 and 2.4 don't like partial last lines, so be sure the
+            # text ends nicely for them.
+            text += '\n'
+            code = compile(text, filename, "exec")
+        except SyntaxError, synerr:
+            raise CoverageException(
+                "Couldn't parse '%s' as Python source: '%s' at line %d" %
+                    (filename, synerr.msg, synerr.lineno)
+                )
+
+        self.find_statements(code)
+
+    def map_to_first_line(self, lines, ignore=None):
+        """Map the line numbers in `lines` to the correct first line of the
+        statement.
+        
+        Skip any line mentioned in `ignore`.
+        
+        Returns a sorted list of the first lines.
+        
+        """
+        ignore = ignore or []
+        lset = set()
+        for l in lines:
+            if l in ignore:
+                continue
+            # If the line is part of a multi-line statement, use the
+            # statement's first line instead.
+            rng = self.multiline.get(l)
+            if rng:
+                new_l = rng[0]
+            else:
+                new_l = l
+            if new_l not in ignore:
+                lset.add(new_l)
+        lines = list(lset)
+        lines.sort()
+        return lines
+    
+    def analyze_source(self, text=None, filename=None, exclude=None):
+        """Analyze source text to find executable lines, excluded lines, etc.
+        
+        Source can be provided as `text`, the text itself, or `filename`, from
+        which text will be read.  Excluded lines are those that match `exclude`,
+        a regex.
+        
+        Return values are 1) a sorted list of executable line numbers,
+        2) a sorted list of excluded line numbers, and 3) a dict mapping line
+        numbers to pairs (lo,hi) for multi-line statements.
+        
+        """
+        self.raw_analyze(text, filename, exclude)
+        
+        # Excluded and docstring lines don't count as executable statements.
+        excluded_lines = self.map_to_first_line(self.excluded)
+        ignore = excluded_lines + list(self.docstrings)
+        lines = self.map_to_first_line(self.statement_starts, ignore)
+    
+        return lines, excluded_lines, self.multiline
+
+    def print_analysis(self):
+        """Print the results of the analysis."""
+        # Three flag columns per line:
+        # '-' statement start, '"' docstring line, 'x' excluded line.
+        for i, ltext in enumerate(self.lines):
+            lineno = i+1
+            m0 = m1 = m2 = ' '
+            if lineno in self.statement_starts:
+                m0 = '-'
+            if lineno in self.docstrings:
+                m1 = '"'
+            if lineno in self.excluded:
+                m2 = 'x'
+            print "%4d %s%s%s %s" % (lineno, m0, m1, m2, ltext)
+
+
if __name__ == '__main__':
    import sys

    # Analyze the file named on the command line, echoing tokens as they are
    # scanned, then print the per-line analysis markers.
    anal = CodeAnalyzer(show_tokens=True)
    anal.raw_analyze(filename=sys.argv[1], exclude=r"no\s*cover")
    anal.print_analysis()

File coverage/cmdline.py

"""Command-line support for coverage.py"""

import getopt, os, sys

# Help text printed by -h (and on usage errors).  It is interpolated with the
# coverage module's __dict__, so %(__version__)s picks up the package version.
USAGE = r"""
Coverage version %(__version__)s

Usage:

coverage -x [-p] MODULE.py [ARG1 ARG2 ...]
    Execute module, passing the given command-line arguments, collecting
    coverage data. With the -p option, write to a temporary file containing
    the machine name and process ID.

coverage -e
    Erase collected coverage data.

coverage -c
    Combine data from multiple coverage files (as created by -p option above)
    and store it into a single file representing the union of the coverage.

coverage -r [-m] [-i] [-o DIR,...] [FILE1 FILE2 ...]
    Report on the statement coverage for the given files.  With the -m
    option, show line numbers of the statements that weren't executed.

coverage -a [-d DIR] [-i] [-o DIR,...] [FILE1 FILE2 ...]
    Make annotated copies of the given files, marking statements that
    are executed with > and statements that are missed with !.  With
    the -d option, make the copies in that directory.  Without the -d
    option, make each copy in the same directory as the original.

-h  Print this help.

-i  Ignore errors while reporting or annotating.

-o DIR,...
    Omit reporting or annotating files when their filename path starts with
    a directory listed in the omit list.
    e.g. coverage -i -r -o c:\python25,lib\enthought\traits

Coverage data is saved in the file .coverage by default.  Set the
COVERAGE_FILE environment variable to save it somewhere else.
""".strip()
+
class CoverageScript:
    """Implements the command-line interface of coverage.py."""

    def __init__(self):
        # Import here (not at module top) so the coverage object is only
        # created when the script actually runs.
        import coverage
        self.covpkg = coverage
        self.coverage = coverage.coverage()

    def help(self, error=None):     #pragma: no cover
        """Print an optional error message, the usage text, and exit."""
        if error:
            print error
            print
        print USAGE % self.covpkg.__dict__
        sys.exit(1)

    def command_line(self, argv, help_fn=None):
        """Parse `argv` and carry out the requested actions.

        `help_fn` can replace the default help function, mainly for testing.

        """
        # Collect the command-line options.
        help_fn = help_fn or self.help
        settings = {}
        # Map from short option to canonical setting name.  A trailing ':'
        # on the option (and '=' on the name) marks options taking a value.
        optmap = {
            '-a': 'annotate',
            '-c': 'combine',
            '-d:': 'directory=',
            '-e': 'erase',
            '-h': 'help',
            '-i': 'ignore-errors',
            '-m': 'show-missing',
            '-p': 'parallel-mode',
            '-r': 'report',
            '-x': 'execute',
            '-o:': 'omit=',
            }
        short_opts = ''.join(map(lambda o: o[1:], optmap.keys()))
        long_opts = optmap.values()
        options, args = getopt.getopt(argv, short_opts, long_opts)
        # Fill `settings` from both short and long spellings of each option.
        for o, a in options:
            if optmap.has_key(o):
                settings[optmap[o]] = True
            elif optmap.has_key(o + ':'):
                settings[optmap[o + ':']] = a
            elif o[2:] in long_opts:
                settings[o[2:]] = True
            elif o[2:] + '=' in long_opts:
                settings[o[2:]+'='] = a

        if settings.get('help'):
            help_fn()

        # Check for conflicts and problems in the options.
        for i in ['erase', 'execute']:
            for j in ['annotate', 'report', 'combine']:
                if settings.get(i) and settings.get(j):
                    help_fn("You can't specify the '%s' and '%s' "
                              "options at the same time." % (i, j))

        args_needed = (settings.get('execute')
                       or settings.get('annotate')
                       or settings.get('report'))
        action = (settings.get('erase') 
                  or settings.get('combine')
                  or args_needed)
        if not action:
            help_fn("You must specify at least one of -e, -x, -c, -r, or -a.")
        if not args_needed and args:
            help_fn("Unexpected arguments: %s" % " ".join(args))
        
        # Do something.
        self.coverage.parallel_mode = settings.get('parallel-mode')
        self.coverage.get_ready()

        if settings.get('erase'):
            self.coverage.erase()
        if settings.get('execute'):
            if not args:
                help_fn("Nothing to do.")
            # Run the module as if it were the program on the command line:
            # fix up sys.argv and sys.path, and execute it as __main__.
            sys.argv = args
            self.coverage.start()
            import __main__
            sys.path[0] = os.path.dirname(sys.argv[0])
            execfile(sys.argv[0], __main__.__dict__)
        if settings.get('combine'):
            self.coverage.combine()
        if not args:
            # For report and annotate, if no files are given on the command
            # line, then report or annotate everything that was executed.
            args = self.coverage.data.executed.keys()  # TODO: Yikes!

        ignore_errors = settings.get('ignore-errors')
        show_missing = settings.get('show-missing')
        directory = settings.get('directory=')

        omit = settings.get('omit=')
        if omit is not None:
            omit = [self.coverage.abs_file(p) for p in omit.split(',')]
        else:
            omit = []
        
        if settings.get('report'):
            self.coverage.report(args, show_missing, ignore_errors, omit_prefixes=omit)
        if settings.get('annotate'):
            self.coverage.annotate(args, directory, ignore_errors, omit_prefixes=omit)
+
+    
+# Main entrypoint.  This is installed as the script entrypoint, so don't
+# refactor it away...
def main():
    """Command-line entry point: run the script against this process's args."""
    script = CoverageScript()
    script.command_line(sys.argv[1:])

File coverage/collector.py

+"""Raw data collector for coverage.py."""
+
+import sys, threading
+
try:
    # Use the C extension code when we can, for speed.
    from coverage.tracer import Tracer
except ImportError:
    # If we don't have the C tracer, use this Python one.
    class Tracer:
        """Python implementation of the raw data tracer.

        Before `start` is called, three attributes must be set on the
        instance: `data` (a dict receiving (filename, lineno) keys),
        `should_trace` (a function from filename to trace name or False),
        and `should_trace_cache` (a dict caching should_trace results).

        """
        def __init__(self):
            self.cur_filename = None
            self.filename_stack = []

        def _global_trace(self, frame, event, arg_unused):
            """The trace function passed to sys.settrace."""
            if event == 'call':
                # Entering a new frame: decide whether to trace in this file,
                # caching the decision since it never changes for a file.
                filename = frame.f_code.co_filename
                tracename = self.should_trace_cache.get(filename)
                if tracename is None:
                    tracename = self.should_trace(filename)
                    self.should_trace_cache[filename] = tracename
                if tracename:
                    self.filename_stack.append(self.cur_filename)
                    self.cur_filename = tracename
                    return self._local_trace
                else:
                    return None
            # Bug fix: this used to be `return self.trace`, an attribute
            # that doesn't exist anywhere in the class, so any non-'call'
            # event reaching here raised AttributeError.  Keep using this
            # function as the global trace function.
            return self._global_trace

        def _local_trace(self, frame, event, arg_unused):
            """Trace function used inside a frame we are measuring."""
            if event == 'line':
                # Record the line as executed.
                self.data[(self.cur_filename, frame.f_lineno)] = True
            elif event == 'return':
                # Leaving the frame: restore the enclosing filename.
                self.cur_filename = self.filename_stack.pop()
            return self._local_trace

        def start(self):
            """Install this tracer on the current thread."""
            sys.settrace(self._global_trace)

        def stop(self):
            """Remove the trace function from the current thread."""
            sys.settrace(None)
+
+
class Collector:
    """Collects trace data.

    Creates a Tracer object for each thread, since they track stack information.
    Each Tracer points to the same shared data, contributing traced data points.

    """

    def __init__(self, should_trace):
        """Create a collector.

        `should_trace` is a function, taking a filename, and returns a
        canonicalized filename, or False depending on whether the file should be
        traced or not.

        """
        self.should_trace = should_trace
        self.reset()

    def reset(self):
        """Forget all collected data and cached trace decisions."""
        # Keys are (source filename, line number) pairs; the presence of a
        # key means that line was executed.
        self.data = {}
        # Memoized should_trace() answers, keyed by filename.
        self.should_trace_cache = {}

    def _start_tracer(self):
        """Build, wire up, and start a Tracer for the current thread."""
        new_tracer = Tracer()
        new_tracer.data = self.data
        new_tracer.should_trace = self.should_trace
        new_tracer.should_trace_cache = self.should_trace_cache
        new_tracer.start()
        return new_tracer

    # The trace function has to be set individually on each thread before
    # execution begins.  Ironically, the only support the threading module has
    # for running code before the thread main is the tracing function.  So we
    # install this as a trace function, and the first time it's called, it does
    # the real trace installation.

    def _installation_trace(self, frame_unused, event_unused, arg_unused):
        """Called on new threads, installs the real tracer."""
        # Remove ourselves as the trace function, then install the real one.
        sys.settrace(None)
        self._start_tracer()
        # Return None to reiterate that we shouldn't be used for tracing.
        return None

    def start(self):
        """Start collecting on this thread, and on threads started later."""
        self.tracer = self._start_tracer()
        # Jump-start tracing on threads the threading module creates.
        threading.settrace(self._installation_trace)

    def stop(self):
        """Stop collecting trace data."""
        self.tracer.stop()
        threading.settrace(None)

    def data_points(self):
        """Return the (filename, lineno) pairs collected."""
        return self.data.keys()

File coverage/control.py

+"""Core control stuff for coverage.py"""
+
+import glob, os, re, sys, types
+
+from coverage.data import CoverageData
+from coverage.misc import nice_pair, CoverageException
+
+
class coverage:
    """Programmatic interface to coverage measurement and reporting.

    The class name is deliberately lower-case: instances are created as
    coverage.coverage().

    """
    def __init__(self):
        from coverage.collector import Collector
        
        self.parallel_mode = False
        self.exclude_re = ''
        self.nesting = 0
        self.cstack = []
        self.xstack = []
        self.relative_dir = self.abs_file(os.curdir)+os.sep
        
        self.collector = Collector(self.should_trace)
        
        self.data = CoverageData()
    
        # Cache of results of calling the analysis2() method, so that you can
        # specify both -r and -a without doing double work.
        self.analysis_cache = {}
    
        # Cache of results of calling the canonical_filename() method, to
        # avoid duplicating work.
        self.canonical_filename_cache = {}
    
        # The default exclude pattern.
        self.exclude('# *pragma[: ]*[nN][oO] *[cC][oO][vV][eE][rR]')

        # Save coverage data when Python exits.
        import atexit
        atexit.register(self.save)

    def should_trace(self, filename):
        """Decide whether to trace execution in `filename`
        
        Returns a canonicalized filename if it should be traced, False if it
        should not.
        """
        if filename == '<string>':
            # There's no point in ever tracing string executions, we can't do
            # anything with the data later anyway.
            return False
        # TODO: flag: ignore std lib?
        # TODO: ignore by module as well as file?
        return self.canonical_filename(filename)

    def use_cache(self, usecache, cache_file=None):
        """Control use of the on-disk data file, delegating to the data object."""
        self.data.usefile(usecache, cache_file)
        
    def get_ready(self):
        """Reset collection and load any previously saved data."""
        self.collector.reset()
        self.data.read(parallel=self.parallel_mode)
        self.analysis_cache = {}
        
    def start(self):
        """Start collecting data.  Nestable: only the outermost call collects."""
        self.get_ready()
        if self.nesting == 0:                               #pragma: no cover
            self.collector.start()
        self.nesting += 1
        
    def stop(self):
        """Stop collecting data, balancing a prior start()."""
        self.nesting -= 1
        if self.nesting == 0:                               #pragma: no cover
            self.collector.stop()

    def erase(self):
        """Discard all collected and saved data."""
        self.get_ready()
        self.collector.reset()
        self.analysis_cache = {}
        self.data.erase()

    def exclude(self, regex):
        """Add `regex` to the patterns marking lines as excluded.

        Patterns accumulate as alternatives in self.exclude_re.

        """
        if self.exclude_re:
            self.exclude_re += "|"
        self.exclude_re += "(" + regex + ")"

    def begin_recursive(self):
        """Save the exclusion state for a nested measurement."""
        #self.cstack.append(self.c)
        self.xstack.append(self.exclude_re)
        
    def end_recursive(self):
        """Restore the exclusion state saved by begin_recursive()."""
        #self.c = self.cstack.pop()
        self.exclude_re = self.xstack.pop()

    def save(self):
        """Save the collected coverage data to the data file."""
        self.group_collected_data()
        self.data.write()

    def combine(self):
        """Entry point for combining together parallel-mode coverage data."""
        self.data.combine_parallel_data()

    def get_zip_data(self, filename):
        """ Get data from `filename` if it is a zip file path, or return None
            if it is not.
        """
        import zipimport
        markers = ['.zip'+os.sep, '.egg'+os.sep]
        for marker in markers:
            if marker in filename:
                parts = filename.split(marker)
                try:
                    zi = zipimport.zipimporter(parts[0]+marker[:-1])
                except zipimport.ZipImportError:
                    continue
                try:
                    data = zi.get_data(parts[1])
                except IOError:
                    continue
                return data
        return None

    def abs_file(self, filename):
        """ Helper function to turn a filename into an absolute normalized
            filename.
        """
        return os.path.normcase(os.path.abspath(os.path.realpath(filename)))

    def relative_filename(self, filename):
        """ Convert filename to relative filename from self.relative_dir.
        """
        return filename.replace(self.relative_dir, "")

    def canonical_filename(self, filename):
        """Return a canonical filename for `filename`.
        
        An absolute path with no redundant components and normalized case.
        
        """
        if not self.canonical_filename_cache.has_key(filename):
            f = filename
            if os.path.isabs(f) and not os.path.exists(f):
                # An absolute path that doesn't exist: maybe it's relative to
                # something on sys.path, unless it lives in a zip file.
                if not self.get_zip_data(f):
                    f = os.path.basename(f)
            if not os.path.isabs(f):
                for path in [os.curdir] + sys.path:
                    g = os.path.join(path, f)
                    if os.path.exists(g):
                        f = g
                        break
            cf = self.abs_file(f)
            self.canonical_filename_cache[filename] = cf
        return self.canonical_filename_cache[filename]

    def group_collected_data(self):
        """Group the collected data by filename and reset the collector."""
        self.data.add_raw_data(self.collector.data_points())
        self.collector.reset()

    # analyze_morf(morf).  Analyze the module or filename passed as
    # the argument.  If the source code can't be found, raise an error.
    # Otherwise, return a tuple of (1) the canonical filename of the
    # source code for the module, (2) a list of lines of statements
    # in the source code, (3) a list of lines of excluded statements,
    # and (4), a map of line numbers to multi-line line number ranges, for
    # statements that cross lines.

    # The word "morf" means a module object (from which the source file can
    # be deduced by suitable manipulation of the __file__ attribute) or a
    # filename.
    
    def analyze_morf(self, morf):
        from coverage.analyzer import CodeAnalyzer

        if self.analysis_cache.has_key(morf):
            return self.analysis_cache[morf]
        orig_filename = filename = self.morf_filename(morf)
        ext = os.path.splitext(filename)[1]
        source = None
        if ext == '.pyc':
            # Analysis needs source, not compiled code: look for the .py file.
            filename = filename[:-1]
            ext = '.py'
        if ext == '.py':
            if not os.path.exists(filename):
                # Maybe the source is inside a zip/egg.
                source = self.get_zip_data(filename)
                if not source:
                    raise CoverageException(
                        "No source for code '%s'." % orig_filename
                        )

        analyzer = CodeAnalyzer()
        lines, excluded_lines, line_map = analyzer.analyze_source(
            text=source, filename=filename, exclude=self.exclude_re
            )

        result = filename, lines, excluded_lines, line_map
        self.analysis_cache[morf] = result
        return result

    # format_lines(statements, lines).  Format a list of line numbers
    # for printing by coalescing groups of lines as long as the lines
    # represent consecutive statements.  This will coalesce even if
    # there are gaps between statements, so if statements =
    # [1,2,3,4,5,10,11,12,13,14] and lines = [1,2,5,10,11,13,14] then
    # format_lines will return "1-2, 5-11, 13-14".

    def format_lines(self, statements, lines):
        # NOTE(review): `pairs` is initialized twice; the second assignment
        # below makes this first one redundant.
        pairs = []
        i = 0
        j = 0
        start = None
        pairs = []
        # Walk `statements` and `lines` together (both sorted), extending the
        # current (start, end) run while statements keep appearing in `lines`.
        while i < len(statements) and j < len(lines):
            if statements[i] == lines[j]:
                if start == None:
                    start = lines[j]
                end = lines[j]
                j = j + 1
            elif start:
                pairs.append((start, end))
                start = None
            i = i + 1
        if start:
            pairs.append((start, end))
        ret = ', '.join(map(nice_pair, pairs))
        return ret

    # Backward compatibility with version 1.
    def analysis(self, morf):
        """Version-1 style analysis: drop the excluded-lines element."""
        f, s, _, m, mf = self.analysis2(morf)
        return f, s, m, mf

    def analysis2(self, morf):
        """Analyze `morf`, returning (filename, statements, excluded,
        missing, formatted-missing)."""
        filename, statements, excluded, line_map = self.analyze_morf(morf)
        self.group_collected_data()
        
        # Identify missing statements.
        missing = []
        execed = self.data.executed_lines(filename)
        for line in statements:
            lines = line_map.get(line)
            if lines:
                # A multi-line statement counts as executed if any of its
                # lines was executed.
                for l in range(lines[0], lines[1]+1):
                    if l in execed:
                        break
                else:
                    missing.append(line)
            else:
                if line not in execed:
                    missing.append(line)
                    
        return (filename, statements, excluded, missing,
                self.format_lines(statements, missing))

    # morf_filename(morf).  Return the filename for a module or file.

    def morf_filename(self, morf):
        if hasattr(morf, '__file__'):
            f = morf.__file__
        else:
            f = morf
        return self.canonical_filename(f)

    def morf_name(self, morf):
        """ Return the name of morf as used in report.
        """
        if hasattr(morf, '__name__'):
            return morf.__name__
        else:
            return self.relative_filename(os.path.splitext(morf)[0])

    def filter_by_prefix(self, morfs, omit_prefixes):
        """ Return list of morfs where the morf name does not begin
            with any one of the omit_prefixes.
        """
        filtered_morfs = []
        for morf in morfs:
            for prefix in omit_prefixes:
                if self.morf_name(morf).startswith(prefix):
                    break
            else:
                filtered_morfs.append(morf)

        return filtered_morfs

    def morf_name_compare(self, x, y):
        """Comparison function to sort morfs by report name."""
        return cmp(self.morf_name(x), self.morf_name(y))

    def report(self, morfs, show_missing=True, ignore_errors=False, file=None, omit_prefixes=None):
        """Print a statement-coverage report for `morfs` to `file` (stdout
        by default)."""
        if not isinstance(morfs, types.ListType):
            morfs = [morfs]
        # On windows, the shell doesn't expand wildcards.  Do it here.
        globbed = []
        for morf in morfs:
            if isinstance(morf, basestring) and ('?' in morf or '*' in morf):
                globbed.extend(glob.glob(morf))
            else:
                globbed.append(morf)
        morfs = globbed

        if omit_prefixes:
            morfs = self.filter_by_prefix(morfs, omit_prefixes)
        morfs.sort(self.morf_name_compare)

        # Build the report formats, sized to the longest name.
        max_name = max(5, max(map(len, map(self.morf_name, morfs))))
        fmt_name = "%%- %ds  " % max_name
        fmt_err = fmt_name + "%s: %s"
        header = fmt_name % "Name" + " Stmts   Exec  Cover"
        fmt_coverage = fmt_name + "% 6d % 6d % 5d%%"
        if show_missing:
            header = header + "   Missing"
            fmt_coverage = fmt_coverage + "   %s"
        if not file:
            file = sys.stdout
        print >>file, header
        print >>file, "-" * len(header)
        total_statements = 0
        total_executed = 0
        for morf in morfs:
            name = self.morf_name(morf)
            try:
                _, statements, _, missing, readable  = self.analysis2(morf)
                n = len(statements)
                m = n - len(missing)
                if n > 0:
                    pc = 100.0 * m / n
                else:
                    pc = 100.0
                args = (name, n, m, pc)
                if show_missing:
                    args = args + (readable,)
                print >>file, fmt_coverage % args
                total_statements = total_statements + n
                total_executed = total_executed + m
            except KeyboardInterrupt:                       #pragma: no cover
                raise
            except:
                # Report the per-file failure inline, unless told otherwise.
                if not ignore_errors:
                    typ, msg = sys.exc_info()[:2]
                    print >>file, fmt_err % (name, typ, msg)
        if len(morfs) > 1:
            # Print a totals line.
            print >>file, "-" * len(header)
            if total_statements > 0:
                pc = 100.0 * total_executed / total_statements
            else:
                pc = 100.0
            args = ("TOTAL", total_statements, total_executed, pc)
            if show_missing:
                args = args + ("",)
            print >>file, fmt_coverage % args

    # annotate(morfs, ignore_errors).

    # Lines that are blank/comment-only, or contain only 'else:', need
    # special treatment when annotating.
    blank_re = re.compile(r"\s*(#|$)")
    else_re = re.compile(r"\s*else\s*:\s*(#|$)")

    def annotate(self, morfs, directory=None, ignore_errors=False, omit_prefixes=None):
        """Write annotated copies of `morfs`, marking executed and missed
        statements."""
        if omit_prefixes:
            morfs = self.filter_by_prefix(morfs, omit_prefixes)
        for morf in morfs:
            try:
                filename, statements, excluded, missing, _ = self.analysis2(morf)
                self.annotate_file(filename, statements, excluded, missing, directory)
            except KeyboardInterrupt:
                raise
            except:
                if not ignore_errors:
                    raise
                
    def annotate_file(self, filename, statements, excluded, missing, directory=None):
        """Write `filename`,cover next to the source (or into `directory`),
        marking each line: '> ' executed, '! ' missed, '- ' excluded."""
        source = open(filename, 'r')
        if directory:
            dest_file = os.path.join(directory,
                                     os.path.basename(filename)
                                     + ',cover')
        else:
            dest_file = filename + ',cover'
        dest = open(dest_file, 'w')
        lineno = 0
        i = 0
        j = 0
        covered = True
        while True:
            line = source.readline()
            if line == '':
                break
            lineno = lineno + 1
            # Advance the statement and missing cursors to this line.
            while i < len(statements) and statements[i] < lineno:
                i = i + 1
            while j < len(missing) and missing[j] < lineno:
                j = j + 1
            if i < len(statements) and statements[i] == lineno:
                covered = j >= len(missing) or missing[j] > lineno
            if self.blank_re.match(line):
                dest.write('  ')
            elif self.else_re.match(line):
                # Special logic for lines containing only 'else:'.  
                if i >= len(statements) and j >= len(missing):
                    dest.write('! ')
                elif i >= len(statements) or j >= len(missing):
                    dest.write('> ')
                elif statements[i] == missing[j]:
                    dest.write('! ')
                else:
                    dest.write('> ')
            elif lineno in excluded:
                dest.write('- ')
            elif covered:
                dest.write('> ')
            else:
                dest.write('! ')
            dest.write(line)
        source.close()
        dest.close()

File coverage/data.py

+"""Coverage data for coverage.py"""
+
+import os, marshal, socket, types
+
class CoverageData:
    """Manages collected coverage data."""

    # Name of the data file (unless environment variable is set).
    filename_default = ".coverage"

    # Environment variable naming the data file.
    filename_env = "COVERAGE_FILE"

    def __init__(self):
        self.filename = None
        self.use_file = True

        # Maps canonical Python source file names to dicts whose keys are
        # the executed line numbers in that file:
        #   { 'filename1.py': { 12: True, 47: True, ... }, ... }
        self.executed = {}

    def usefile(self, use_file=True, filename_default=None):
        """Control whether a data file is used, and its default name."""
        self.use_file = use_file
        if not self.filename and filename_default:
            self.filename_default = filename_default

    def read(self, parallel=False):
        """Read coverage data from the coverage data file (if it exists)."""
        fresh = {}
        if self.use_file and not self.filename:
            name = os.environ.get(self.filename_env, self.filename_default)
            if parallel:
                # Make the data file unique per machine and process.
                name = name + "." + socket.gethostname()
                name = name + "." + str(os.getpid())
            self.filename = name
            if os.path.exists(name):
                fresh = self._read_file(name)
        self.executed = fresh

    def write(self):
        """Write the collected coverage data to a file."""
        if self.use_file and self.filename:
            self.write_file(self.filename)

    def erase(self):
        """Delete the data file, if any."""
        if self.filename and os.path.exists(self.filename):
            os.remove(self.filename)

    def write_file(self, filename):
        """Write the coverage data to `filename`."""
        outfile = open(filename, 'wb')
        try:
            marshal.dump(self.executed, outfile)
        finally:
            outfile.close()

    def read_file(self, filename):
        """Replace the current data with the data stored in `filename`."""
        self.executed = self._read_file(filename)

    def _read_file(self, filename):
        """Return the stored coverage data from the given file.

        Any problem reading or validating the file yields an empty data set.

        """
        try:
            infile = open(filename, 'rb')
            stored = marshal.load(infile)
            infile.close()
            if isinstance(stored, types.DictType):
                return stored
            return {}
        except:
            return {}

    def combine_parallel_data(self):
        """Treat self.filename as a file prefix, and combine the data from
        all of the files starting with that prefix.
        """
        data_dir, prefix = os.path.split(self.filename)
        for entry in os.listdir(data_dir or '.'):
            if entry.startswith(prefix):
                chunk = self._read_file(os.path.join(data_dir, entry))
                self._combine_data(chunk)

    def _combine_data(self, new_data):
        """Fold the `new_data` dict of dicts into `self.executed`."""
        for fname, line_dict in new_data.items():
            self.executed.setdefault(fname, {}).update(line_dict)

    def add_raw_data(self, data_points):
        """Add raw data.

        `data_points` is (filename, lineno) pairs.

        """
        for fname, lineno in data_points:
            self.executed.setdefault(fname, {})[lineno] = True

    def executed_lines(self, filename):
        """Return a mapping object such that "lineno in obj" is true if that
        line number had been executed in `filename`.
        """
        return self.executed[filename]

    def summary(self):
        """Return a dict summarizing the coverage data.

        Keys are the basename of the filenames, and values are the number of
        executed lines.  This is useful in the unit tests.

        """
        summ = {}
        for fname, line_dict in self.executed.items():
            summ[os.path.basename(fname)] = len(line_dict)
        return summ

File coverage/misc.py

+"""Miscellaneous stuff for coverage.py"""
+
def nice_pair(pair):
    """Make a nice string representation of a pair of numbers.

    If the numbers are equal, just return the number, otherwise return the pair
    with a dash between them, indicating the range.

    """
    lo, hi = pair
    if lo != hi:
        return "%d-%d" % (lo, hi)
    return "%d" % lo
+
+
class CoverageException(Exception):
    """Exception raised for errors detected by coverage.py itself."""
    pass

File coverage/tracer.c

+// C-based Tracer for coverage.py
+
+#include "Python.h"
+#include "compile.h"        // in 2.3, this wasn't part of Python.h
+#include "eval.h"           // or this.
+#include "structmember.h"
+#include "frameobject.h"
+
+// The Tracer type.
+
typedef struct {
    PyObject_HEAD
    // Callable deciding whether a file should be traced; called with the
    // filename, returns the trace name or a false value.
    PyObject * should_trace;
    // Collected data points.  NOTE(review): presumably the same dict the
    // Python Tracer fills with (filename, lineno) keys -- confirm against
    // collector.py.
    PyObject * data;
    // Cache of should_trace results, keyed by filename.
    PyObject * should_trace_cache;
    // Non-zero once the trace function has been installed.
    int started;
    // The index of the last-used entry in tracenames.
    int depth;
    // Filenames to record at each level, or NULL if not recording.
    PyObject * tracenames[300];
} Tracer;
+
// Initializer: start with no callbacks, no data, and an empty stack.
static int
Tracer_init(Tracer *self, PyObject *args, PyObject *kwds)
{
    self->depth = -1;
    self->started = 0;
    self->data = NULL;
    self->should_trace = NULL;
    self->should_trace_cache = NULL;
    return 0;
}
+
// Destructor: uninstall the trace hook if active, then release everything.
static void
Tracer_dealloc(Tracer *self)
{
    // If we're still installed as the trace function, remove ourselves first.
    if (self->started) {
        PyEval_SetTrace(NULL, NULL);
    }

    Py_XDECREF(self->should_trace_cache);
    Py_XDECREF(self->data);
    Py_XDECREF(self->should_trace);

    // Release any filenames still held on the stack.
    for (; self->depth >= 0; self->depth--) {
        Py_XDECREF(self->tracenames[self->depth]);
    }

    self->ob_type->tp_free((PyObject*)self);
}
+
// The trace callback installed by Tracer_start via PyEval_SetTrace.
// Records each executed line as a (filename, lineno) key in self->data.
// Returns 0 on success, -1 (with a Python exception set) on error.
static int
Tracer_trace(Tracer *self, PyFrameObject *frame, int what, PyObject *arg)
{
    PyObject * filename = NULL;
    PyObject * tracename = NULL;

    switch (what) {
    case PyTrace_CALL:      // 0
        // Guard BEFORE growing the stack: depth indexes tracenames, so it
        // must stay strictly below the array length.  (The original
        // post-increment ">" test allowed one write past the end, and left
        // depth out of bounds for Tracer_dealloc.)
        if (self->depth + 1 >= (int)(sizeof(self->tracenames)/sizeof(self->tracenames[0]))) {
            PyErr_SetString(PyExc_RuntimeError, "Tracer stack overflow");
            return -1;
        }
        self->depth++;

        // Decide whether we should trace this file, consulting the cache.
        filename = frame->f_code->co_filename;
        tracename = PyDict_GetItem(self->should_trace_cache, filename);
        if (tracename == NULL) {
            // We've never considered this file before.  Ask should_trace about it.
            PyObject * args = Py_BuildValue("(O)", filename);
            if (args == NULL) {
                return -1;
            }
            tracename = PyObject_Call(self->should_trace, args, NULL);
            Py_DECREF(args);
            if (tracename == NULL) {
                // An error occurred inside should_trace.
                return -1;
            }
            PyDict_SetItem(self->should_trace_cache, filename, tracename);
        }
        else {
            // PyDict_GetItem returns a borrowed reference; own it so both
            // branches hand us a reference we must dispose of.
            Py_INCREF(tracename);
        }

        // If tracename is a string, then we're supposed to trace.
        if (PyString_Check(tracename)) {
            self->tracenames[self->depth] = tracename;
        }
        else {
            // Not tracing this frame: release the reference (the original
            // code leaked it) and record nothing at this level.
            Py_DECREF(tracename);
            self->tracenames[self->depth] = NULL;
        }
        break;

    case PyTrace_RETURN:    // 3
        if (self->depth >= 0) {
            Py_XDECREF(self->tracenames[self->depth]);
            self->depth--;
        }
        break;

    case PyTrace_LINE:      // 2
        if (self->depth >= 0 && self->tracenames[self->depth]) {
            // Record (filename, lineno).  PyTuple_SetItem steals the
            // references we create here.
            PyObject * t = PyTuple_New(2);
            if (t == NULL) {
                return -1;
            }
            tracename = self->tracenames[self->depth];
            Py_INCREF(tracename);
            PyTuple_SetItem(t, 0, tracename);
            PyTuple_SetItem(t, 1, PyInt_FromLong(frame->f_lineno));
            // PyDict_SetItem takes its own references to key and value; the
            // original's extra Py_INCREF(Py_None) leaked one reference per
            // line event.
            PyDict_SetItem(self->data, t, Py_None);
            Py_DECREF(t);
        }
        break;
    }

    return 0;
}
+
// start(): install this Tracer as the global trace function.  Returns None.
static PyObject *
Tracer_start(Tracer *self, PyObject *args)
{
    self->started = 1;
    PyEval_SetTrace((Py_tracefunc)Tracer_trace, (PyObject*)self);
    return Py_BuildValue("");
}
+
// stop(): remove the trace function if we installed it.  Returns None.
static PyObject *
Tracer_stop(Tracer *self, PyObject *args)
{
    if (!self->started) {
        return Py_BuildValue("");
    }
    PyEval_SetTrace(NULL, NULL);
    self->started = 0;
    return Py_BuildValue("");
}
+
// Attributes exposed on Tracer instances (flags 0: readable and writable).
static PyMemberDef
Tracer_members[] = {
    { "should_trace",       T_OBJECT, offsetof(Tracer, should_trace), 0,        "Function indicating whether to trace a file." },
    { "data",               T_OBJECT, offsetof(Tracer, data), 0,                "The raw dictionary of trace data." },
    { "should_trace_cache", T_OBJECT, offsetof(Tracer, should_trace_cache), 0,  "Dictionary caching should_trace results." },
    { NULL }
};
+
// Methods callable from Python: start()/stop() control the trace hook.
static PyMethodDef
Tracer_methods[] = {
    { "start",  (PyCFunction) Tracer_start, METH_VARARGS, "Start the tracer" },
    { "stop",   (PyCFunction) Tracer_stop,  METH_VARARGS, "Stop the tracer" },
    { NULL }
};
+
// The Tracer type object.  Most slots are defaulted; tp_new is filled in
// at module-initialization time (see inittracer).
static PyTypeObject
TracerType = {
    PyObject_HEAD_INIT(NULL)
    0,                         /*ob_size*/
    "coverage.Tracer",         /*tp_name*/
    sizeof(Tracer),            /*tp_basicsize*/
    0,                         /*tp_itemsize*/
    (destructor)Tracer_dealloc, /*tp_dealloc*/
    0,                         /*tp_print*/
    0,                         /*tp_getattr*/
    0,                         /*tp_setattr*/
    0,                         /*tp_compare*/
    0,                         /*tp_repr*/
    0,                         /*tp_as_number*/
    0,                         /*tp_as_sequence*/
    0,                         /*tp_as_mapping*/
    0,                         /*tp_hash */
    0,                         /*tp_call*/
    0,                         /*tp_str*/
    0,                         /*tp_getattro*/
    0,                         /*tp_setattro*/
    0,                         /*tp_as_buffer*/
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /*tp_flags*/
    "Tracer objects",          /* tp_doc */
    0,                         /* tp_traverse */
    0,                         /* tp_clear */
    0,                         /* tp_richcompare */
    0,                         /* tp_weaklistoffset */
    0,                         /* tp_iter */
    0,                         /* tp_iternext */
    Tracer_methods,            /* tp_methods */
    Tracer_members,            /* tp_members */
    0,                         /* tp_getset */
    0,                         /* tp_base */
    0,                         /* tp_dict */
    0,                         /* tp_descr_get */
    0,                         /* tp_descr_set */
    0,                         /* tp_dictoffset */
    (initproc)Tracer_init,     /* tp_init */
    0,                         /* tp_alloc */
    0,                         /* tp_new */
};
+
+// Module definition
+
+void
+inittracer(void)
+{
+    PyObject* mod;
+
+    mod = Py_InitModule3("coverage.tracer", NULL, "Fast coverage tracer.");
+    if (mod == NULL) {
+        return;
+    }
+
+    TracerType.tp_new = PyType_GenericNew;
+    if (PyType_Ready(&TracerType) < 0) {
+        return;
+    }
+
+    Py_INCREF(&TracerType);
+    PyModule_AddObject(mod, "Tracer", (PyObject *)&TracerType);
+}

File coverage_coverage.py

# Coverage-test coverage.py!
#
# Runs the coverage.py test suite with coverage.py itself measuring, then
# reports coverage.py's own line coverage.

import coverage
import test_coverage
import unittest
import sys

print "Testing under Python version:\n", sys.version

# Measurement starts before the re-import below, so the module-level
# statements of coverage.py execute while tracing is active.
coverage.erase()
coverage.start()
coverage.exclude("#pragma: no cover")

# Re-import coverage to get it coverage tested!  Drop the cached module so
# the import statement re-executes coverage.py's top level under the tracer,
# then restore the original module object so every importer shares one
# instance.
covmod = sys.modules['coverage']
del sys.modules['coverage']
import coverage
sys.modules['coverage'] = coverage = covmod

suite = unittest.TestSuite()
suite.addTest(unittest.defaultTestLoader.loadTestsFromNames(["test_coverage"]))

testrunner = unittest.TextTestRunner()
testrunner.run(suite)

coverage.stop()
coverage.report("coverage.py")

File doc/coverage.px

+<?xml version="1.0" encoding="utf-8" ?>
+<page title='coverage'>
+<history>
+<what when='20041212T183900'>Created.</what>
+<what when='20051204T131100'>Updated to 2.5.</what>
+<what when='20060822T210600'>Updated to 2.6.</what>
+<what when='20061001T164600'>Added a problem description for doctest users.</what>
+<what when='20070721T211900'>Updated to 2.7.</what>
+<what when='20070722T154900'>Updated to 2.75.</what>
+<what when='20070723T201400'>Updated to 2.76.</what>
+<what when='20070729T201400'>Updated to 2.77.</what>
+<what when='20080107T071400'>Updated to 2.78.</what>
+<what when='20080525T135029'>Updated to 2.8.</what>
+<what when='20080525T172606'>Updated to 2.80.</what>
+<what when='20081012T080912'>Updated to 2.85.</what>
+</history>
+
+<p>Coverage.py is a Python module that measures code coverage during Python execution.
+It uses the code analysis tools and tracing hooks provided in the Python standard
+library to determine which lines are executable, and which have been executed.
+The original version was written by
+<a href='code/modules/rees-coverage.html'>Gareth Rees</a>.
+I've updated it to determine executable statements more accurately.
+</p>
+
+<h1>Installation</h1>
+
+<p>To install coverage, unpack the tar file, and run "setup.py install",
+or use "easy_install coverage".<