Commits

Ned Batchelder committed ee4f680

Massive eol whitespace clean-up.

Files changed (49)

 
 - Some exceptions reported by the command line interface have been cleaned up
   so that tracebacks inside coverage.py aren't shown.  Fixes `issue 23`_.
-  
+
 .. _issue 23: http://bitbucket.org/ned/coveragepy/issue/23
-  
+
 
 Version 3.1, 4 October 2009
 ---------------------------
 
 - Fixed the way the Python library was ignored.  Too much code was being
   excluded by the old approach.
-  
+
 - Tabs are now properly converted in HTML reports.  Previously indentation was
   lost.  Fixed `issue 6`_.
-  
+
 - Nested modules now get a proper flat_rootname.  Thanks, Christian Heimes.
 
 .. _issue 6: http://bitbucket.org/ned/coveragepy/issue/6
 
 - Added parameters to coverage.__init__ for options that had been set on the
   coverage object itself.
-  
+
 - Added clear_exclude() and get_exclude_list() methods for programmatic
   manipulation of the exclude regexes.
 
 
 - Executable lines are identified by reading the line number tables in the
   compiled code, removing a great deal of complicated analysis code.
-  
+
 - Precisely which lines are considered executable has changed in some cases.
   Therefore, your coverage stats may also change slightly.
 
 - The singleton coverage object is only created if the module-level functions
   are used.  This maintains the old interface while allowing better
   programmatic use of Coverage.
-  
+
 - The minimum supported Python version is 2.3.
- 
+
 
 Version 2.85, 14 September 2008
 -------------------------------
 - Add support for finding source files in eggs. Don't check for
   morfs being instances of ModuleType; instead use duck typing so that
   pseudo-modules can participate. Thanks, Imri Goldberg.
-  
+
 - Use os.realpath as part of the fixing of file names so that symlinks won't
   confuse things. Thanks, Patrick Mezard.
 
 
 - In reports, ignore code executed from strings, since we can't do anything
   useful with it anyway.
-  
+
 - Better file handling on Linux, thanks Guillaume Chazarain.
 
 - Better shell support on Windows, thanks Noel O'Boyle.
 
 - Call threading.settrace so that all threads are measured. Thanks Martin
   Fuzzey.
-  
+
 - Add a file argument to report so that reports can be captured to a different
   destination.
 
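The two changelogs above mention several programmatic hooks: the exclude-list methods (exclude, clear_exclude, get_exclude_list) and the file argument to report.  A minimal sketch of how they combine, assuming a cov object that has already measured some code::

    import coverage

    cov = coverage.coverage()
    cov.exclude(r'#\s*pragma: no cover')    # add a regex to the list
    print(cov.get_exclude_list())           # the regexes currently in force
    cov.clear_exclude()                     # empty the list again

    # Capture the summary report somewhere other than stdout.
    out = open("report.txt", "w")
    cov.report(file=out)
    out.close()
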
 clean:
 	python test/test_farm.py clean
 	-rm -rf build coverage.egg-info dist htmlcov
-	-rm -f *.pyd */*.pyd 
+	-rm -f *.pyd */*.pyd
 	-rm -f *.pyc */*.pyc */*/*.pyc */*/*/*.pyc */*/*/*/*.pyc */*/*/*/*/*.pyc
 	-rm -f *.pyo */*.pyo */*/*.pyo */*/*/*.pyo */*/*/*/*.pyo */*/*/*/*/*.pyo
 	-rm -f *.bak */*.bak */*/*.bak */*/*/*.bak */*/*/*/*.bak */*/*/*/*/*.bak
 	-rm -rf doc/_build/*
 
 LINTABLE = coverage setup.py test
-	
-lint: 
+
+lint:
 	-python -x /Python25/Scripts/pylint.bat --rcfile=.pylintrc $(LINTABLE)
 	python /Python25/Lib/tabnanny.py $(LINTABLE)
 	python checkeol.py
 
 DEVINST_FILE = coverage.egg-info/PKG-INFO
 devinst: $(DEVINST_FILE)
-$(DEVINST_FILE): coverage/tracer.c 
+$(DEVINST_FILE): coverage/tracer.c
 	-rm coverage/tracer.pyd
 	python setup.py develop
 

coverage/__init__.py

 
 def _singleton_method(name):
     """Return a function to the `name` method on a singleton `coverage` object.
-    
+
     The singleton object is created the first time one of these functions is
     called.
-    
+
     """
     def wrapper(*args, **kwargs):
         """Singleton wrapper around a coverage method."""

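The docstring above describes a lazily-created singleton.  A sketch of the pattern it implements (_the_coverage is an illustrative name, and the coverage class is assumed to be importable here)::

    _the_coverage = None

    def _singleton_method(name):
        """Return a function calling `name` on a lazily-created singleton."""
        def wrapper(*args, **kwargs):
            global _the_coverage
            if _the_coverage is None:
                _the_coverage = coverage()      # created on first use
            return getattr(_the_coverage, name)(*args, **kwargs)
        return wrapper

    # Module-level functions keep the old interface working:
    start = _singleton_method('start')
    stop = _singleton_method('stop')
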
coverage/annotate.py

 
 class AnnotateReporter(Reporter):
     """Generate annotated source files showing line coverage.
-    
+
     This reporter creates annotated copies of the measured source files. Each
     .py file is copied as a .py,cover file, with a left-hand margin annotating
     each line::
-    
+
         > def h(x):
         -     if 0:   #pragma: no cover
         -         pass
         !         a = 1
         >     else:
         >         a = 2
-          
+
         > h(2)
 
     Executed lines use '>', lines not executed use '!', lines excluded from
     consideration use '-'.
-    
+
     """
 
     def __init__(self, coverage, ignore_errors=False):
         super(AnnotateReporter, self).__init__(coverage, ignore_errors)
         self.directory = None
-        
+
     blank_re = re.compile(r"\s*(#|$)")
     else_re = re.compile(r"\s*else\s*:\s*(#|$)")
 
     def report(self, morfs, directory=None, omit_prefixes=None):
         """Run the report."""
         self.report_files(self.annotate_file, morfs, directory, omit_prefixes)
-        
+
     def annotate_file(self, cu, analysis):
         """Annotate a single file.
-        
+
         `cu` is the CodeUnit for the file to annotate.
-        
+
         """
         if not cu.relative:
             return
             if self.blank_re.match(line):
                 dest.write('  ')
             elif self.else_re.match(line):
-                # Special logic for lines containing only 'else:'.  
+                # Special logic for lines containing only 'else:'.
                 if i >= len(statements) and j >= len(missing):
                     dest.write('! ')
                 elif i >= len(statements) or j >= len(missing):

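The '>', '!', and '-' markers above end up in the ,cover files.  A usage sketch, with a hypothetical module under measurement::

    import coverage
    import mymodule                  # hypothetical module to measure

    cov = coverage.coverage()
    cov.start()
    mymodule.main()                  # exercise the code
    cov.stop()
    cov.annotate(morfs=[mymodule], directory="annotated")
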
coverage/bytecode.py

 
 class ByteCodes(object):
     """Iterator over byte codes in `code`.
-    
+
     Returns `ByteCode` objects.
-    
+
     """
     def __init__(self, code):
         self.code = code
         self.offset = 0
-        
+
     if sys.hexversion > 0x03000000:
         def __getitem__(self, i):
             return self.code[i]
 
     def __iter__(self):
         return self
-    
+
     def __next__(self):
         if self.offset >= len(self.code):
             raise StopIteration
-        
+
         bc = ByteCode()
         bc.op = self[self.offset]
         bc.offset = self.offset
-        
+
         next_offset = self.offset+1
         if bc.op >= opcode.HAVE_ARGUMENT:
             bc.arg = self[self.offset+1] + 256*self[self.offset+2]
             next_offset += 2
-            
+
             label = -1
             if bc.op in opcode.hasjrel:
                 label = next_offset + bc.arg
             elif bc.op in opcode.hasjabs:
                 label = bc.arg
             bc.jump_to = label
-            
+
         bc.next_offset = self.offset = next_offset
         return bc
-    
+
     next = __next__     # Py2k uses an old-style non-dunder name.
 
 
     """Iterate over all the code objects in `code`."""
     def __init__(self, code):
         self.stack = [code]
-        
+
     def __iter__(self):
         return self
-    
+
     def __next__(self):
         if self.stack:
             # We're going to return the code object on the stack, but first

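A sketch of walking a code object with the ByteCodes iterator above; dis.opname is only used to make the output readable::

    import dis

    code = compile("a = 1\nif a:\n    a = 2\n", "<example>", "exec")
    for bc in ByteCodes(code.co_code):
        print("%4d %s" % (bc.offset, dis.opname[bc.op]))
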
coverage/cmdline.py

 
 class Opts(object):
     """A namespace class for individual options we'll build parsers from."""
-    
+
     append = optparse.Option(
         '-a', '--append', action='store_false', dest="erase_first",
         help="Append coverage data to .coverage, otherwise it is started "
         '', '--version', action='store_true',
         help="Display version information and exit."
         )
-    
+
+
 class CoverageOptionParser(optparse.OptionParser, object):
     """Base OptionParser for coverage.
-    
+
     Problems don't exit the program.
     Defaults are initialized for all options.
-    
+
     """
 
     def __init__(self, *args, **kwargs):
     class OptionParserError(Exception):
         """Used to stop the optparse error handler ending the process."""
         pass
-    
+
     def parse_args(self, args=None, options=None):
         """Call optparse.parse_args, but return a triple:
-        
+
         (ok, options, args)
-        
+
         """
         try:
             options, args = \
         except self.OptionParserError:
             return False, None, None
         return True, options, args
-        
+
     def error(self, msg):
         """Override optparse.error so sys.exit doesn't get called."""
         self.help_fn(msg)
 
     def __init__(self):
         super(ClassicOptionParser, self).__init__()
-        
+
         self.add_action('-a', '--annotate', 'annotate')
         self.add_action('-b', '--html', 'html')
         self.add_action('-c', '--combine', 'combine')
             callback=self._append_action
             )
         option.action_code = action_code
-        
+
     def _append_action(self, option, opt_unused, value_unused, parser):
         """Callback for an option that adds to the `actions` list."""
         parser.values.actions.append(option.action_code)
 
 class CmdOptionParser(CoverageOptionParser):
     """Parse one of the new-style commands for coverage.py."""
-    
+
     def __init__(self, action, options=None, defaults=None, usage=None,
                 cmd=None, description=None
                 ):
         """Create an OptionParser for a coverage command.
-        
+
         `action` is the slug to put into `options.actions`.
         `options` is a list of Option's for the command.
         `defaults` is a dict of default value for options.
         `usage` is the usage string to display in help.
         `cmd` is the command name, if different than `action`.
         `description` is the description of the command, for the help text.
-        
+
         """
         if usage:
             usage = "%prog " + usage
             "Each file gets its own page, with the source decorated to show "
             "executed, excluded, and missed lines."
         ),
-    
+
     'combine': CmdOptionParser("combine", [Opts.help],
         usage = " ",
         description = "Combine data from multiple coverage files collected "
             Opts.timid,
             Opts.help,
             ],
-        defaults = {'erase_first':True},
+        defaults = {'erase_first': True},
         cmd = "run",
         usage = "[options] <pyfile> [program options]",
         description = "Run a Python program, measuring code execution."
         ),
-    
+
     'xml': CmdOptionParser("xml",
         [
             Opts.ignore_errors,
 
 OK, ERR = 0, 1
 
+
 class CoverageScript(object):
     """The command-line interface to Coverage."""
-    
+
     def __init__(self, _covpkg=None, _run_python_file=None, _help_fn=None):
         # _covpkg is for dependency injection, so we can test this code.
         if _covpkg:
         else:
             import coverage
             self.covpkg = coverage
-        
+
         # _run_python_file is for dependency injection also.
         self.run_python_file = _run_python_file or run_python_file
-        
+
         # _help_fn is for dependency injection.
         self.help_fn = _help_fn or self.help
-        
+
         self.coverage = None
 
     def help(self, error=None, topic=None, parser=None):
 
     def command_line(self, argv):
         """The bulk of the command line interface to Coverage.
-        
+
         `argv` is the argument list to process.
 
         Returns 0 if all is well, 1 if something went wrong.
 
         """
         # Collect the command-line options.
-        
+
         if not argv:
             self.help_fn(topic='minimum_help')
             return OK
         if not args_allowed and args:
             self.help_fn("Unexpected arguments: %s" % " ".join(args))
             return ERR
-        
+
         if 'execute' in options.actions and not args:
             self.help_fn("Nothing to do.")
             return ERR
-            
+
         # Do something.
         self.coverage = self.covpkg.coverage(
             data_suffix = bool(options.parallel_mode),
         if options.omit:
             omit = options.omit.split(',')
         report_args['omit_prefixes'] = omit
-        
+
         if 'report' in options.actions:
             self.coverage.report(
                 show_missing=options.show_missing, **report_args)
 
 Commands:
     annotate    Annotate source files with execution information.
-    combine     Combine a number of data files. 
+    combine     Combine a number of data files.
     erase       Erase previously collected coverage data.
     help        Get help on using coverage.py.
     html        Create an HTML report.
 
 def main():
     """The main entrypoint to Coverage.
-    
+
     This is installed as the script entrypoint.
-    
+
     """
     try:
         status = CoverageScript().command_line(sys.argv[1:])

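Given the constructor parameters documented above, a new-style command parser is wired up like the 'combine' entry shown earlier; a sketch of using one directly (the behavior of options.actions follows the docstring's description of the action slug)::

    combine_parser = CmdOptionParser(
        "combine", [Opts.help],
        usage=" ",
        description="Combine data from multiple coverage files.",
    )
    ok, options, args = combine_parser.parse_args([])
    if ok:
        print(options.actions)       # the action slug, e.g. ['combine']
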
coverage/codeunit.py

 
 def code_unit_factory(morfs, file_locator, omit_prefixes=None):
     """Construct a list of CodeUnits from polymorphic inputs.
-    
+
     `morfs` is a module or a filename, or a list of same.
     `file_locator` is a FileLocator that can help resolve filenames.
     `omit_prefixes` is a list of prefixes.  CodeUnits that match those prefixes
     will be omitted from the list.
-    
+
     Returns a list of CodeUnit objects.
-    
+
     """
 
     # Be sure we have a list.
     if not isinstance(morfs, (list, tuple)):
         morfs = [morfs]
-    
+
     # On Windows, the shell doesn't expand wildcards.  Do it here.
     globbed = []
     for morf in morfs:
     morfs = globbed
 
     code_units = [CodeUnit(morf, file_locator) for morf in morfs]
-    
+
     if omit_prefixes:
         assert not isinstance(omit_prefixes, string_class) # common mistake
         prefixes = [file_locator.abs_file(p) for p in omit_prefixes]
                     break
             else:
                 filtered.append(cu)
-    
+
         code_units = filtered
 
     return code_units
 
 class CodeUnit(object):
     """Code unit: a filename or module.
-    
+
     Instance attributes:
-    
+
     `name` is a human-readable name for this code unit.
     `filename` is the os path from which we can read the source.
     `relative` is a boolean.
-    
+
     """
 
     def __init__(self, morf, file_locator):
 
     # Annoying comparison operators. Py3k wants __lt__ etc, and Py2k needs all
     # of them defined.
-    
+
     def __lt__(self, other):
         return self.name < other.name
-    
+
     def __le__(self, other):
         return self.name <= other.name
 
     def __eq__(self, other):
         return self.name == other.name
-    
+
     def __ne__(self, other):
         return self.name != other.name
 
     def __gt__(self, other):
         return self.name > other.name
-    
+
     def __ge__(self, other):
         return self.name >= other.name
 
     def flat_rootname(self):
         """A base for a flat filename to correspond to this code unit.
-        
+
         Useful for writing files about the code where you want all the files in
         the same directory, but need to differentiate same-named files from
         different directories.
-        
+
         For example, the file a/b/c.py might return 'a_b_c'
-        
+
         """
         if self.modname:
             return self.modname.replace('.', '_')
         source = self.file_locator.get_zip_data(self.filename)
         if source is not None:
             return StringIO(source)
-            
+
         # Couldn't find source.
         raise CoverageException(
             "No source for code %r." % self.filename

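The flat_rootname docstring above gives the example a/b/c.py -> 'a_b_c'.  A standalone sketch of that mapping (not the class's exact code)::

    import os

    def flat_rootname(filename):
        """Turn a relative path into a directory-free root name."""
        root = os.path.splitext(filename)[0]    # drop the extension
        return root.replace(os.sep, '_')

    # flat_rootname(os.path.join('a', 'b', 'c.py')) == 'a_b_c'
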
coverage/collector.py

 
 class PyTracer(object):
     """Python implementation of the raw data tracer."""
-    
+
     # Because of poor implementations of trace-function-manipulating tools,
     # the Python trace function must be kept very simple.  In particular, there
     # must be only one function ever set as the trace function, both through
 
     def _trace(self, frame, event, arg_unused):
         """The trace function passed to sys.settrace."""
-        
+
         #print "trace event: %s %r @%d" % (
         #           event, frame.f_code.co_filename, frame.f_lineno)
-        
+
         if self.last_exc_back:
             if frame == self.last_exc_back:
                 # Someone forgot a return event.
                     self.cur_file_data[(self.last_line, -1)] = None
                 self.cur_file_data, self.last_line = self.data_stack.pop()
             self.last_exc_back = None
-            
+
         if event == 'call':
             # Entering a new function context.  Decide if we should trace
             # in this file.
             #print "exc", self.last_line, frame.f_lineno
             self.last_exc_back = frame.f_back
         return self._trace
-        
+
     def start(self):
         """Start this Tracer."""
         sys.settrace(self._trace)
 class Collector(object):
     """Collects trace data.
 
-    Creates a Tracer object for each thread, since they track stack information.
-    Each Tracer points to the same shared data, contributing traced data points.
-    
+    Creates a Tracer object for each thread, since they track stack
+    information.  Each Tracer points to the same shared data, contributing
+    traced data points.
+
     When the Collector is started, it creates a Tracer for the current thread,
     and installs a function to create Tracers for each new thread started.
     When the Collector is stopped, all active Tracers are stopped.
-    
+
     Threads started while the Collector is stopped will never have Tracers
     associated with them.
-    
+
     """
-    
+
     # The stack of active Collectors.  Collectors are added here when started,
     # and popped when stopped.  Collectors on the stack are paused when not
     # the top, and resumed when they become the top again.
 
     def __init__(self, should_trace, timid, branch):
         """Create a collector.
-        
+
         `should_trace` is a function, taking a filename, and returning a
         canonicalized filename, or False depending on whether the file should
         be traced or not.
-        
+
         If `timid` is true, then a slower simpler trace function will be
         used.  This is important for some environments where manipulation of
         tracing functions make the faster more sophisticated trace function not
         operate properly.
-        
+
         If `branch` is true, then branches will be measured.  This involves
         collecting data on which statements followed each other (arcs).  Use
         `get_arc_data` to get the arc data.
-        
+
         """
         self.should_trace = should_trace
         self.branch = branch
         # A dictionary mapping filenames to dicts with linenumber keys,
         # or mapping filenames to dicts with linenumber pairs as keys.
         self.data = {}
-        
+
         # A cache of the results from should_trace, the decision about whether
         # to trace execution in a file. A dict of filename to (filename or
         # False).
 
         self.pause()
         self.tracers = []
-        
+
         # Remove this Collector from the stack, and resume the one underneath
         # (if any).
         self._collectors.pop()
                 for k in sorted(stats.keys()):
                     print("%16s: %s" % (k, stats[k]))
         threading.settrace(None)
-        
+
     def resume(self):
         """Resume tracing after a `pause`."""
         for tracer in self.tracers:
 
     def get_line_data(self):
         """Return the line data collected.
-        
+
         Data is { filename: { lineno: None, ...}, ...}
-        
+
         """
         if self.branch:
             # If we were measuring branches, then we have to re-build the dict
 
     def get_arc_data(self):
         """Return the arc data collected.
-        
+
         Data is { filename: { (l1, l2): None, ...}, ...}
 
         Note that no data is collected or returned if the Collector wasn't

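A sketch of driving the Collector described above; should_trace follows the contract in the constructor docstring, returning a canonicalized filename or False::

    import os.path

    def should_trace(filename):
        if filename.startswith('<'):     # e.g. '<string>': nothing useful
            return False
        return os.path.abspath(filename)

    collector = Collector(should_trace, timid=False, branch=False)
    collector.start()
    # ... run the code being measured ...
    collector.stop()
    data = collector.get_line_data()     # {filename: {lineno: None, ...}}
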
coverage/control.py

     """Programmatic access to Coverage.
 
     To use::
-    
+
         from coverage import coverage
-        
+
         cov = coverage()
         cov.start()
         #.. blah blah (run your code) blah blah ..
 
     def __init__(self, data_file=None, data_suffix=False, cover_pylib=False,
                 auto_data=False, timid=False, branch=False):
-        """        
+        """
         `data_file` is the base name of the data file to use, defaulting to
         ".coverage".  `data_suffix` is appended to `data_file` to create the
         final file name.  If `data_suffix` is simply True, then a suffix is
         created with the machine and process identity included.
-        
+
         `cover_pylib` is a boolean determining whether Python code installed
         with the Python interpreter is measured.  This includes the Python
         standard library and any packages installed with the interpreter.
-        
+
         If `auto_data` is true, then any existing data file will be read when
         coverage measurement starts, and data will be saved automatically when
         measurement stops.
-        
+
         If `timid` is true, then a slower and simpler trace function will be
         used.  This is important for some environments where manipulation of
         tracing functions breaks the faster trace function.
-        
+
         If `branch` is true, then branch coverage will be measured in addition
         to the usual statement coverage.
 
         """
         from coverage import __version__
-        
+
         self.cover_pylib = cover_pylib
         self.auto_data = auto_data
         self.atexit_registered = False
 
         self.exclude_re = ""
         self.exclude_list = []
-        
+
         self.file_locator = FileLocator()
-        
+
         # Timidity: for nose users, read an environment variable.  This is a
         # cheap hack, since the rest of the command line arguments aren't
         # recognized, but it solves some users' problems.
 
     def _should_trace(self, filename, frame):
         """Decide whether to trace execution in `filename`
-        
+
         This function is called from the trace function.  As each new file name
         is encountered, this function determines whether it is traced or not.
-        
+
         Returns a canonicalized filename if it should be traced, False if it
         should not.
-        
+
         """
         if filename == '<string>':
             # There's no point in ever tracing string executions, we can't do
 
     def use_cache(self, usecache):
         """Control the use of a data file (incorrectly called a cache).
-        
+
         `usecache` is true or false, whether to read and write data on disk.
-        
+
         """
         self.data.usefile(usecache)
 
         """Load previously-collected coverage data from the data file."""
         self.collector.reset()
         self.data.read()
-        
+
     def start(self):
         """Start measuring code coverage."""
         if self.auto_data:
                 atexit.register(self.save)
                 self.atexit_registered = True
         self.collector.start()
-        
+
     def stop(self):
         """Stop measuring code coverage."""
         self.collector.stop()
 
     def erase(self):
         """Erase previously-collected coverage data.
-        
+
         This removes the in-memory data collected in this session as well as
         discarding the data file.
-        
+
         """
         self.collector.reset()
         self.data.erase()
 
     def exclude(self, regex):
         """Exclude source lines from execution consideration.
-        
+
         `regex` is a regular expression.  Lines matching this expression are
         not considered executable when reporting code coverage.  A list of
         regexes is maintained; this function adds a new regex to the list.
         Matching any of the regexes excludes a source line.
-        
+
         """
         self.exclude_list.append(regex)
         self.exclude_re = "(" + ")|(".join(self.exclude_list) + ")"
 
     def combine(self):
         """Combine together a number of similarly-named coverage data files.
-        
+
         All coverage data files whose name starts with `data_file` (from the
         coverage() constructor) will be read, and combined together into the
         current measurements.
-        
+
         """
         self.data.combine_parallel_data()
 
 
     def analysis2(self, morf):
         """Analyze a module.
-        
+
         `morf` is a module or a filename.  It will be analyzed to determine
         its coverage statistics.  The return value is a 5-tuple:
-        
+
         * The filename for the module.
         * A list of line numbers of executable statements.
         * A list of line numbers of excluded statements.
-        * A list of line numbers of statements not run (missing from execution).
+        * A list of line numbers of statements not run (missing from
+          execution).
         * A readable formatted string of the missing line numbers.
 
         The analysis uses the source file itself and the current measured
 
     def _analyze(self, it):
         """Analyze a single morf or code unit.
-        
+
         Returns an `Analysis` object.
 
         """
         if not isinstance(it, CodeUnit):
             it = code_unit_factory(it, self.file_locator)[0]
-        
+
         return Analysis(self, it)
 
     def report(self, morfs=None, show_missing=True, ignore_errors=False,
                 file=None, omit_prefixes=None):     # pylint: disable-msg=W0622
         """Write a summary report to `file`.
-        
+
         Each module in `morfs` is listed, with counts of statements, executed
         statements, missing statements, and a list of lines missed.
-        
+
         """
         reporter = SummaryReporter(self, show_missing, ignore_errors)
         reporter.report(morfs, outfile=file, omit_prefixes=omit_prefixes)
     def annotate(self, morfs=None, directory=None, ignore_errors=False,
                     omit_prefixes=None):
         """Annotate a list of modules.
-        
+
         Each module in `morfs` is annotated.  The source is written to a new
         file, named with a ",cover" suffix, with each line prefixed with a
         marker to indicate the coverage of the line.  Covered lines have ">",
         excluded lines have "-", and missing lines have "!".
-        
+
         """
         reporter = AnnotateReporter(self, ignore_errors)
         reporter.report(
     def html_report(self, morfs=None, directory=None, ignore_errors=False,
                     omit_prefixes=None):
         """Generate an HTML report.
-        
+
         """
         reporter = HtmlReporter(self, ignore_errors)
         reporter.report(
     def xml_report(self, morfs=None, outfile=None, ignore_errors=False,
                     omit_prefixes=None):
         """Generate an XML report of coverage results.
-        
+
         The report is compatible with Cobertura reports.
-        
+
         """
         if outfile:
             outfile = open(outfile, "w")
 
     def sysinfo(self):
         """Return a list of key,value pairs showing internal information."""
-        
+
         import coverage as covmod
         import platform, re, sys
 
 
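Pulling the coverage class API above together, a typical programmatic session might look like this sketch::

    from coverage import coverage

    cov = coverage(data_file=".coverage", branch=True)
    cov.start()
    #.. blah blah (run your code) blah blah ..
    cov.stop()
    cov.save()

    cov.report(show_missing=True)            # summary to stdout
    cov.html_report(directory="htmlcov")     # one page per source file
    cov.xml_report(outfile="coverage.xml")   # Cobertura-compatible
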
 class CoverageData(object):
     """Manages collected coverage data, including file storage.
-    
+
     The data file format is a pickled dict, with these keys:
-    
+
         * collector: a string identifying the collecting software
 
         * lines: a dict mapping filenames to sorted lists of line numbers
           executed:
             { 'file1': [17,23,45],  'file2': [1,2,3], ... }
-    
+
         * arcs: a dict mapping filenames to sorted lists of line number pairs:
             { 'file1': [(17,23), (17,25), (25,26)], ... }
 
     """
-    
+
     # Name of the data file (unless environment variable is set).
     filename_default = ".coverage"
 
 
     def __init__(self, basename=None, suffix=None, collector=None):
         """Create a CoverageData.
-        
+
         `basename` is the name of the file to use for storing data.
-        
+
         `suffix` is a suffix to append to the base file name. This can be used
         for multiple or parallel execution, so that many coverage data files
         can exist simultaneously.
 
         """
         self.collector = collector
-        
+
         self.use_file = True
 
         # Construct the filename that will be used for data file storage, if we
         #       }
         #
         self.lines = {}
-        
+
         # A map from canonical Python source file name to a dictionary with an
         # entry for each pair of line numbers forming an arc:
         #
         # { filename: { (l1,l2): None, ... }, ...}
         #
         self.arcs = {}
-        
+
     def usefile(self, use_file=True):
         """Set whether or not to use a disk file for data."""
         self.use_file = use_file
                 os.remove(self.filename)
         self.lines = {}
         self.arcs = {}
-        
+
     def line_data(self):
         """Return the map from filenames to lists of line numbers executed."""
         return dict(
         return dict(
             [(f, sorted(amap.keys())) for f, amap in self.arcs.items()]
             )
-        
+
     def write_file(self, filename):
         """Write the coverage data to `filename`."""
 
-        # Create the file data.        
+        # Create the file data.
         data = {}
 
         data['lines'] = self.line_data()
 
     def _read_file(self, filename):
         """Return the stored coverage data from the given file.
-        
+
         Returns two values, suitable for assigning to `self.lines` and
         `self.arcs`.
-        
+
         """
         lines = {}
         arcs = {}
 
     def combine_parallel_data(self):
         """Combine a number of data files together.
-        
+
         Treat `self.filename` as a file prefix, and combine the data from all
         of the data files starting with that prefix.
-        
+
         """
         data_dir, local = os.path.split(self.filename)
         for f in os.listdir(data_dir or '.'):
 
     def add_line_data(self, line_data):
         """Add executed line data.
-        
+
         `line_data` is { filename: { lineno: None, ... }, ...}
-        
+
         """
         for filename, linenos in line_data.items():
             self.lines.setdefault(filename, {}).update(linenos)
 
     def add_arc_data(self, arc_data):
         """Add measured arc data.
-        
+
         `arc_data` is { filename: { (l1,l2): None, ... }, ...}
-        
+
         """
         for filename, arcs in arc_data.items():
             self.arcs.setdefault(filename, {}).update(arcs)
 
     def executed_lines(self, filename):
         """A map containing all the line numbers executed in `filename`.
-        
+
         If `filename` hasn't been collected at all (because it wasn't executed)
         then return an empty map.
 
 
     def summary(self, fullpath=False):
         """Return a dict summarizing the coverage data.
-        
+
         Keys are based on the filenames, and values are the number of executed
         lines.  If `fullpath` is true, then the keys are the full pathnames of
         the files, otherwise they are the basenames of the files.
-        
+
         """
         summ = {}
         if fullpath:

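The CoverageData docstring above documents the pickled-dict file format.  A sketch of reading a data file directly (normally CoverageData does this for you)::

    import pickle

    f = open(".coverage", "rb")
    data = pickle.load(f)
    f.close()

    print(data.get('collector'))             # which software collected this
    for fname, lines in data.get('lines', {}).items():
        print(fname, lines)                  # e.g. 'file1' [17, 23, 45]
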
coverage/execfile.py

 
 def run_python_file(filename, args):
     """Run a python file as if it were the main program on the command line.
-    
+
     `filename` is the path to the file to execute, it need not be a .py file.
     `args` is the argument array to present as sys.argv, including the first
     element representing the file being executed.
-    
+
     """
     # Create a module to serve as __main__
     old_main_mod = sys.modules['__main__']
     finally:
         # Restore the old __main__
         sys.modules['__main__'] = old_main_mod
-        
+
         # Restore the old argv and path
         sys.argv = old_argv
         sys.path[0] = old_path0

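A usage sketch of run_python_file, per its docstring above (the script path is hypothetical)::

    from coverage.execfile import run_python_file

    # args[0] represents the file being executed, per the docstring.
    run_python_file("prog.py", ["prog.py", "--verbose"])
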
coverage/files.py

 
     def relative_filename(self, filename):
         """Return the relative form of `filename`.
-        
+
         The filename will be relative to the current directory when the
         FileLocator was constructed.
-        
+
         """
         return filename.replace(self.relative_dir, "")
 
     def canonical_filename(self, filename):
         """Return a canonical filename for `filename`.
-        
+
         An absolute path with no redundant components and normalized case.
-        
+
         """
         if filename not in self.canonical_filename_cache:
             f = filename
 
     def get_zip_data(self, filename):
         """Get data from `filename` if it is a zip file path.
-        
+
         Returns the string data read from the zip file, or None if no zip file
         could be found or `filename` isn't in it.  The data returned will be
         an empty string if the file is empty.
-        
+
         """
         import zipimport
         markers = ['.zip'+os.sep, '.egg'+os.sep]
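
A sketch of the FileLocator methods documented above::

    from coverage.files import FileLocator

    locator = FileLocator()
    canon = locator.canonical_filename("./pkg/../pkg/mod.py")
    print(canon)                             # absolute, normalized case
    print(locator.relative_filename(canon))  # relative to the starting dir
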
 def data(fname):
     """Return the contents of a data file of ours."""
     return open(data_filename(fname)).read()
-    
+
 
 class HtmlReporter(Reporter):
     """HTML reporting."""
-    
+
     def __init__(self, coverage, ignore_errors=False):
         super(HtmlReporter, self).__init__(coverage, ignore_errors)
         self.directory = None
         self.source_tmpl = Templite(data("htmlfiles/pyfile.html"), globals())
-        
+
         self.files = []
         self.arcs = coverage.data.has_arcs()
 
     def report(self, morfs, directory, omit_prefixes=None):
         """Generate an HTML report for `morfs`.
-        
+
         `morfs` is a list of modules or filenames.  `directory` is where to put
         the HTML files. `omit_prefixes` is a list of strings, prefixes of
         modules to omit from the report.
-        
+
         """
         assert directory, "must provide a directory for html reporting"
-        
+
         # Process all the files.
         self.report_files(self.html_file, morfs, directory, omit_prefixes)
 
 
     def html_file(self, cu, analysis):
         """Generate an HTML file for one source file."""
-        
+
         source = cu.source_file().read()
 
-        nums = analysis.numbers        
+        nums = analysis.numbers
 
         missing_branch_arcs = analysis.missing_branch_arcs()
         n_par = 0   # accumulated below.
         c_par = " par" + c_run
 
         lines = []
-        
+
         for lineno, line in enumerate(source_token_lines(source)):
             lineno += 1     # 1-based line numbers.
             # Figure out how to mark this line.
                     annotate_title = "no jump to this line number"
             elif lineno in analysis.statements:
                 line_class += c_run
-            
+
             # Build the HTML for the line
             html = ""
             for tok_type, tok_text in line:
 
 def spaceless(html):
     """Squeeze out some annoying extra space from an HTML string.
-    
+
     Nicely-formatted templates mean lots of extra space in the result.  Get
     rid of some.
-    
+
     """
     html = re.sub(r">\s+<p ", ">\n<p ", html)
     return html
 
 def nice_pair(pair):
     """Make a nice string representation of a pair of numbers.
-    
+
     If the numbers are equal, just return the number, otherwise return the pair
     with a dash between them, indicating the range.
-    
+
     """
     start, end = pair
     if start == end:
     Format a list of line numbers for printing by coalescing groups of lines as
     long as the lines represent consecutive statements.  This will coalesce
     even if there are gaps between statements.
-    
+
     For example, if `statements` is [1,2,3,4,5,10,11,12,13,14] and
     `lines` is [1,2,5,10,11,13,14] then the result will be "1-2, 5-11, 13-14".
-    
+
     """
     pairs = []
     i = 0
 
 def expensive(fn):
     """A decorator to cache the result of an expensive operation.
-    
+
     Only applies to methods with no arguments.
-    
+
     """
     attr = "_cache_" + fn.__name__
     def _wrapped(self):

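The expensive decorator is truncated by the diff above; a sketch of the caching it describes, computing once and stashing the result on the instance::

    def expensive(fn):
        """Cache the result of a no-argument method on its instance."""
        attr = "_cache_" + fn.__name__
        def _wrapped(self):
            if not hasattr(self, attr):
                setattr(self, attr, fn(self))    # computed only once
            return getattr(self, attr)
        return _wrapped
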
coverage/parser.py

 
 class CodeParser(object):
     """Parse code to find executable lines, excluded lines, etc."""
-    
+
     def __init__(self, text=None, filename=None, exclude=None):
         """
         Source can be provided as `text`, the text itself, or `filename`, from
-        which text will be read.  Excluded lines are those that match `exclude`,
-        a regex.
-        
+        which text will be read.  Excluded lines are those that match
+        `exclude`, a regex.
+
         """
         assert text or filename, "CodeParser needs either text or filename"
         self.filename = filename or "<code>"
         self.text = self.text.replace('\r\n', '\n')
 
         self.exclude = exclude
-        
+
         self.show_tokens = False
 
         # The text lines of the parsed code.
 
         # The line numbers of excluded lines of code.
         self.excluded = set()
-        
+
         # The line numbers of docstring lines.
         self.docstrings = set()
-        
+
         # The line numbers of class definitions.
         self.classdefs = set()
 
         # A dict mapping line numbers to (lo,hi) for multi-line statements.
         self.multiline = {}
-        
+
         # The line numbers that start statements.
         self.statement_starts = set()
 
         # Lazily-created ByteParser
         self._byte_parser = None
-        
+
     def _get_byte_parser(self):
         """Create a ByteParser on demand."""
         if not self._byte_parser:
 
     def _raw_parse(self):
         """Parse the source to find the interesting facts about its lines.
-        
+
         A handful of member fields are updated.
-        
+
         """
         # Find lines which match an exclusion pattern.
         if self.exclude:
             for i, ltext in enumerate(self.lines):
                 if re_exclude.search(ltext):
                     self.excluded.add(i+1)
-    
+
         # Tokenize, to find excluded suites, to find docstrings, and to find
         # multi-line statements.
         indent = 0
                     for l in range(first_line, elineno+1):
                         self.multiline[l] = rng
                 first_line = None
-                
+
             if ttext.strip() and toktype != tokenize.COMMENT:
                 # A non-whitespace token.
                 if first_line is None:
                         excluding = False
                     if excluding:
                         self.excluded.add(elineno)
-                        
+
             prev_toktype = toktype
 
         # Find the starts of the executable statements.
     def first_lines(self, lines, ignore=None):
         """Map the line numbers in `lines` to the correct first line of the
         statement.
-        
+
         Skip any line mentioned in `ignore`.
-        
+
         Returns a sorted list of the first lines.
-        
+
         """
         ignore = ignore or []
         lset = set()
             if new_l not in ignore:
                 lset.add(new_l)
         return sorted(lset)
-    
+
     def parse_source(self):
         """Parse source text to find executable lines, excluded lines, etc.
 
         Return values are 1) a sorted list of executable line numbers, and
         2) a sorted list of excluded line numbers.
-        
+
         Reported line numbers are normalized to the first line of multi-line
         statements.
-        
+
         """
         self._raw_parse()
-        
+
         excluded_lines = self.first_lines(self.excluded)
         ignore = excluded_lines + list(self.docstrings)
         lines = self.first_lines(self.statement_starts, ignore)
-    
+
         return lines, excluded_lines
 
     def arcs(self):
         """Get information about the arcs available in the code.
-        
+
         Returns a sorted list of line number pairs.  Line numbers have been
         normalized to the first line of multiline statements.
-        
+
         """
         all_arcs = []
         for l1, l2 in self.byte_parser._all_arcs():
 
     def exit_counts(self):
         """Get a mapping from line numbers to count of exits from that line.
-        
+
         Excluded lines are excluded.
-        
+
         """
         excluded_lines = self.first_lines(self.excluded)
         exit_counts = {}
             # Ensure key is there: classdefs can include excluded lines.
             if l in exit_counts:
                 exit_counts[l] -= 1
-                
+
         return exit_counts
     exit_counts = expensive(exit_counts)
 
 
     def child_parsers(self):
         """Iterate over all the code objects nested within this one.
-        
+
         The iteration includes `self` as its first value.
-        
+
         """
         return map(lambda c: ByteParser(code=c), CodeObjects(self.code))
 
-    # Getting numbers from the lnotab value changed in Py3.0.    
+    # Getting numbers from the lnotab value changed in Py3.0.
     if sys.hexversion >= 0x03000000:
         def _lnotab_increments(self, lnotab):
             """Return a list of ints from the lnotab bytes in 3.x"""
 
     def _bytes_lines(self):
         """Map byte offsets to line numbers in `code`.
-    
+
         Uses co_lnotab described in Python/compile.c to map byte offsets to
         line numbers.  Returns a list: [(b0, l0), (b1, l1), ...]
-    
+
         """
         # Adapted from dis.py in the standard library.
         byte_increments = self._lnotab_increments(self.code.co_lnotab[0::2])
         line_increments = self._lnotab_increments(self.code.co_lnotab[1::2])
-    
+
         bytes_lines = []
         last_line_num = None
         line_num = self.code.co_firstlineno
         if line_num != last_line_num:
             bytes_lines.append((byte_num, line_num))
         return bytes_lines
-    
+
     def _find_statements(self):
         """Find the statements in `self.code`.
-        
+
         Return a set of line numbers that start statements.  Recurses into all
         code objects reachable from `self.code`.
-        
+
         """
         stmts = set()
         for bp in self.child_parsers():
             for _, l in bp._bytes_lines():
                 stmts.add(l)
         return stmts
-    
+
     def _disassemble(self):     # pragma: no cover
         """Disassemble code, for ad-hoc experimenting."""
-        
+
         import dis
-        
+
         for bp in self.child_parsers():
             print("\n%s: " % bp.code)
             dis.dis(bp.code)
 
     def _split_into_chunks(self):
         """Split the code object into a list of `Chunk` objects.
-        
+
         Each chunk is only entered at its first instruction, though there can
         be many exits from a chunk.
-        
+
         Returns a list of `Chunk` objects.
-        
+
         """
 
         # The list of chunks so far, and the one we're working on.
         chunks = []
         chunk = None
         bytes_lines_map = dict(self._bytes_lines())
-        
+
         # The block stack: loops and try blocks get pushed here for the
         # implicit jumps that can occur.
         # Each entry is a tuple: (block type, destination)
         block_stack = []
-        
+
         # Some op codes are followed by branches that should be ignored.  This
         # is a count of how many ignores are left.
         ignore_branch = 0
                     chunk.exits.add(bc.offset)
                 chunk = Chunk(bc.offset, bytes_lines_map[bc.offset])
                 chunks.append(chunk)
-                
+
             if not chunk:
                 chunk = Chunk(bc.offset)
                 chunks.append(chunk)
 
-            # Look at the opcode                
+            # Look at the opcode
             if bc.jump_to >= 0 and bc.op not in OPS_NO_JUMP:
                 if ignore_branch:
                     # Someone earlier wanted us to ignore this branch.
                 else:
                     # The opcode has a jump, it's an exit for this chunk.
                     chunk.exits.add(bc.jump_to)
-            
+
             if bc.op in OPS_CODE_END:
                 # The opcode can exit the code object.
                 chunk.exits.add(-1)
             penult = ult
             ult = bc
 
-            
+
         if chunks:
             # The last two bytecodes could be a dummy "return None" that
             # shouldn't be counted as real code. Every Python code object seems
 
     def _arcs(self):
         """Find the executable arcs in the code.
-        
+
         Returns a set of pairs, (from,to).  From and to are integer line
         numbers.  If from is -1, then the arc is an entrance into the code
         object.  If to is -1, the arc is an exit from the code object.
-        
+
         """
         chunks = self._split_into_chunks()
-        
+
         # A map from byte offsets to chunks jumped into.
         byte_chunks = dict([(c.byte, c) for c in chunks])
 
         # Build a map from byte offsets to actual lines reached.
         byte_lines = {-1:[-1]}
         bytes_to_add = set([c.byte for c in chunks])
-        
+
         while bytes_to_add:
             byte_to_add = bytes_to_add.pop()
             if byte_to_add in byte_lines or byte_to_add == -1:
                 continue
-            
+
             # Which lines does this chunk lead to?
             bytes_considered = set()
             bytes_to_consider = [byte_to_add]
             lines = set()
-            
+
             while bytes_to_consider:
                 byte = bytes_to_consider.pop()
                 bytes_considered.add(byte)
-                
+
                 # Find chunk for byte
                 try:
                     ch = byte_chunks[byte]
                         # No chunk for this byte!
                         raise Exception("Couldn't find chunk @ %d" % byte)
                     byte_chunks[byte] = ch
-                    
+
                 if ch.line:
                     lines.add(ch.line)
                 else:
                 bytes_to_add.update(ch.exits)
 
             byte_lines[byte_to_add] = lines
-        
+
         # Figure out for each chunk where the exits go.
         arcs = set()
         for chunk in chunks:
                             arcs.add((chunk.line, exit_line))
         for line in byte_lines[0]:
             arcs.add((-1, line))
-        
+
         return arcs
-        
+
     def _all_chunks(self):
         """Returns a list of `Chunk` objects for this code and its children.
-        
+
         See `_split_into_chunks` for details.
-        
+
         """
         chunks = []
         for bp in self.child_parsers():
             chunks.extend(bp._split_into_chunks())
-        
+
         return chunks
 
     def _all_arcs(self):
         """Get the set of all arcs in this code object and its children.
-        
+
         See `_arcs` for details.
-        
+
         """
         arcs = set()
         for bp in self.child_parsers():
             arcs.update(bp._arcs())
-        
+
         return arcs
 
 
 class Chunk(object):
     """A sequence of bytecodes with a single entrance.
-    
+
     To analyze byte code, we have to divide it into chunks, sequences of byte
     codes such that each basic block has only one entrance, the first
-    instruction in the block. 
-    
+    instruction in the block.
+
     This is almost the CS concept of `basic block`_, except that we're willing
     to have many exits from a chunk, and "basic block" is a more cumbersome
     term.
-    
+
     .. _basic block: http://en.wikipedia.org/wiki/Basic_block
-    
+
     An exit of -1 means the chunk can leave the code (return).
-    
+
     """
     def __init__(self, byte, line=0):
         self.byte = byte
         self.line = line
         self.length = 0
         self.exits = set()
-        
+
     def __repr__(self):
         return "<%d+%d @%d %r>" % (
             self.byte, self.length, self.line, list(self.exits)
 
 class AdHocMain(object):        # pragma: no cover
     """An ad-hoc main for code parsing experiments."""
-    
+
     def main(self, args):
         """A main function for trying the code from the command line."""
 
             "-t", action="store_true", dest="tokens",
             help="Show tokens"
             )
-        
+
         options, args = parser.parse_args()
         if options.recursive:
             if args:
 
     def adhoc_one_file(self, options, filename):
         """Process just one file."""
-        
+
         if options.dis or options.chunks:
             try:
                 bp = ByteParser(filename=filename)
             except CoverageException:
-                _, err, _ = sys.exc_info()                
+                _, err, _ = sys.exc_info()
                 print("%s" % (err,))
                 return
 
                     arc_width, arc_chars = self.arc_ascii_art(arcs)
                 else:
                     arc_width, arc_chars = 0, {}
-                    
+
                 exit_counts = cp.exit_counts()
 
                 for i, ltext in enumerate(cp.lines):
 
     def arc_ascii_art(self, arcs):
         """Draw arcs as ascii art.
-        
+
         Returns a width of characters needed to draw all the arcs, and a
         dictionary mapping line numbers to ascii strings to draw for that line.
-        
+
         """
         arc_chars = {}
         for lfrom, lto in sorted(arcs):

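A sketch exercising the CodeParser interface described above::

    from coverage.parser import CodeParser

    src = (
        "def h(x):\n"
        "    if x:   # pragma: no cover\n"
        "        return 1\n"
        "    return 2\n"
    )
    parser = CodeParser(text=src, exclude=r"#\s*pragma: no cover")
    lines, excluded = parser.parse_source()
    print(lines)          # first lines of executable statements
    print(excluded)       # excluded line numbers
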
coverage/phystokens.py

 
 def phys_tokens(toks):
     """Return all physical tokens, even line continuations.
-    
+
     tokenize.generate_tokens() doesn't return a token for the backslash that
     continues lines.  This wrapper provides those tokens so that we can
     re-create a faithful representation of the original source.
-    
+
     Returns the same values as generate_tokens()
-    
+
     """
     last_line = None
     last_lineno = -1
 
 def source_token_lines(source):
     """Generate a series of lines, one for each line in `source`.
-    
+
     Each line is a list of pairs, each pair is a token::
-    
+
         [('key', 'def'), ('ws', ' '), ('nam', 'hello'), ('op', '('), ... ]
 
     Each pair has a token class, and the token text.
-    
+
     If you concatenate all the token texts, and then join them with newlines,
     you should have your original `source` back, with two differences:
     trailing whitespace is not preserved, and a final line with no newline

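A sketch of source_token_lines output, following its docstring above::

    from coverage.phystokens import source_token_lines

    for line in source_token_lines("def hello():\n    return 'hi'\n"):
        print(line)
    # first line: [('key', 'def'), ('ws', ' '), ('nam', 'hello'), ('op', '('), ...]
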
coverage/report.py

 
 class Reporter(object):
     """A base class for all reporters."""
-    
+
     def __init__(self, coverage, ignore_errors=False):
         """Create a reporter.
-        
+
         `coverage` is the coverage instance. `ignore_errors` controls how
         skittish the reporter will be during file processing.
 
         """
         self.coverage = coverage
         self.ignore_errors = ignore_errors
-        
+
         # The code units to report on.  Set by find_code_units.
         self.code_units = []
-        
+
         # The directory into which to place the report, used by some derived
         # classes.
         self.directory = None
 
     def find_code_units(self, morfs, omit_prefixes):
         """Find the code units we'll report on.
-        
+
         `morfs` is a list of modules or filenames. `omit_prefixes` is a list
         of prefixes to leave out of the list.
-        
+
         """
         morfs = morfs or self.coverage.data.executed_files()
         self.code_units = code_unit_factory(
     def report_files(self, report_fn, morfs, directory=None,
                         omit_prefixes=None):
         """Run a reporting function on a number of morfs.
-        
+
         `report_fn` is called for each relative morf in `morfs`.
-        
+
         """
         self.find_code_units(morfs, omit_prefixes)
 

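Derived classes plug a per-file function into report_files, as documented above.  A minimal sketch of such a reporter (the statement-count output is illustrative)::

    class CountReporter(Reporter):
        """Print a statement count for each code unit."""

        def report(self, morfs, omit_prefixes=None):
            self.report_files(self.count_file, morfs,
                              omit_prefixes=omit_prefixes)

        def count_file(self, cu, analysis):
            print("%s: %d statements" % (cu.name, analysis.numbers.n_statements))
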
coverage/results.py

 
 class Analysis(object):
     """The results of analyzing a code unit."""
-    
+
     def __init__(self, cov, code_unit):
         self.coverage = cov
         self.code_unit = code_unit
-        
+
         self.filename = self.code_unit.filename
         ext = os.path.splitext(self.filename)[1]
         source = None
             n_missing_branches = sum([len(v) for v in mba.values()])
         else:
             n_branches = n_missing_branches = 0
-            
+
         self.numbers = Numbers(
             n_files=1,
-            n_statements=len(self.statements), 
+            n_statements=len(self.statements),
             n_excluded=len(self.excluded),
             n_missing=len(self.missing),
             n_branches=n_branches,
 
     def missing_formatted(self):
         """The missing line numbers, formatted nicely.
-        
+
         Returns a string like "1-2, 5-11, 13-14".
-        
+
         """
         return format_lines(self.statements, self.missing)
 
         """How many total branches are there?"""
         exit_counts = self.parser.exit_counts()
         return sum([count for count in exit_counts.values() if count > 1])
-        
+
     def missing_branch_arcs(self):
         """Return arcs that weren't executed from branch lines.
-        
+
         Returns {l1:[l2a,l2b,...], ...}
-        
+
         """
         missing = self.arcs_missing()
         branch_lines = set(self.branch_lines())
 
 class Numbers(object):
     """The numerical results of measuring coverage.
-    
+
     This holds the basic statistics from `Analysis`, and is used to roll
     up statistics across files.
 
         """Returns the number of executed statements."""
         return self.n_statements - self.n_missing
     n_executed = property(_get_n_executed)
-    
+
     def _get_n_executed_branches(self):
         """Returns the number of executed branches."""
         return self.n_branches - self.n_missing_branches
     n_executed_branches = property(_get_n_executed_branches)
-    
+
     def _get_pc_covered(self):
         """Returns a single percentage value for coverage."""
         if self.n_statements > 0:

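The Numbers properties above derive everything from raw counts.  A worked sketch of the statement side of the percent-covered computation::

    # 10 statements, 2 missed -> 80% covered (branch counts, when
    # measured, are folded in the same way).
    n_statements, n_missing = 10, 2
    n_executed = n_statements - n_missing
    print(100.0 * n_executed / n_statements)     # 80.0
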
coverage/summary.py

 
 class SummaryReporter(Reporter):
     """A reporter for writing the summary report."""
-    
+
     def __init__(self, coverage, show_missing=True, ignore_errors=False):
         super(SummaryReporter, self).__init__(coverage, ignore_errors)
         self.show_missing = show_missing
 
     def report(self, morfs, omit_prefixes=None, outfile=None):
         """Writes a report summarizing coverage statistics per module."""
-        
+
         self.find_code_units(morfs, omit_prefixes)
 
         # Prepare the formatting strings
         outfile.write(rule)
 
         total = Numbers()
-        
+
         for cu in self.code_units:
             try:
                 analysis = self.coverage._analyze(cu)

coverage/templite.py

     """A simple template renderer, for a nano-subset of Django syntax.
 
     Supported constructs are extended variable access::
-    
+
         {{var.modifier.modifier|filter|filter}}
-        
+
     loops::
-    
+
         {% for var in list %}...{% endfor %}
-    
+
     and ifs::
-    
+
         {% if var %}...{% endif %}
 
     Comments are within curly-hash markers::
-    
+
         {# This will be ignored #}
 
     Construct a Templite with the template text, then use `render` against a
     dictionary context to create a finished string.
-    
+
     """
     def __init__(self, text, *contexts):
         """Construct a Templite with the given `text`.
-        
+
         `contexts` are dictionaries of values to use for future renderings.
         These are good for filters and global values.
-        
+
         """
         self.text = text
         self.context = {}
         for context in contexts:
             self.context.update(context)
-        
+
         # Split the text to form a list of tokens.
         toks = re.split(r"(?s)({{.*?}}|{%.*?%}|{#.*?#})", text)
-        
+
         # Parse the tokens into a nested list of operations.  Each item in the
         # list is a tuple with an opcode, and arguments.  They'll be
         # interpreted by TempliteEngine.
                     raise SyntaxError("Don't understand tag %r" % words)
             else:
                 ops.append(('lit', tok))
-        
+
         assert not ops_stack, "Unmatched action tag: %r" % ops_stack[-1][0]
         self.ops = ops
 
     def render(self, context=None):
         """Render this template by applying it to `context`.
-        
+
         `context` is a dictionary of values to use in this rendering.
-        
+
         """
         # Make the complete context we'll use.
         ctx = dict(self.context)
         if context:
             ctx.update(context)
-        
+
         # Run it through an engine, and return the result.
         engine = _TempliteEngine(ctx)
         engine.execute(self.ops)
 
     def execute(self, ops):
         """Execute `ops` in the engine.
-        
+
         Called recursively for the bodies of if's and loops.
-        
+
         """
         for op, args in ops:
             if op == 'lit':
 
     def evaluate(self, expr):
         """Evaluate an expression.
-        
+
         `expr` can have pipes and dots to indicate data access and filtering.
-        
+
         """
         if "|" in expr:
             pipes = expr.split("|")

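A usage sketch of Templite, combining the constructs documented above; str.upper acts as a filter supplied through the construction context::

    from coverage.templite import Templite

    tmpl = Templite(
        "Hello {{name|upper}}!{% for t in topics %} {{t}}{% endfor %}",
        {'upper': str.upper},
    )
    print(tmpl.render({'name': 'world', 'topics': ['ham', 'spam']}))
    # -> "Hello WORLD! ham spam"
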
coverage/tracer.c

     PyObject * data;
     PyObject * should_trace_cache;
     PyObject * arcs;
-    
+
     /* Has the tracer been started? */
     int started;
     /* Are we tracing arcs, or just lines? */
         data for a single source file.  The data stack parallels the call stack:
         each call pushes the new frame's file data onto the data stack, and each
         return pops file data off.
-        
+
         The file data is a dictionary whose form depends on the tracing options.
         If tracing arcs, the keys are line number pairs.  If not tracing arcs,
         the keys are line numbers.  In both cases, the value is irrelevant
     self->data = NULL;
     self->should_trace_cache = NULL;
     self->arcs = NULL;
-    
+
     self->started = 0;
     self->tracing_arcs = 0;
 
     }
     self->data_stack_alloc = STACK_DELTA;
 
-    self->cur_file_data = NULL;    
+    self->cur_file_data = NULL;
     self->last_line = -1;
 
     self->last_exc_back = NULL;
 static const char *
 indent(int n)
 {
-    static const char * spaces = 
+    static const char * spaces =
         "                                                                    "