Robert Lehmann committed 5c17eea Merge

Files changed (83)

AUTHORS

 * Charles Duffy -- original graphviz extension
 * Kevin Dunn -- MathJax extension
 * Josip Dzolonga -- coverage builder
+* Hernan Grecco -- search improvements
 * Horst Gutmann -- internationalization support
 * Martin Hans -- autodoc improvements
 * Doug Hellmann -- graphviz improvements

CHANGES

   admonition title ("See Also" instead of "See also"), and spurious indentation
   in the text builder.
 
+* sphinx-build now has a verbose option :option:`-v` which can be
+  repeated for greater effect.  A single occurrence provides slightly
+  more verbose output than normal; two or more occurrences provide
+  more detailed output that may be useful for debugging (see the usage
+  sketch after this list).
+
+* sphinx-build now provides more specific error messages when called with
+  invalid options or arguments.
+
+* sphinx-build now supports the standard :option:`--help` and
+  :option:`--version` options.
+
+* #869: sphinx-build now has the option :option:`-T` for printing the full
+  traceback after an unhandled exception.
+
+* #976: Fix gettext not extracting index entries.
+
+* #940: Fix gettext not extracting figure captions.
+
+* #1067: Improve the ordering of the JavaScript search results: matches in titles
+  come before matches in full text, and object results are better categorized.
+  Also implement a pluggable search scorer.
+
+* Fix the text writer not handling visit_legend for figure directive contents.
+
 * PR#72: #975: Fix gettext does not extract definition terms before docutils 0.10.0
 
 * PR#25: In inheritance diagrams, the first line of the class docstring
 
 * #1041: Fix cpp domain parser fails to parse a const type with a modifier.
 
+* #958: Do not preserve ``environment.pickle`` after a failed build.
+
 * PR#88: Added the "Sphinx Developer's Guide" (:file:`doc/devguide.rst`)
   which outlines the basic development process of the Sphinx project.
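
(Editorial sketch, not part of the commit: the new flags exercised through
``sphinx.cmdline.main``, which receives the full argv including the program
name; the paths below are placeholders.)

    from sphinx import cmdline

    # -T prints the full traceback on unhandled exceptions; -vv (two
    # occurrences of -v) selects the more detailed debugging output.
    cmdline.main(['sphinx-build', '-T', '-vv', 'docs', 'docs/_build/html'])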
 
doc/config.rst

    .. versionadded:: 1.1
 
+.. confval:: html_search_scorer
+
+   The name of a JavaScript file (relative to the configuration directory) that
+   implements a search results scorer.  If empty, the default scorer is used.
+
+   .. XXX describe interface for scorer here
+
+   .. versionadded:: 1.2
+
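(Editorial sketch: a project opts in from its ``conf.py``; the filename below
is hypothetical.  Judging from the default ``Scorer`` object in
``searchtools.js_t`` later in this commit, the named file is expected to define
a ``Scorer`` with weights and, optionally, a ``score(result)`` function over
``[filename, title, anchor, descr, score]`` arrays.)

    # conf.py -- hypothetical project configuration
    html_search_scorer = 'my_scorer.js'  # relative to the configuration directory
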
 .. confval:: htmlhelp_basename
 
    Output file base name for HTML help builder.  Default is ``'pydoc'``.

doc/templating.rst

 
    * ``titles_only`` (false by default): if true, put only toplevel document
      titles in the tree
+
+   * ``includehidden`` (false by default): if true, the TOC tree will also
+     contain hidden entries.
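
   (Editorial note: a sidebar template can render hidden entries with a call
   like ``{{ toctree(includehidden=true) }}``.)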

sphinx/application.py

     :license: BSD, see LICENSE for details.
 """
 
+import os
 import sys
 import types
 import posixpath
 
     def __init__(self, srcdir, confdir, outdir, doctreedir, buildername,
                  confoverrides=None, status=sys.stdout, warning=sys.stderr,
-                 freshenv=False, warningiserror=False, tags=None):
+                 freshenv=False, warningiserror=False, tags=None, verbosity=0):
+        self.verbosity = verbosity
         self.next_listener_id = 0
         self._extensions = {}
         self._listeners = {}
             else:
                 self.builder.build_update()
         except Exception, err:
+            # delete the saved env to force a fresh build next time
+            envfile = path.join(self.doctreedir, ENV_PICKLE_FILENAME)
+            if path.isfile(envfile):
+                os.unlink(envfile)
             self.emit('build-finished', err)
             raise
         else:
             self.emit('build-finished', None)
         self.builder.cleanup()
 
+    def _log(self, message, wfile, nonl=False):
+        try:
+            wfile.write(message)
+        except UnicodeEncodeError:
+            encoding = getattr(wfile, 'encoding', 'ascii') or 'ascii'
+            wfile.write(message.encode(encoding, 'replace'))
+        if not nonl:
+            wfile.write('\n')
+        if hasattr(wfile, 'flush'):
+            wfile.flush()
+
     def warn(self, message, location=None, prefix='WARNING: '):
         if isinstance(location, tuple):
             docname, lineno = location
         if self.warningiserror:
             raise SphinxWarning(warntext)
         self._warncount += 1
-        try:
-            self._warning.write(warntext)
-        except UnicodeEncodeError:
-            encoding = getattr(self._warning, 'encoding', 'ascii') or 'ascii'
-            self._warning.write(warntext.encode(encoding, 'replace'))
+        self._log(warntext, self._warning, True)
 
     def info(self, message='', nonl=False):
-        try:
-            self._status.write(message)
-        except UnicodeEncodeError:
-            encoding = getattr(self._status, 'encoding', 'ascii') or 'ascii'
-            self._status.write(message.encode(encoding, 'replace'))
-        if not nonl:
-            self._status.write('\n')
-        self._status.flush()
+        self._log(message, self._status, nonl)
+
+    def verbose(self, message, *args, **kwargs):
+        if self.verbosity < 1:
+            return
+        if args or kwargs:
+            message = message % (args or kwargs)
+        self._log(message, self._warning)
+
+    def debug(self, message, *args, **kwargs):
+        if self.verbosity < 2:
+            return
+        if args or kwargs:
+            message = message % (args or kwargs)
+        self._log(message, self._warning)
 
     # general extensibility interface
 
     def setup_extension(self, extension):
         """Import and setup a Sphinx extension module. No-op if called twice."""
+        self.debug('setting up extension: %r', extension)
         if extension in self._extensions:
             return
         try:
         else:
             self._listeners[event][listener_id] = callback
         self.next_listener_id += 1
+        self.debug('connecting event %r: %r [id=%s]',
+                   event, callback, listener_id)
         return listener_id
 
     def disconnect(self, listener_id):
+        self.debug('disconnecting event: [id=%s]', listener_id)
         for event in self._listeners.itervalues():
             event.pop(listener_id, None)
 
     # registering addon parts
 
     def add_builder(self, builder):
+        self.debug('adding builder: %r', builder)
         if not hasattr(builder, 'name'):
             raise ExtensionError('Builder class %s has no "name" attribute'
                                  % builder)
         self.builderclasses[builder.name] = builder
 
     def add_config_value(self, name, default, rebuild):
+        self.debug('adding config value: %r', (name, default, rebuild))
         if name in self.config.values:
             raise ExtensionError('Config value %r already present' % name)
         if rebuild in (False, True):
         self.config.values[name] = (default, rebuild)
 
     def add_event(self, name):
+        self.debug('adding event: %r', name)
         if name in self._events:
             raise ExtensionError('Event %r already present' % name)
         self._events[name] = ''
 
     def add_node(self, node, **kwds):
+        self.debug('adding node: %r', (node, kwds))
         nodes._add_node_class_names([node.__name__])
         for key, val in kwds.iteritems():
             try:
             return obj
 
     def add_directive(self, name, obj, content=None, arguments=None, **options):
+        self.debug('adding directive: %r',
+                   (name, obj, content, arguments, options))
         directives.register_directive(
             name, self._directive_helper(obj, content, arguments, **options))
 
     def add_role(self, name, role):
+        self.debug('adding role: %r', (name, role))
         roles.register_local_role(name, role)
 
     def add_generic_role(self, name, nodeclass):
         # don't use roles.register_generic_role because it uses
         # register_canonical_role
+        self.debug('adding generic role: %r', (name, nodeclass))
         role = roles.GenericRole(name, nodeclass)
         roles.register_local_role(name, role)
 
     def add_domain(self, domain):
+        self.debug('adding domain: %r', domain)
         if domain.name in self.domains:
             raise ExtensionError('domain %s already registered' % domain.name)
         self.domains[domain.name] = domain
 
     def override_domain(self, domain):
+        self.debug('overriding domain: %r', domain)
         if domain.name not in self.domains:
             raise ExtensionError('domain %s not yet registered' % domain.name)
         if not issubclass(domain, self.domains[domain.name]):
 
     def add_directive_to_domain(self, domain, name, obj,
                                 content=None, arguments=None, **options):
+        self.debug('adding directive to domain: %r',
+                   (domain, name, obj, content, arguments, options))
         if domain not in self.domains:
             raise ExtensionError('domain %s not yet registered' % domain)
         self.domains[domain].directives[name] = \
             self._directive_helper(obj, content, arguments, **options)
 
     def add_role_to_domain(self, domain, name, role):
+        self.debug('adding role to domain: %r', (domain, name, role))
         if domain not in self.domains:
             raise ExtensionError('domain %s not yet registered' % domain)
         self.domains[domain].roles[name] = role
 
     def add_index_to_domain(self, domain, index):
+        self.debug('adding index to domain: %r', (domain, index))
         if domain not in self.domains:
             raise ExtensionError('domain %s not yet registered' % domain)
         self.domains[domain].indices.append(index)
     def add_object_type(self, directivename, rolename, indextemplate='',
                         parse_node=None, ref_nodeclass=None, objname='',
                         doc_field_types=[]):
+        self.debug('adding object type: %r',
+                   (directivename, rolename, indextemplate, parse_node,
+                    ref_nodeclass, objname, doc_field_types))
         StandardDomain.object_types[directivename] = \
             ObjType(objname or directivename, rolename)
         # create a subclass of GenericObject as the new directive
 
     def add_crossref_type(self, directivename, rolename, indextemplate='',
                           ref_nodeclass=None, objname=''):
+        self.debug('adding crossref type: %r',
+                   (directivename, rolename, indextemplate, ref_nodeclass,
+                    objname))
         StandardDomain.object_types[directivename] = \
             ObjType(objname or directivename, rolename)
         # create a subclass of Target as the new directive
         StandardDomain.roles[rolename] = XRefRole(innernodeclass=ref_nodeclass)
 
     def add_transform(self, transform):
+        self.debug('adding transform: %r', transform)
         SphinxStandaloneReader.transforms.append(transform)
 
     def add_javascript(self, filename):
+        self.debug('adding javascript: %r', filename)
         from sphinx.builders.html import StandaloneHTMLBuilder
         if '://' in filename:
             StandaloneHTMLBuilder.script_files.append(filename)
                 posixpath.join('_static', filename))
 
     def add_stylesheet(self, filename):
+        self.debug('adding stylesheet: %r', filename)
         from sphinx.builders.html import StandaloneHTMLBuilder
         if '://' in filename:
             StandaloneHTMLBuilder.css_files.append(filename)
                 posixpath.join('_static', filename))
 
     def add_lexer(self, alias, lexer):
+        self.debug('adding lexer: %r', (alias, lexer))
         from sphinx.highlighting import lexers
         if lexers is None:
             return
         lexers[alias] = lexer
 
     def add_autodocumenter(self, cls):
+        self.debug('adding autodocumenter: %r', cls)
         from sphinx.ext import autodoc
         autodoc.add_documenter(cls)
         self.add_directive('auto' + cls.objtype, autodoc.AutoDirective)
 
     def add_autodoc_attrgetter(self, type, getter):
+        self.debug('adding autodoc attrgetter: %r', (type, getter))
         from sphinx.ext import autodoc
         autodoc.AutoDirective._special_attrgetters[type] = getter
 
     def add_search_language(self, cls):
+        self.debug('adding search language: %r', cls)
         from sphinx.search import languages, SearchLanguage
         assert isinstance(cls, SearchLanguage)
         languages[cls.lang] = cls

sphinx/builders/__init__.py

         summary = bold(summary)
         for item in iterable:
             l += 1
-            self.info(term_width_line('%s[%3d%%] %s' %
-                                      (summary, 100*l/length,
-                                       colorfunc(item))), nonl=1)
+            s = '%s[%3d%%] %s' % (summary, 100*l/length,
+                                  colorfunc(item))
+            if self.app.verbosity:
+                s += '\n'
+            else:
+                s = term_width_line(s)
+            self.info(s, nonl=1)
             yield item
         if l > 0:
             self.info()

sphinx/builders/gettext.py

 from collections import defaultdict
 
 from sphinx.builders import Builder
-from sphinx.util.nodes import extract_messages
+from sphinx.util import split_index_msg
+from sphinx.util.nodes import extract_messages, traverse_translatable_index
 from sphinx.util.osutil import SEP, safe_relpath, ensuredir, find_catalog
 from sphinx.util.console import darkgreen
+from sphinx.locale import pairindextypes
 
 POHEADER = ur"""
 # SOME DESCRIPTIVE TITLE.
         for node, msg in extract_messages(doctree):
             catalog.add(msg, node)
 
+        # Extract translatable messages from index entries.
+        for node, entries in traverse_translatable_index(doctree):
+            for typ, msg, tid, main in entries:
+                for m in split_index_msg(typ, msg):
+                    if typ == 'pair' and m in pairindextypes.values():
+                        # skip built-in pair index names; they are already
+                        # translated in 'sphinx.util.nodes.process_index_entry'
+                        continue
+                    catalog.add(m, node)
+
 
 class MessageCatalogBuilder(I18nBuilder):
     """

sphinx/builders/html.py

         if not lang or lang not in languages:
             lang = 'en'
         self.indexer = IndexBuilder(self.env, lang,
-                                    self.config.html_search_options)
+                                    self.config.html_search_options,
+                                    self.config.html_search_scorer)
         self.load_indexer(docnames)
 
         self.docwriter = HTMLWriter(self)
             self.indexer.feed(pagename, title, doctree)
 
     def _get_local_toctree(self, docname, collapse=True, **kwds):
+        if 'includehidden' not in kwds:
+            kwds['includehidden'] = False
         return self.render_partial(self.env.get_toctree_for(
             docname, self, collapse, **kwds))['fragment']
 

sphinx/cmdline.py

          -w <file> -- write warnings (and errors) to given file
          -W        -- turn warnings into errors
          -P        -- run Pdb on exception
+         -T        -- show full traceback on exception
+         -v        -- increase verbosity (can be repeated)
+        --help     -- show this help and exit
+        --version  -- show version information and exit
Modes:
 * without -a and without filenames, write new and changed files.
 * with -a, write all files.
         nocolor()
 
     try:
-        opts, args = getopt.getopt(argv[1:], 'ab:t:d:c:CD:A:ng:NEqQWw:P')
+        opts, args = getopt.getopt(argv[1:], 'ab:t:d:c:CD:A:ng:NEqQWw:PThv',
+                                   ['help', 'version'])
         allopts = set(opt[0] for opt in opts)
+        if '-h' in allopts or '--help' in allopts:
+            usage(argv)
+            return 0
+        if '--version' in allopts:
+            print 'Sphinx (sphinx-build) %s' % __version__
+            return 0
         srcdir = confdir = abspath(args[0])
         if not path.isdir(srcdir):
             print >>sys.stderr, 'Error: Cannot find source directory `%s\'.' % (
         if not path.isdir(outdir):
             print >>sys.stderr, 'Making output directory...'
             os.makedirs(outdir)
-    except (IndexError, getopt.error):
-        usage(argv)
+    except getopt.error, err:
+        usage(argv, 'Error: %s' % err)
+        return 1
+    except IndexError:
+        usage(argv, 'Error: Insufficient arguments.')
         return 1
 
     filenames = args[2:]
     err = 0
     for filename in filenames:
         if not path.isfile(filename):
-            print >>sys.stderr, 'Cannot find file %r.' % filename
+            print >>sys.stderr, 'Error: Cannot find file %r.' % filename
             err = 1
     if err:
         return 1
 
     buildername = None
     force_all = freshenv = warningiserror = use_pdb = False
+    show_traceback = False
+    verbosity = 0
     status = sys.stdout
     warning = sys.stderr
     error = sys.stderr
             buildername = val
         elif opt == '-a':
             if filenames:
-                usage(argv, 'Cannot combine -a option and filenames.')
+                usage(argv, 'Error: Cannot combine -a option and filenames.')
                 return 1
             force_all = True
         elif opt == '-t':
             warnfile = val
         elif opt == '-P':
             use_pdb = True
+        elif opt == '-T':
+            show_traceback = True
+        elif opt == '-v':
+            verbosity += 1
+            show_traceback = True
 
     if warning and warnfile:
         warnfp = open(warnfile, 'w')
     try:
         app = Sphinx(srcdir, confdir, outdir, doctreedir, buildername,
                      confoverrides, status, warning, freshenv,
-                     warningiserror, tags)
+                     warningiserror, tags, verbosity)
         app.build(force_all, filenames)
         return app.statuscode
-    except KeyboardInterrupt:
-        if use_pdb:
-            import pdb
-            print >>error, red('Interrupted while building, starting debugger:')
-            traceback.print_exc()
-            pdb.post_mortem(sys.exc_info()[2])
-        return 1
-    except Exception, err:
+    except (Exception, KeyboardInterrupt), err:
         if use_pdb:
             import pdb
             print >>error, red('Exception occurred while building, '
             pdb.post_mortem(sys.exc_info()[2])
         else:
             print >>error
-            if isinstance(err, SystemMessage):
+            if show_traceback:
+                traceback.print_exc(None, error)
+                print >>error
+            if isinstance(err, KeyboardInterrupt):
+                print >>error, 'interrupted!'
+            elif isinstance(err, SystemMessage):
                 print >>error, red('reST markup error:')
                 print >>error, terminal_safe(err.args[0])
             elif isinstance(err, SphinxError):

sphinx/config.py

         html_secnumber_suffix = ('. ', 'html'),
         html_search_language = (None, 'html'),
         html_search_options = ({}, 'html'),
+        html_search_scorer = ('', None),
 
         # HTML help only options
         htmlhelp_basename = (lambda self: make_filename(self.project), None),

sphinx/directives/other.py

         indexnode = addnodes.index()
         indexnode['entries'] = ne = []
         indexnode['inline'] = False
+        set_source_info(self, indexnode)
         for entry in arguments:
             ne.extend(process_index_entry(entry, targetid))
         return [indexnode, targetnode]

sphinx/environment.py

 
 from sphinx import addnodes
 from sphinx.util import url_re, get_matching_docs, docname_join, split_into, \
-     FilenameUniqDict
+     split_index_msg, FilenameUniqDict
 from sphinx.util.nodes import clean_astext, make_refnode, extract_messages, \
-     WarningStream
+     traverse_translatable_index, WarningStream
 from sphinx.util.osutil import movefile, SEP, ustrftime, find_catalog, \
      fs_encoding
 from sphinx.util.matching import compile_matchers
 
 # This is increased every time an environment attribute is added
 # or changed to properly invalidate pickle files.
-ENV_VERSION = 41
+ENV_VERSION = 42
 
 
 default_substitutions = set([
                 child.parent = node
             node.children = patch.children
 
+        # Extract and translate messages for index entries.
+        for node, entries in traverse_translatable_index(self.document):
+            new_entries = []
+            for type, msg, tid, main in entries:
+                msg_parts = split_index_msg(type, msg)
+                msgstr_parts = []
+                for part in msg_parts:
+                    msgstr = catalog.gettext(part)
+                    if not msgstr:
+                        msgstr = part
+                    msgstr_parts.append(msgstr)
+
+                new_entries.append((type, ';'.join(msgstr_parts), tid, main))
+
+            node['raw_entries'] = entries
+            node['entries'] = new_entries
+
 
 class SphinxStandaloneReader(standalone.Reader):
     """
         del self.config.values
         domains = self.domains
         del self.domains
-        # first write to a temporary file, so that if dumping fails,
-        # the existing environment won't be overwritten
-        picklefile = open(filename + '.tmp', 'wb')
+        picklefile = open(filename, 'wb')
         # remove potentially pickling-problematic values from config
         for key, val in vars(self.config).items():
             if key.startswith('_') or \
             pickle.dump(self, picklefile, pickle.HIGHEST_PROTOCOL)
         finally:
             picklefile.close()
-        movefile(filename + '.tmp', filename)
         # reset attributes
         self.domains = domains
         self.config.values = values
         filterlevel = self.config.keep_warnings and 2 or 5
         for node in doctree.traverse(nodes.system_message):
             if node['level'] < filterlevel:
+                self.app.debug('%s [filtered system message]', node.astext())
                 node.parent.remove(node)
 
 
         if toctree.get('hidden', False) and not includehidden:
             return None
 
-        def _walk_depth(node, depth, maxdepth):
+        # For reading the following two helper functions, it is useful to keep
+        # in mind the node structure of a toctree (using HTML-like node names
+        # for brevity):
+        #
+        # <ul>
+        #   <li>
+        #     <p><a></p>
+        #     <p><a></p>
+        #     ...
+        #     <ul>
+        #       ...
+        #     </ul>
+        #   </li>
+        # </ul>
+        #
+        # The transformation is made in two passes in order to avoid
+        # interactions between marking and pruning the tree (see bug #1046).
+
+        def _toctree_prune(node, depth, maxdepth):
             """Utility: Cut a TOC at a specified depth."""
-
-            # For reading this function, it is useful to keep in mind the node
-            # structure of a toctree (using HTML-like node names for brevity):
-            #
-            # <ul>
-            #   <li>
-            #     <p><a></p>
-            #     <p><a></p>
-            #     ...
-            #     <ul>
-            #       ...
-            #     </ul>
-            #   </li>
-            # </ul>
-
             for subnode in node.children[:]:
                 if isinstance(subnode, (addnodes.compact_paragraph,
                                         nodes.list_item)):
-                    # for <p> and <li>, just indicate the depth level and
-                    # recurse to children
-                    subnode['classes'].append('toctree-l%d' % (depth-1))
-                    _walk_depth(subnode, depth, maxdepth)
-
+                    # for <p> and <li>, just recurse
+                    _toctree_prune(subnode, depth, maxdepth)
                 elif isinstance(subnode, nodes.bullet_list):
                     # for <ul>, determine if the depth is too large or if the
                     # entry is to be collapsed
                     if maxdepth > 0 and depth > maxdepth:
                         subnode.parent.replace(subnode, [])
                     else:
-                        # to find out what to collapse, *first* walk subitems,
-                        # since that determines which children point to the
-                        # current page
-                        _walk_depth(subnode, depth+1, maxdepth)
                         # cull sub-entries whose parents aren't 'current'
                         if (collapse and depth > 1 and
                             'iscurrent' not in subnode.parent):
                             subnode.parent.remove(subnode)
+                        else:
+                            # recurse on visible children
+                            _toctree_prune(subnode, depth+1, maxdepth)
 
+        def _toctree_add_classes(node, depth):
+            """Add 'toctree-l%d' and 'current' classes to the toctree."""
+            for subnode in node.children:
+                if isinstance(subnode, (addnodes.compact_paragraph,
+                                        nodes.list_item)):
+                    # for <p> and <li>, indicate the depth level and recurse
+                    subnode['classes'].append('toctree-l%d' % (depth-1))
+                    _toctree_add_classes(subnode, depth)
+                elif isinstance(subnode, nodes.bullet_list):
+                    # for <ul>, just recurse
+                    _toctree_add_classes(subnode, depth+1)
                 elif isinstance(subnode, nodes.reference):
                     # for <a>, identify which entries point to the current
                     # document and therefore may not be collapsed
         newnode = addnodes.compact_paragraph('', '', *tocentries)
         newnode['toctree'] = True
 
-        # prune the tree to maxdepth and replace titles, also set level classes
-        _walk_depth(newnode, 1, prune and maxdepth or 0)
+        # prune the tree to maxdepth, also set toc depth and current classes
+        _toctree_add_classes(newnode, 1)
+        _toctree_prune(newnode, 1, prune and maxdepth or 0)
 
         # set the target paths in the toctrees (they are not known at TOC
         # generation time)

sphinx/ext/autodoc.py

 
         Returns True if successful, False if an error occurred.
         """
+        if self.objpath:
+            self.env.app.debug('autodoc: from %s import %s',
+                               self.modname, '.'.join(self.objpath))
         try:
+            self.env.app.debug('autodoc: import %s', self.modname)
             __import__(self.modname)
             parent = None
             obj = self.module = sys.modules[self.modname]
+            self.env.app.debug('autodoc: => %r', obj)
             for part in self.objpath:
                 parent = obj
+                self.env.app.debug('autodoc: getattr(_, %r)', part)
                 obj = self.get_attr(obj, part)
+                self.env.app.debug('autodoc: => %r', obj)
                 self.object_name = part
             self.parent = parent
             self.object = obj
         # this used to only catch SyntaxError, ImportError and AttributeError,
         # but importing modules with side effects can raise all kinds of errors
         except Exception, err:
-            if self.env.app and not self.env.app.quiet:
-                self.env.app.info(traceback.format_exc().rstrip())
-            self.directive.warn(
-                'autodoc can\'t import/find %s %r, it reported error: '
-                '"%s", please check your spelling and sys.path' %
-                (self.objtype, str(self.fullname), err))
+            if self.objpath:
+                errmsg = 'autodoc: failed to import %s %r from module %r' % \
+                         (self.objtype, '.'.join(self.objpath), self.modname)
+            else:
+                errmsg = 'autodoc: failed to import %s %r' % \
+                         (self.objtype, self.fullname)
+            errmsg += '; the following exception was raised:\n%s' % \
+                      traceback.format_exc()
+            self.env.app.debug(errmsg)
+            self.directive.warn(errmsg)
             self.env.note_reread()
             return False
 
         self.warnings = []
         self.result = ViewList()
 
+        try:
+            source, lineno = self.reporter.get_source_and_line(self.lineno)
+        except AttributeError:
+            source = lineno = None
+        self.env.app.debug('%s:%s: <input>\n%s',
+                           source, lineno, self.block_text)
+
         # find out what documenter to call
         objtype = self.name[4:]
         doc_class = self._registry[objtype]
         if not self.result:
             return self.warnings
 
+        if self.env.app.verbosity >= 2:
+            self.env.app.debug('autodoc: <output>\n%s', '\n'.join(self.result))
+
         # record all filenames as dependencies -- this will at least
         # partially make automatic invalidation possible
         for fn in self.filename_set:

sphinx/roles.py

         entries = [('single', target, targetid, main)]
     indexnode = addnodes.index()
     indexnode['entries'] = entries
+    set_role_source_info(inliner, lineno, indexnode)
     textnode = nodes.Text(title, title)
     return [indexnode, targetnode, textnode], []
 

sphinx/search/__init__.py

     :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
+from __future__ import with_statement
 import re
+import itertools
 import cPickle as pickle
 
-from docutils.nodes import comment, Text, NodeVisitor, SkipNode
+from docutils.nodes import comment, title, Text, NodeVisitor, SkipNode
 
 from sphinx.util import jsdump, rpartition
 
             (ord(word[0]) < 256 and (len(word) < 3 or word in self.stopwords or
                                      word.isdigit())))
 
+
 from sphinx.search import en, ja
 
 languages = {
     def __init__(self, document, lang):
         NodeVisitor.__init__(self, document)
         self.found_words = []
+        self.found_title_words = []
         self.lang = lang
 
     def dispatch_visit(self, node):
         if node.__class__ is comment:
             raise SkipNode
-        if node.__class__ is Text:
+        elif node.__class__ is Text:
             self.found_words.extend(self.lang.split(node.astext()))
+        elif node.__class__ is title:
+            self.found_title_words.extend(self.lang.split(node.astext()))
 
 
 class IndexBuilder(object):
         'pickle':   pickle
     }
 
-    def __init__(self, env, lang, options):
+    def __init__(self, env, lang, options, scoring):
         self.env = env
         # filename -> title
         self._titles = {}
         # stemmed word -> set(filenames)
         self._mapping = {}
+        # stemmed words in titles -> set(filenames)
+        self._title_mapping = {}
         # objtype -> index
         self._objtypes = {}
         # objtype index -> (domain, type, objname (localized))
         # add language-specific SearchLanguage instance
         self.lang = languages[lang](options)
 
+        if scoring:
+            with open(scoring, 'rb') as fp:
+                self.js_scorer_code = fp.read().decode('utf-8')
+        else:
+            self.js_scorer_code = u''
+
     def load(self, stream, format):
         """Reconstruct from frozen data."""
         if isinstance(format, basestring):
             raise ValueError('old format')
         index2fn = frozen['filenames']
         self._titles = dict(zip(index2fn, frozen['titles']))
-        self._mapping = {}
-        for k, v in frozen['terms'].iteritems():
-            if isinstance(v, int):
-                self._mapping[k] = set([index2fn[v]])
-            else:
-                self._mapping[k] = set(index2fn[i] for i in v)
+
+        def load_terms(mapping):
+            rv = {}
+            for k, v in mapping.iteritems():
+                if isinstance(v, int):
+                    rv[k] = set([index2fn[v]])
+                else:
+                    rv[k] = set(index2fn[i] for i in v)
+            return rv
+
+        self._mapping = load_terms(frozen['terms'])
+        self._title_mapping = load_terms(frozen['titleterms'])
         # no need to load keywords/objtypes
 
     def dump(self, stream, format):
         return rv
 
     def get_terms(self, fn2index):
-        rv = {}
-        for k, v in self._mapping.iteritems():
-            if len(v) == 1:
-                fn, = v
-                if fn in fn2index:
-                    rv[k] = fn2index[fn]
-            else:
-                rv[k] = [fn2index[fn] for fn in v if fn in fn2index]
-        return rv
+        rvs = {}, {}
+        for rv, mapping in zip(rvs, (self._mapping, self._title_mapping)):
+            for k, v in mapping.iteritems():
+                if len(v) == 1:
+                    fn, = v
+                    if fn in fn2index:
+                        rv[k] = fn2index[fn]
+                else:
+                    rv[k] = [fn2index[fn] for fn in v if fn in fn2index]
+        return rvs
 
     def freeze(self):
         """Create a usable data structure for serializing."""
         filenames = self._titles.keys()
         titles = self._titles.values()
         fn2index = dict((f, i) for (i, f) in enumerate(filenames))
-        terms = self.get_terms(fn2index)
+        terms, title_terms = self.get_terms(fn2index)
+
         objects = self.get_objects(fn2index)  # populates _objtypes
         objtypes = dict((v, k[0] + ':' + k[1])
                         for (k, v) in self._objtypes.iteritems())
         objnames = self._objnames
         return dict(filenames=filenames, titles=titles, terms=terms,
-                    objects=objects, objtypes=objtypes, objnames=objnames)
+                    objects=objects, objtypes=objtypes, objnames=objnames,
+                    titleterms=title_terms)
 
     def prune(self, filenames):
         """Remove data for all filenames not in the list."""
         self._titles = new_titles
         for wordnames in self._mapping.itervalues():
             wordnames.intersection_update(filenames)
+        for wordnames in self._title_mapping.itervalues():
+            wordnames.intersection_update(filenames)
 
     def feed(self, filename, title, doctree):
         """Feed a doctree to the index."""
         visitor = WordCollector(doctree, self.lang)
         doctree.walk(visitor)
 
-        def add_term(word, stem=self.lang.stem):
+        stem = self.lang.stem
+        _filter = self.lang.word_filter
+
+        for word in itertools.chain(visitor.found_title_words,
+                                    self.lang.split(title)):
             word = stem(word)
-            if self.lang.word_filter(word):
-                self._mapping.setdefault(word, set()).add(filename)
-
-        for word in self.lang.split(title):
-            add_term(word)
+            if _filter(word):
+                self._title_mapping.setdefault(word, set()).add(filename)
 
         for word in visitor.found_words:
-            add_term(word)
+            word = stem(word)
+            if word not in self._title_mapping and _filter(word):
+                self._mapping.setdefault(word, set()).add(filename)
 
     def context_for_searchtool(self):
         return dict(
             search_language_stemming_code = self.lang.js_stemmer_code,
             search_language_stop_words = jsdump.dumps(sorted(self.lang.stopwords)),
+            search_scorer_tool = self.js_scorer_code,
         )

sphinx/themes/agogo/layout.html

     :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 #}
-{% extends "basic/layout.html" %}
+{%- extends "basic/layout.html" %}
 
 {% block header %}
     <div class="header-wrapper">

sphinx/themes/basic/defindex.html

     :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 #}
-{% extends "layout.html" %}
+{%- extends "layout.html" %}
 {% set title = _('Overview') %}
 {% block body %}
   <h1>{{ docstitle|e }}</h1>

sphinx/themes/basic/domainindex.html

     :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 #}
-{% extends "layout.html" %}
+{%- extends "layout.html" %}
 {% set title = indextitle %}
 {% block extrahead %}
 {{ super() }}

sphinx/themes/basic/genindex-single.html

   </dt>
 {% endmacro %}
 
-{% extends "layout.html" %}
+{%- extends "layout.html" %}
 {% set title = _('Index') %}
 {% block body %}
 

sphinx/themes/basic/genindex-split.html

     :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 #}
-{% extends "layout.html" %}
+{%- extends "layout.html" %}
 {% set title = _('Index') %}
 {% block body %}
 

sphinx/themes/basic/genindex.html

   </dt>
 {% endmacro %}
 
-{% extends "layout.html" %}
+{%- extends "layout.html" %}
 {% set title = _('Index') %}
 {% block body %}
 

sphinx/themes/basic/page.html

     :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 #}
-{% extends "layout.html" %}
+{%- extends "layout.html" %}
 {% block body %}
   {{ body }}
 {% endblock %}

sphinx/themes/basic/search.html

     :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 #}
-{% extends "layout.html" %}
+{%- extends "layout.html" %}
 {% set title = _('Search') %}
 {% set script_files = script_files + ['_static/searchtools.js'] %}
 {% block extrahead %}

sphinx/themes/basic/static/doctools.js

  */
 jQuery.urldecode = function(x) {
   return decodeURIComponent(x).replace(/\+/g, ' ');
-}
+};
 
 /**
  * small helper function to urlencode strings

sphinx/themes/basic/static/searchtools.js_t

  *
  */
 
+{{ search_language_stemming_code|safe }}
+
+{% if search_scorer_tool %}
+{{ search_scorer_tool|safe }}
+{% else %}
 /**
- * helper function to return a node containing the
- * search summary for a given text. keywords is a list
- * of stemmed words, hlwords is the list of normal, unstemmed
- * words. the first one is used to find the occurance, the
- * latter for highlighting it.
+ * Simple result scoring code.
  */
+var Scorer = {
+  // Implement the following function to further tweak the score for each result
+  // The function takes a result array [filename, title, anchor, descr, score]
+  // and returns the new score.
+  /*
+  score: function(result) {
+    return result[4];
+  },
+  */
 
-jQuery.makeSearchSummary = function(text, keywords, hlwords) {
-  var textLower = text.toLowerCase();
-  var start = 0;
-  $.each(keywords, function() {
-    var i = textLower.indexOf(this.toLowerCase());
-    if (i > -1)
-      start = i;
-  });
-  start = Math.max(start - 120, 0);
-  var excerpt = ((start > 0) ? '...' : '') +
-  $.trim(text.substr(start, 240)) +
-  ((start + 240 - text.length) ? '...' : '');
-  var rv = $('<div class="context"></div>').text(excerpt);
-  $.each(hlwords, function() {
-    rv = rv.highlightText(this, 'highlighted');
-  });
-  return rv;
-}
+  // query matches the full name of an object
+  objNameMatch: 11,
+  // or matches in the last dotted part of the object name
+  objPartialMatch: 6,
+  // Additive scores depending on the priority of the object
+  objPrio: {0:  15,   // used to be importantResults
+            1:  5,   // used to be objectResults
+            2: -5},  // used to be unimportantResults
+  // used when the priority is not in the mapping
+  objPrioDefault: 0,
 
-{{ search_language_stemming_code|safe }}
+  // query found in title
+  title: 15,
+  // query found in terms
+  term: 5
+};
+{% endif %}
 
 /**
  * Search Module
     if (this._pulse_status >= 0)
         return;
     function pulse() {
+      var i;
       Search._pulse_status = (Search._pulse_status + 1) % 4;
       var dotString = '';
-      for (var i = 0; i < Search._pulse_status; i++)
+      for (i = 0; i < Search._pulse_status; i++)
         dotString += '.';
       Search.dots.text(dotString);
       if (Search._pulse_status > -1)
         window.setTimeout(pulse, 500);
-    };
+    }
     pulse();
   },
 
   /**
-   * perform a search for something
+   * perform a search for something (or wait until index is loaded)
    */
   performSearch : function(query) {
     // create the required interface elements
       this.deferQuery(query);
   },
 
+  /**
+   * execute search (requires search index to be loaded)
+   */
   query : function(query) {
+    var i;
     var stopwords = {{ search_language_stop_words }};
 
-    // Stem the searchterms and add them to the correct list
+    // stem the searchterms and add them to the correct list
     var stemmer = new Stemmer();
     var searchterms = [];
     var excluded = [];
     var hlterms = [];
     var tmp = query.split(/\s+/);
     var objectterms = [];
-    for (var i = 0; i < tmp.length; i++) {
-      if (tmp[i] != "") {
+    for (i = 0; i < tmp.length; i++) {
+      if (tmp[i] !== "") {
           objectterms.push(tmp[i].toLowerCase());
       }
 
       if ($u.indexOf(stopwords, tmp[i]) != -1 || tmp[i].match(/^\d+$/) ||
-          tmp[i] == "") {
+          tmp[i] === "") {
         // skip this "word"
         continue;
       }
       // stem the word
       var word = stemmer.stemWord(tmp[i]).toLowerCase();
+      var toAppend;
       // select the correct list
       if (word[0] == '-') {
-        var toAppend = excluded;
+        toAppend = excluded;
         word = word.substr(1);
       }
       else {
-        var toAppend = searchterms;
+        toAppend = searchterms;
         hlterms.push(tmp[i].toLowerCase());
       }
       // only add if not already in the list
       if (!$u.contains(toAppend, word))
         toAppend.push(word);
-    };
+    }
     var highlightstring = '?highlight=' + $.urlencode(hlterms.join(" "));
 
     // console.debug('SEARCH: searching for:');
     // console.info('excluded: ', excluded);
 
     // prepare search
-    var filenames = this._index.filenames;
-    var titles = this._index.titles;
     var terms = this._index.terms;
-    var fileMap = {};
-    var files = null;
-    // different result priorities
-    var importantResults = [];
-    var objectResults = [];
-    var regularResults = [];
-    var unimportantResults = [];
+    var titleterms = this._index.titleterms;
+
+    // array of [filename, title, anchor, descr, score]
+    var results = [];
     $('#search-progress').empty();
 
     // lookup as object
-    for (var i = 0; i < objectterms.length; i++) {
-      var others = [].concat(objectterms.slice(0,i),
-                             objectterms.slice(i+1, objectterms.length))
-      var results = this.performObjectSearch(objectterms[i], others);
-      // Assume first word is most likely to be the object,
-      // other words more likely to be in description.
-      // Therefore put matches for earlier words first.
-      // (Results are eventually used in reverse order).
-      objectResults = results[0].concat(objectResults);
-      importantResults = results[1].concat(importantResults);
-      unimportantResults = results[2].concat(unimportantResults);
+    for (i = 0; i < objectterms.length; i++) {
+      var others = [].concat(objectterms.slice(0, i),
+                             objectterms.slice(i+1, objectterms.length));
+      results = results.concat(this.performObjectSearch(objectterms[i], others));
     }
 
-    // perform the search on the required terms
-    for (var i = 0; i < searchterms.length; i++) {
-      var word = searchterms[i];
-      // no match but word was a required one
-      if ((files = terms[word]) == null)
-        break;
-      if (files.length == undefined) {
-        files = [files];
-      }
-      // create the mapping
-      for (var j = 0; j < files.length; j++) {
-        var file = files[j];
-        if (file in fileMap)
-          fileMap[file].push(word);
-        else
-          fileMap[file] = [word];
-      }
+    // lookup as search terms in fulltext
+    results = results.concat(this.performTermsSearch(searchterms, excluded, terms, Scorer.term))
+                     .concat(this.performTermsSearch(searchterms, excluded, titleterms, Scorer.title));
+
+    // let the scorer override scores with a custom scoring function
+    if (Scorer.score) {
+      for (i = 0; i < results.length; i++)
+        results[i][4] = Scorer.score(results[i]);
     }
 
-    // now check if the files don't contain excluded terms
-    for (var file in fileMap) {
-      var valid = true;
-
-      // check if all requirements are matched
-      if (fileMap[file].length != searchterms.length)
-        continue;
-
-      // ensure that none of the excluded terms is in the
-      // search result.
-      for (var i = 0; i < excluded.length; i++) {
-        if (terms[excluded[i]] == file ||
-            $u.contains(terms[excluded[i]] || [], file)) {
-          valid = false;
-          break;
-        }
+    // now sort the results by score (in opposite order of appearance, since the
+    // display function below uses pop() to retrieve items) and then
+    // alphabetically
+    results.sort(function(a, b) {
+      var left = a[4];
+      var right = b[4];
+      if (left > right) {
+        return 1;
+      } else if (left < right) {
+        return -1;
+      } else {
+        // same score: sort alphabetically
+        left = a[1].toLowerCase();
+        right = b[1].toLowerCase();
+        return (left > right) ? -1 : ((left < right) ? 1 : 0);
       }
-
-      // if we have still a valid result we can add it
-      // to the result list
-      if (valid)
-        regularResults.push([filenames[file], titles[file], '', null]);
-    }
-
-    // delete unused variables in order to not waste
-    // memory until list is retrieved completely
-    delete filenames, titles, terms;
-
-    // now sort the regular results descending by title
-    regularResults.sort(function(a, b) {
-      var left = a[1].toLowerCase();
-      var right = b[1].toLowerCase();
-      return (left > right) ? -1 : ((left < right) ? 1 : 0);
     });
 
-    // combine all results
-    var results = unimportantResults.concat(regularResults)
-      .concat(objectResults).concat(importantResults);
+    // for debugging
+    //Search.lastresults = results.slice();  // a copy
+    //console.info('search results:', Search.lastresults);
 
     // print the results
     var resultCount = results.length;
       if (results.length) {
         var item = results.pop();
         var listItem = $('<li style="display:none"></li>');
-        if (DOCUMENTATION_OPTIONS.FILE_SUFFIX == '') {
+        if (DOCUMENTATION_OPTIONS.FILE_SUFFIX === '') {
           // dirhtml builder
           var dirname = item[0] + '/';
           if (dirname.match(/\/index\/$/)) {
         } else if (DOCUMENTATION_OPTIONS.HAS_SOURCE) {
           $.get(DOCUMENTATION_OPTIONS.URL_ROOT + '_sources/' +
                 item[0] + '.txt', function(data) {
-            if (data != '') {
-              listItem.append($.makeSearchSummary(data, searchterms, hlterms));
+            if (data !== '') {
+              listItem.append(Search.makeSearchSummary(data, searchterms, hlterms));
               Search.output.append(listItem);
             }
             listItem.slideDown(5, function() {
     displayNextItem();
   },
 
+  /**
+   * search for object names
+   */
   performObjectSearch : function(object, otherterms) {
     var filenames = this._index.filenames;
     var objects = this._index.objects;
     var objnames = this._index.objnames;
     var titles = this._index.titles;
 
-    var importantResults = [];
-    var objectResults = [];
-    var unimportantResults = [];
+    var i;
+    var results = [];
 
     for (var prefix in objects) {
       for (var name in objects[prefix]) {
         var fullname = (prefix ? prefix + '.' : '') + name;
         if (fullname.toLowerCase().indexOf(object) > -1) {
+          var score = 0;
+          var parts = fullname.split('.');
+          // check for different match types: exact matches of full name or
+          // "last name" (i.e. last dotted part)
+          if (fullname == object || parts[parts.length - 1] == object) {
+            score += Scorer.objNameMatch;
+          // matches in last name
+          } else if (parts[parts.length - 1].indexOf(object) > -1) {
+            score += Scorer.objPartialMatch;
+          }
           var match = objects[prefix][name];
           var objname = objnames[match[1]][2];
           var title = titles[match[0]];
             var haystack = (prefix + ' ' + name + ' ' +
                             objname + ' ' + title).toLowerCase();
             var allfound = true;
-            for (var i = 0; i < otherterms.length; i++) {
+            for (i = 0; i < otherterms.length; i++) {
               if (haystack.indexOf(otherterms[i]) == -1) {
                 allfound = false;
                 break;
             }
           }
           var descr = objname + _(', in ') + title;
-          anchor = match[3];
-          if (anchor == '')
+
+          var anchor = match[3];
+          if (anchor === '')
             anchor = fullname;
           else if (anchor == '-')
             anchor = objnames[match[1]][1] + '-' + fullname;
-          result = [filenames[match[0]], fullname, '#'+anchor, descr];
-          switch (match[2]) {
-          case 1: objectResults.push(result); break;
-          case 0: importantResults.push(result); break;
-          case 2: unimportantResults.push(result); break;
+          // add custom score for some objects according to scorer
+          if (Scorer.objPrio.hasOwnProperty(match[2])) {
+            score += Scorer.objPrio[match[2]];
+          } else {
+            score += Scorer.objPrioDefault;
           }
+          results.push([filenames[match[0]], fullname, '#'+anchor, descr, score]);
         }
       }
     }
 
-    // sort results descending
-    objectResults.sort(function(a, b) {
-      return (a[1] > b[1]) ? -1 : ((a[1] < b[1]) ? 1 : 0);
+    return results;
+  },
+
+  /**
+   * search for full-text terms in the index
+   */
+  performTermsSearch : function(searchterms, excluded, terms, score) {
+    var filenames = this._index.filenames;
+    var titles = this._index.titles;
+
+    var i, j, file, files;
+    var fileMap = {};
+    var results = [];
+
+    // perform the search on the required terms
+    for (i = 0; i < searchterms.length; i++) {
+      var word = searchterms[i];
+      // no match but word was a required one
+      if ((files = terms[word]) === undefined)
+        break;
+      if (files.length === undefined) {
+        files = [files];
+      }
+      // create the mapping
+      for (j = 0; j < files.length; j++) {
+        file = files[j];
+        if (file in fileMap)
+          fileMap[file].push(word);
+        else
+          fileMap[file] = [word];
+      }
+    }
+
+    // now check if the files don't contain excluded terms
+    for (file in fileMap) {
+      var valid = true;
+
+      // check if all requirements are matched
+      if (fileMap[file].length != searchterms.length)
+        continue;
+
+      // ensure that none of the excluded terms is in the search result
+      for (i = 0; i < excluded.length; i++) {
+        if (terms[excluded[i]] == file ||
+          $u.contains(terms[excluded[i]] || [], file)) {
+          valid = false;
+          break;
+        }
+      }
+
+      // if we have still a valid result we can add it to the result list
+      if (valid) {
+        results.push([filenames[file], titles[file], '', null, score]);
+      }
+    }
+    return results;
+  },
+
+  /**
+   * helper function to return a node containing the
+   * search summary for a given text. keywords is a list
+   * of stemmed words, hlwords is the list of normal, unstemmed
+   * words. the first one is used to find the occurance, the
+   * latter for highlighting it.
+   */
+  makeSearchSummary : function(text, keywords, hlwords) {
+    var textLower = text.toLowerCase();
+    var start = 0;
+    $.each(keywords, function() {
+      var i = textLower.indexOf(this.toLowerCase());
+      if (i > -1)
+        start = i;
     });
-
-    importantResults.sort(function(a, b) {
-      return (a[1] > b[1]) ? -1 : ((a[1] < b[1]) ? 1 : 0);
+    start = Math.max(start - 120, 0);
+    var excerpt = ((start > 0) ? '...' : '') +
+      $.trim(text.substr(start, 240)) +
+      ((start + 240 - text.length) ? '...' : '');
+    var rv = $('<div class="context"></div>').text(excerpt);
+    $.each(hlwords, function() {
+      rv = rv.highlightText(this, 'highlighted');
     });
-
-    unimportantResults.sort(function(a, b) {
-      return (a[1] > b[1]) ? -1 : ((a[1] < b[1]) ? 1 : 0);
-    });
-
-    return [importantResults, objectResults, unimportantResults]
+    return rv;
   }
-}
+};
 
 $(document).ready(function() {
   Search.init();

sphinx/themes/default/layout.html

     :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 #}
-{% extends "basic/layout.html" %}
+{%- extends "basic/layout.html" %}
 
 {% if theme_collapsiblesidebar|tobool %}
 {% set script_files = script_files + ['_static/sidebar.js'] %}

sphinx/themes/epub/epub-cover.html

     :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 #}
-{% extends "layout.html" %}
+{%- extends "layout.html" %}
 {%- block header %}{% endblock %}
 {%- block rootrellink %}{% endblock %}
 {%- block relbaritems %}{% endblock %}

sphinx/themes/epub/layout.html

     :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 #}
-{% extends "basic/layout.html" %}
+{%- extends "basic/layout.html" %}
 
 {# add only basic navigation links #}
 {% block sidebar1 %}{% endblock %}

sphinx/themes/haiku/layout.html

     :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 #}
-{% extends "basic/layout.html" %}
+{%- extends "basic/layout.html" %}
 {% set script_files = script_files + ['_static/theme_extras.js'] %}
 {% set css_files = css_files + ['_static/print.css'] %}
 

sphinx/themes/pyramid/layout.html

-{% extends "basic/layout.html" %}
+{%- extends "basic/layout.html" %}
 
 {%- block extrahead %}
 <link rel="stylesheet" href="http://fonts.googleapis.com/css?family=Neuton&amp;subset=latin" type="text/css" media="screen" charset="utf-8" />

sphinx/themes/scrolls/layout.html

     :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 #}
-{% extends "basic/layout.html" %}
+{%- extends "basic/layout.html" %}
 {% set script_files = script_files + ['_static/theme_extras.js'] %}
 {% set css_files = css_files + ['_static/print.css'] %}
 {# do not display relbars #}

sphinx/themes/sphinxdoc/layout.html

     :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 #}
-{% extends "basic/layout.html" %}
+{%- extends "basic/layout.html" %}
 
 {# put the sidebar before the body #}
 {% block sidebar1 %}{{ sidebar() }}{% endblock %}

sphinx/util/__init__.py

         self.stream1.write(text)
         self.stream2.write(text)
 
+    def flush(self):
+        if hasattr(self.stream1, 'flush'):
+            self.stream1.flush()
+        if hasattr(self.stream2, 'flush'):
+            self.stream2.flush()
+
 
 def parselinenos(spec, total):
     """Parse a line number spec (such as "1,2,4-6") and return a list of
     return parts
 
 
+def split_index_msg(type, value):
+    # new entry types must be listed in directives/other.py!
+    result = []
+    try:
+        if type == 'single':
+            try:
+                result = split_into(2, 'single', value)
+            except ValueError:
+                result = split_into(1, 'single', value)
+        elif type == 'pair':
+            result = split_into(2, 'pair', value)
+        elif type == 'triple':
+            result = split_into(3, 'triple', value)
+        elif type == 'see':
+            result = split_into(2, 'see', value)
+        elif type == 'seealso':
+            result = split_into(2, 'see', value)
+    except ValueError:
+        pass
+
+    return result
+
+
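(Editorial usage sketch; the expected return values follow from the
``split_into`` calls above.)

    from sphinx.util import split_index_msg

    split_index_msg('pair', 'module; zipfile')  # -> ['module', 'zipfile']
    split_index_msg('triple', 'a; b; c')        # -> ['a', 'b', 'c']
    split_index_msg('single', 'installation')   # -> ['installation']
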
 def format_exception_cut_frames(x=1):
     """Format an exception with traceback, but only the last x frames."""
     typ, val, tb = sys.exc_info()

sphinx/util/nodes.py

                 node.line = definition_list_item.line - 1
                 node.rawsource = definition_list_item.\
                                  rawsource.split("\n", 2)[0]
+        # workaround: nodes.caption nodes have no source and line attributes.
+        # this issue has been filed in the Docutils tracker:
+        # https://sourceforge.net/tracker/?func=detail&aid=3599485&group_id=38414&atid=422032
+        if isinstance(node, nodes.caption) and not node.source:
+            node.source = node.parent.source
+            node.line = ''  # docutils must be fixed before `node.line` can be set here
+
         if not node.source:
             continue # built-in message
         if isinstance(node, IGNORED_NODES):
             yield node, msg
 
 
+def traverse_translatable_index(doctree):
+    """Traverse translatable index node from a document tree."""
+    def is_block_index(node):
+        return isinstance(node, addnodes.index) and \
+            node.get('inline') is False
+    for node in doctree.traverse(is_block_index):
+        if 'raw_entries' in node:
+            entries = node['raw_entries']
+        else:
+            entries = node['entries']
+        yield node, entries
+
+
 def nested_parse_with_titles(state, content, node):
     """Version of state.nested_parse() that allows titles and does not require
     titles to have the same decoration as the calling document.
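
``traverse_translatable_index`` yields each block-level ``index`` node
together with its entry tuples, preferring the ``raw_entries`` copy (the
entries as originally written, before any processing) when present.  A rough
sketch of a consumer, loosely modelled on what a gettext-style builder would
do; ``catalog`` is a hypothetical message collection, and the 4-tuple entry
layout is an assumption about how index entries are stored at this point::

   for node, entries in traverse_translatable_index(doctree):
       for typ, msg, tid, main in entries:
           # e.g. ('single', 'installation', 'index-0', '')
           for part in split_index_msg(typ, msg):
               catalog.add(part, node)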

sphinx/writers/text.py

     def visit_label(self, node):
         raise nodes.SkipNode
 
+    def visit_legend(self, node):
+        pass
+
+    def depart_legend(self, node):
+        pass
+
     # XXX: option list could use some better styling
 
     def visit_option_list(self, node):
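
The empty ``visit_legend``/``depart_legend`` pair is what fixes the "text
writer can not handle visit_legend" crash noted in the changelog: docutils
dispatches each node to a ``visit_<nodename>`` method and, when none exists,
falls back to ``NodeVisitor.unknown_visit``, which raises
``NotImplementedError``.  A standalone illustration of that dispatch
behaviour (not Sphinx's actual translator)::

   from docutils import nodes, utils, frontend

   settings = frontend.OptionParser(components=()).get_default_values()
   document = utils.new_document('<demo>', settings)

   class Minimal(nodes.NodeVisitor):
       pass

   try:
       Minimal(document).dispatch_visit(nodes.legend())
   except NotImplementedError, exc:
       print exc  # the old text-builder traceback for figure legends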

tests/root/contents.txt

    doctest
    extensions
    versioning/index
-   only
    footnote
-   i18n/index
 
    Python <http://python.org/>
 

tests/root/i18n/definition_terms.po

-# SOME DESCRIPTIVE TITLE.
-# Copyright (C) 2012, foof
-# This file is distributed under the same license as the foo package.
-# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
-#
-#, fuzzy
-msgid ""
-msgstr ""
-"Project-Id-Version: sphinx 1.0\n"
-"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2013-01-01 05:00\n"
-"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
-"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
-"Language-Team: LANGUAGE <LL@li.org>\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=UTF-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-
-msgid "i18n with definition terms"
-msgstr "I18N WITH DEFINITION TERMS"
-
-msgid "Some term"
-msgstr "SOME TERM"
-
-msgid "The corresponding definition"
-msgstr "THE CORRESPONDING DEFINITION"
-
-msgid "Some other term"
-msgstr "SOME OTHER TERM"
-
-msgid "The corresponding definition #2"
-msgstr "THE CORRESPONDING DEFINITION #2"

tests/root/i18n/definition_terms.txt

-:tocdepth: 2
-
-i18n with definition terms
-==========================
-
-Some term
-   The corresponding definition
-
-Some other term
-   The corresponding definition #2
-

tests/root/i18n/external_links.po

-# SOME DESCRIPTIVE TITLE.
-# Copyright (C) 2012, foof
-# This file is distributed under the same license as the foo package.
-# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
-#
-#, fuzzy
-msgid ""
-msgstr ""
-"Project-Id-Version: sphinx 1.0\n"
-"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2012-11-22 08:28\n"
-"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
-"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
-"Language-Team: LANGUAGE <LL@li.org>\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=UTF-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-
-msgid "i18n with external links"
-msgstr "EXTERNAL LINKS"
-
-msgid "External link to Python_."
-msgstr "EXTERNAL LINK TO Python_."
-
-msgid "Internal link to `i18n with external links`_."
-msgstr "`EXTERNAL LINKS`_ IS INTERNAL LINK."
-
-msgid "Inline link by `Sphinx <http://sphinx-doc.org>`_."
-msgstr "INLINE LINK BY `SPHINX <http://sphinx-doc.org>`_."
-
-msgid "Unnamed link__."
-msgstr "UNNAMED LINK__."

tests/root/i18n/external_links.txt

-:tocdepth: 2
-
-i18n with external links
-========================
-.. #1044 external-links-dont-work-in-localized-html
-
-* External link to Python_.
-* Internal link to `i18n with external links`_.
-* Inline link by `Sphinx <http://sphinx-doc.org>`_.
-* Unnamed link__.
-
-.. _Python: http://python.org
-.. __: http://google.com

tests/root/i18n/footnote.po

-# SOME DESCRIPTIVE TITLE.
-# Copyright (C) 2012, foof
-# This file is distributed under the same license as the foo package.
-# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
-#
-#, fuzzy
-msgid ""
-msgstr ""
-"Project-Id-Version: sphinx 1.0\n"
-"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2012-11-22 08:28\n"
-"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
-"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
-"Language-Team: LANGUAGE <LL@li.org>\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=UTF-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-
-msgid "i18n with Footnote"
-msgstr "I18N WITH FOOTNOTE"
-
-msgid "[100]_ Contents [#]_ for `i18n with Footnote`_ [ref]_"
-msgstr "`I18N WITH FOOTNOTE`_ INCLUDE THIS CONTENTS [ref]_ [#]_ [100]_"
-
-msgid "This is a auto numbered footnote."
-msgstr "THIS IS A AUTO NUMBERED FOOTNOTE."
-
-msgid "This is a named footnote."
-msgstr "THIS IS A NAMED FOOTNOTE."
-
-msgid "This is a numbered footnote."
-msgstr "THIS IS A NUMBERED FOOTNOTE."
-

tests/root/i18n/footnote.txt

-:tocdepth: 2
-
-i18n with Footnote
-==================
-.. #955 cant-build-html-with-footnotes-when-using
-
-[100]_ Contents [#]_ for `i18n with Footnote`_ [ref]_
-
-.. [#] This is a auto numbered footnote.
-.. [ref] This is a named footnote.
-.. [100] This is a numbered footnote.

tests/root/i18n/index.txt

-.. toctree::
-   :maxdepth: 2
-   :numbered:
-
-   footnote
-   external_links
-   refs_inconsistency
-   literalblock
-   seealso
-   definition_terms

tests/root/i18n/literalblock.po

-# SOME DESCRIPTIVE TITLE.
-# Copyright (C) 2012, foof
-# This file is distributed under the same license as the foo package.
-# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
-#
-#, fuzzy
-msgid ""
-msgstr ""
-"Project-Id-Version: sphinx 1.0\n"
-"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2012-11-22 08:28\n"
-"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
-"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
-"Language-Team: LANGUAGE <LL@li.org>\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=UTF-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-
-msgid "i18n with literal block"
-msgstr "I18N WITH LITERAL BLOCK"
-
-msgid "Correct literal block::"
-msgstr "CORRECT LITERAL BLOCK::"
-
-msgid "Missing literal block::"
-msgstr "MISSING LITERAL BLOCK::"
-
-msgid "That's all."
-msgstr "THAT'S ALL."
-

tests/root/i18n/literalblock.txt

-:tocdepth: 2
-
-i18n with literal block
-=========================
-
-Correct literal block::
-
-   this is
-   literal block
-
-Missing literal block::
-
-That's all.

tests/root/i18n/refs_inconsistency.po

-# SOME DESCRIPTIVE TITLE.
-# Copyright (C) 2012, foof
-# This file is distributed under the same license as the foo package.
-# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
-#
-#, fuzzy
-msgid ""
-msgstr ""
-"Project-Id-Version: sphinx 1.0\n"
-"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2012-12-05 08:28\n"
-"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
-"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
-"Language-Team: LANGUAGE <LL@li.org>\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=UTF-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-
-msgid "i18n with refs inconsistency"
-msgstr "I18N WITH REFS INCONSISTENCY"
-
-msgid "[100]_ for [#]_ footnote [ref2]_."
-msgstr "FOR FOOTNOTE [ref2]_."
-
-msgid "for reference_."
-msgstr "reference_ FOR reference_."
-
-msgid "normal text."
-msgstr "ORPHAN REFERENCE: `I18N WITH REFS INCONSISTENCY`_."
-
-msgid "This is a auto numbered footnote."
-msgstr "THIS IS A AUTO NUMBERED FOOTNOTE."
-
-msgid "This is a named footnote."
-msgstr "THIS IS A NAMED FOOTNOTE."
-
-msgid "This is a numbered footnote."
-msgstr "THIS IS A NUMBERED FOOTNOTE."
-

tests/root/i18n/refs_inconsistency.txt

-:tocdepth: 2
-
-i18n with refs inconsistency
-=============================
-
-* [100]_ for [#]_ footnote [ref2]_.
-* for reference_.
-* normal text.
-
-.. [#] This is a auto numbered footnote.
-.. [ref2] This is a named footnote.
-.. [100] This is a numbered footnote.
-.. _reference: http://www.example.com

tests/root/i18n/seealso.po