Georg Brandl avatar Georg Brandl committed 32c4f57

#586: Implemented improved glossary markup which allows multiple terms per definition.

Comments (0)

Files changed (11)

 
 * #526: Added Iranian translation.
 
+* #586: Implemented improved glossary markup which allows multiple terms per
+  definition.
+
 * #559: :confval:`html_add_permalinks` is now a string giving the
   text to display in permalinks.
 

doc/markup/para.rst

 
 .. rst:directive:: .. glossary::
 
-   This directive must contain a reST definition list with terms and
-   definitions.  The definitions will then be referencable with the :rst:role:`term`
-   role.  Example::
+   This directive must contain a reST definition-list-like markup with terms and
+   definitions.  The definitions will then be referenceable with the
+   :rst:role:`term` role.  Example::
 
       .. glossary::
 
             The directory which, including its subdirectories, contains all
             source files for one Sphinx project.
 
+   In contrast to regular definition lists, *multiple* terms per entry are
+   allowed, and inline markup is allowed in terms.  You can link to all of the
+   terms.  For example::
+
+      .. glossary::
+
+         term 1
+         term 2
+            Definition of both terms.
+
+   (When the glossary is sorted, the first term determines the sort order.)
+
    .. versionadded:: 0.6
       You can now give the glossary directive a ``:sorted:`` flag that will
       automatically sort the entries alphabetically.
 
+   .. versionchanged:: 1.1
+      Now supports multiple terms and inline markup in terms.
+
 
 Grammar production displays
 ---------------------------

sphinx/addnodes.py

 class abbreviation(nodes.Inline, nodes.TextElement):
     """Node for abbreviations with explanations."""
 
+class termsep(nodes.Structural, nodes.Element):
+    """Separates two terms within a <term> node."""
+
 
 # make the new nodes known to docutils; needed because the HTML writer will
 # choke at some point if these are not added

sphinx/domains/std.py

 
 from docutils import nodes
 from docutils.parsers.rst import directives
+from docutils.statemachine import ViewList
 
 from sphinx import addnodes
 from sphinx.roles import XRefRole
 
 class Glossary(Directive):
     """
-    Directive to create a glossary with cross-reference targets
-    for :term: roles.
+    Directive to create a glossary with cross-reference targets for :term:
+    roles.
     """
 
     has_content = True
         gloss_entries = env.temp_data.setdefault('gloss_entries', set())
         node = addnodes.glossary()
         node.document = self.state.document
-        self.state.nested_parse(self.content, self.content_offset, node)
 
-        # the content should be definition lists
-        dls = [child for child in node
-               if isinstance(child, nodes.definition_list)]
-        # now, extract definition terms to enable cross-reference creation
-        new_dl = nodes.definition_list()
-        new_dl['classes'].append('glossary')
+        # This directive implements a custom format of the reST definition list
+        # that allows multiple lines of terms before the definition.  This is
+        # easy to parse since we know that the contents of the glossary *must
+        # be* a definition list.
+
+        # first, collect single entries
+        entries = []
+        in_definition = True
+        was_empty = True
+        messages = []
+        for (source, lineno, line) in self.content.xitems():
+            # empty line -> add to last definition
+            if not line:
+                if in_definition and entries:
+                    entries[-1][1].append('', source, lineno)
+                was_empty = True
+                continue
+            # unindented line -> a term
+            if line and not line[0].isspace():
+                # first term of definition
+                if in_definition:
+                    if not was_empty:
+                        messages.append(self.state.reporter.system_message(
+                            2, 'glossary term must be preceded by empty line',
+                            source=source, line=lineno))
+                    entries.append(([(line, source, lineno)], ViewList()))
+                    in_definition = False
+                # second term and following
+                else:
+                    if was_empty:
+                        messages.append(self.state.reporter.system_message(
+                            2, 'glossary terms must not be separated by empty '
+                            'lines', source=source, line=lineno))
+                    entries[-1][0].append((line, source, lineno))
+            else:
+                if not in_definition:
+                    # first line of definition, determines indentation
+                    in_definition = True
+                    indent_len = len(line) - len(line.lstrip())
+                entries[-1][1].append(line[indent_len:], source, lineno)
+            was_empty = False
+
+        # now, parse all the entries into a big definition list
         items = []
-        for dl in dls:
-            for li in dl.children:
-                if not li.children or not isinstance(li[0], nodes.term):
-                    continue
-                termtext = li.children[0].astext()
+        for terms, definition in entries:
+            termtexts = []
+            termnodes = []
+            system_messages = []
+            ids = []
+            for line, source, lineno in terms:
+                # parse the term with inline markup
+                res = self.state.inline_text(line, lineno)
+                system_messages.extend(res[1])
+
+                # get a text-only representation of the term and register it
+                # as a cross-reference target
+                tmp = nodes.paragraph('', '', *res[0])
+                termtext = tmp.astext()
                 new_id = 'term-' + nodes.make_id(termtext)
                 if new_id in gloss_entries:
                     new_id = 'term-' + str(len(gloss_entries))
                 gloss_entries.add(new_id)
-                li[0]['names'].append(new_id)
-                li[0]['ids'].append(new_id)
+                ids.append(new_id)
                 objects['term', termtext.lower()] = env.docname, new_id
+                termtexts.append(termtext)
                 # add an index entry too
                 indexnode = addnodes.index()
                 indexnode['entries'] = [('single', termtext, new_id, termtext)]
-                li.insert(0, indexnode)
-                items.append((termtext, li))
+                termnodes += indexnode
+                termnodes.extend(res[0])
+                termnodes.append(addnodes.termsep())
+            # make a single "term" node with all the terms, separated by termsep
+            # nodes (remove the dangling trailing separator)
+            term = nodes.term('', '', *termnodes[:-1])
+            term['ids'].extend(ids)
+            term['names'].extend(ids)
+            term += system_messages
+
+            defnode = nodes.definition()
+            self.state.nested_parse(definition, definition.items[0][1], defnode)
+
+            items.append((termtexts,
+                          nodes.definition_list_item('', term, defnode)))
+
         if 'sorted' in self.options:
-            items.sort(key=lambda x: unicodedata.normalize('NFD', x[0].lower()))
-        new_dl.extend(item[1] for item in items)
-        node.children = [new_dl]
-        return [node]
+            items.sort(key=lambda x:
+                       unicodedata.normalize('NFD', x[0][0].lower()))
+
+        dlist = nodes.definition_list()
+        dlist['classes'].append('glossary')
+        dlist.extend(item[1] for item in items)
+        node += dlist
+        return messages + [node]
 
 
 token_re = re.compile('`([a-z_][a-z0-9_]*)`')
         entries = [('single', target, targetid, target)]
     indexnode = addnodes.index()
     indexnode['entries'] = entries
-    indexnode['inline'] = True
     textnode = nodes.Text(title, title)
     return [indexnode, targetnode, textnode], []
 

sphinx/writers/html.py

     def depart_abbreviation(self, node):
         self.body.append('</abbr>')
 
+    def visit_termsep(self, node):
+        self.body.append('<br />')
+        raise nodes.SkipNode
+
     def depart_title(self, node):
         close_tag = self.context[-1]
         if (self.permalink_text and self.builder.add_permalinks and

sphinx/writers/latex.py

     def depart_term(self, node):
         self.body.append(self.context.pop())
 
+    def visit_termsep(self, node):
+        self.body.append(', ')
+        raise nodes.SkipNode
+
     def visit_classifier(self, node):
         self.body.append('{[}')
     def depart_classifier(self, node):
         self.body.append('\n\\end{flushright}\n')
 
     def visit_index(self, node, scre=re.compile(r';\s*')):
-        if not node.get('inline'):
+        if not node.get('inline', True):
             self.body.append('\n')
         entries = node['entries']
         for type, string, tid, _ in entries:

sphinx/writers/manpage.py

     def depart_versionmodified(self, node):
         self.depart_paragraph(node)
 
+    def visit_termsep(self, node):
+        self.body.append(', ')
+        raise nodes.SkipNode
+
     # overwritten -- we don't want source comments to show up
     def visit_comment(self, node):
         raise nodes.SkipNode

sphinx/writers/texinfo.py

     def depart_term(self, node):
         pass
 
+    def visit_termsep(self, node):
+        self.add_text(self.at_item_x + ' ', fresh=1)
+
     def visit_classifier(self, node):
         self.add_text(' : ')
     def depart_classifier(self, node):

sphinx/writers/text.py

         if not self._li_has_classifier:
             self.end_state(end=None)
 
+    def visit_termsep(self, node):
+        self.add_text(', ')
+        raise nodes.SkipNode
+
     def visit_classifier(self, node):
         self.add_text(' : ')
     def depart_classifier(self, node):

tests/root/markup.txt

    * Monty Python
 
 .. glossary::
+   :sorted:
 
    boson
       Particle with integer spin.
 
-   fermion
+   *fermion*
       Particle with half-integer spin.
 
+   tauon
+   myon
+   electron
+      Examples for fermions.
+
+   über
+      Gewisse
+
+   ähnlich
+      Dinge
+
 .. productionlist::
    try_stmt: `try1_stmt` | `try2_stmt`
    try1_stmt: "try" ":" `suite`
Tip: Filter by directory path e.g. /media app.js to search for public/media/app.js.
Tip: Use camelCasing e.g. ProjME to search for ProjectModifiedEvent.java.
Tip: Filter by extension type e.g. /repo .js to search for all .js files in the /repo directory.
Tip: Separate your search with spaces e.g. /ssh pom.xml to search for src/ssh/pom.xml.
Tip: Use ↑ and ↓ arrow keys to navigate and return to view the file.
Tip: You can also navigate files with Ctrl+j (next) and Ctrl+k (previous) and view the file with Ctrl+o.
Tip: You can also navigate files with Alt+j (next) and Alt+k (previous) and view the file with Alt+o.