Commits

Anonymous committed b2b7f31 Merge

Merge with -main

Comments (0)

Files changed (41)

 bae0833cae75e5a641abe3c4b430fa384cd9d258 1.2
 f6e5acee4f761696676e05a9112c91a5a5670b49 1.2.1
 580c5ce755486bc92c79c50f80cfc79924e15140 1.2.2
+c62867700c9e98cc2988c62f298ec54cee9b6927 1.3
+3a3846c2503db85bb70a243c8bc702629c4bce57 1.3.1
 * Kumar Appaiah -- Debian control lexer
 * Ali Afshar -- image formatter
 * Andreas Amann -- AppleScript lexer
+* Jeremy Ashkenas -- CoffeeScript lexer
 * Stefan Matthias Aust -- Smalltalk lexer
 * Ben Bangert -- Mako lexers
 * Max Battcher -- Darcs patch lexer
 * Laurent Gautier -- R/S lexer
 * Krzysiek Goj -- Scala lexer
 * Matt Good -- Genshi, Cheetah lexers
+* Patrick Gotthardt -- PHP namespaces support
 * Olivier Guibe -- Asymptote lexer
 * Matthew Harrison -- SVG formatter
 * Steven Hazel -- Tcl lexer
 * David Hess, Fish Software, Inc. -- Objective-J lexer
 * Varun Hiremath -- Debian control lexer
 * Dennis Kaarsemaker -- sources.list lexer
+* Benjamin Kowarsch -- Modula-2 lexer
 * Marek Kubica -- Scheme lexer
 * Jochen Kupperschmidt -- Markdown processor
 * Gerd Kurzbach -- Modelica lexer
 * Mario Ruggier -- Evoque lexers
 * Stou Sandalski -- NumPy, FORTRAN, tcsh and XSLT lexers
 * Matteo Sasso -- Common Lisp lexer
+* Joe Schafer -- Ada lexer
 * Ken Schutte -- Matlab lexers
 * Tassilo Schweyer -- Io, MOOCode lexers
 * Joerg Sieker -- ABAP lexer
 
 Issue numbers refer to the tracker at http://dev.pocoo.org/projects/pygments/.
 
-Version 1.3
+Version 1.4
 -----------
 (in development)
 
-- Gherkin lexer: Fixed single apostrophe bug and added new i18n keywords.
+- With the ``noclasses`` option in the HTML formatter, some styles
+  present in the stylesheet were not added as inline styles.
+
+- Three fixes to the Lua lexer (#480, #481, #482).
+
+
+Version 1.3.1
+-------------
+(bugfix release, released Mar 05, 2010)
+
+- The ``pygmentize`` script was missing from the distribution.
+
+
+Version 1.3
+-----------
+(codename Schneeglöckchen, released Mar 01, 2010)
+
+- Added the ``ensurenl`` lexer option, which can be used to suppress the
+  automatic addition of a newline to the lexer input.
 
 - Lexers added:
 
+  * Ada
   * Coldfusion
+  * Modula-2
   * haXe
   * R console
   * Objective-J
   * Haml and Sass
+  * CoffeeScript
+
+- Enhanced reStructuredText highlighting.
+
+- Added support for PHP 5.3 namespaces in the PHP lexer.
+
+- Added a bash completion script for `pygmentize`, to the external/
+  directory (#466).
+
+- Fixed a bug in `do_insertions()` used for multi-lexer languages.
+
+- Fixed a Ruby regex highlighting bug (#476).
+
+- Fixed regex highlighting bugs in Perl lexer (#258).
+
+- Add small enhancements to the C lexer (#467) and Bash lexer (#469).
+
+- Small fixes for the Tcl, Debian control file, Nginx config,
+  Smalltalk, Objective-C, Clojure, Lua lexers.
+
+- Gherkin lexer: Fixed single apostrophe bug and added new i18n keywords.
 
 
 Version 1.2.2
-include external/*.py
+include pygmentize
+include external/*
 include Makefile CHANGES LICENSE AUTHORS TODO ez_setup.py
 recursive-include tests *
 recursive-include docs *

docs/src/integrate.txt

 colorize code via a simple menu option.  It can be found here_.
 
 .. _here: http://antoniocangiano.com/2008/10/28/pygments-textmate-bundle/
+
+Bash completion
+---------------
+
+The source distribution contains a file ``external/pygments.bashcomp`` that
+sets up completion for the ``pygmentize`` command in bash.

docs/src/lexers.txt

     Strip all leading and trailing whitespace from the input (default:
     ``False``).
 
+`ensurenl`
+    Make sure that the input ends with a newline (default: ``True``).  This
+    is required for some lexers that consume input linewise.
+    *New in Pygments 1.3.*
+
 `tabsize`
     If given and greater than 0, expand tabs in the input (default: ``0``).
 

external/pygments.bashcomp

+#!bash
+#
+# Bash completion support for Pygments (the 'pygmentize' command).
+#
+
+_pygmentize()
+{
+    local cur prev
+
+    COMPREPLY=()
+    cur=`_get_cword`
+    prev=${COMP_WORDS[COMP_CWORD-1]}
+
+    case "$prev" in
+        -f)
+            FORMATTERS=`pygmentize -L formatters | grep '* ' | cut -c3- | sed -e 's/,//g' -e 's/:$//'` 
+            COMPREPLY=( $( compgen -W '$FORMATTERS' -- "$cur" ) )
+            return 0
+            ;;
+        -l)
+            LEXERS=`pygmentize -L lexers | grep '* ' | cut -c3- | sed -e 's/,//g' -e 's/:$//'` 
+            COMPREPLY=( $( compgen -W '$LEXERS' -- "$cur" ) )
+            return 0
+            ;;
+        -S)
+            STYLES=`pygmentize -L styles | grep '* ' | cut -c3- | sed s/:$//`
+            COMPREPLY=( $( compgen -W '$STYLES' -- "$cur" ) )
+            return 0
+            ;;
+    esac
+
+    if [[ "$cur" == -* ]]; then
+        COMPREPLY=( $( compgen -W '-f -l -S -L -g -O -P -F \
+                                   -N -H -h -V -o' -- "$cur" ) )
+        return 0
+    fi
+}
+complete -F _pygmentize -o default pygmentize

pygments/__init__.py

     :license: BSD, see LICENSE for details.
 """
 
-__version__ = '1.2.2'
+__version__ = '1.3.1'
 __docformat__ = 'restructuredtext'
 
 __all__ = ['lex', 'format', 'highlight']
 
 
-import sys, os
+import sys
 
 from pygments.util import StringIO, BytesIO
 

pygments/filters/__init__.py

     :copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
-try:
-    set
-except NameError:
-    from sets import Set as set
 
 import re
+
 from pygments.token import String, Comment, Keyword, Name, Error, Whitespace, \
     string_to_tokentype
 from pygments.filter import Filter
-from pygments.util import get_list_opt, get_int_opt, get_bool_opt, get_choice_opt, \
-     ClassNotFound, OptionError
+from pygments.util import get_list_opt, get_int_opt, get_bool_opt, \
+     get_choice_opt, ClassNotFound, OptionError
 from pygments.plugin import find_plugin_filters
 
 

pygments/formatters/__init__.py

 
 from pygments.formatters._mapping import FORMATTERS
 from pygments.plugin import find_plugin_formatters
-from pygments.util import docstring_headline, ClassNotFound
+from pygments.util import ClassNotFound
 
 ns = globals()
 for fcls in FORMATTERS:

pygments/formatters/html.py

     :copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
-import sys, os
+
+import os
+import sys
 import StringIO
 
-try:
-    set
-except NameError:
-    from sets import Set as set
-
 from pygments.formatter import Formatter
 from pygments.token import Token, Text, STANDARD_TYPES
 from pygments.util import get_bool_opt, get_int_opt, get_list_opt, bytes
         st = self.linenostep
         la = self.lineanchors
         aln = self.anchorlinenos
+        nocls = self.noclasses
         if sp:
             lines = []
 
         # in case you wonder about the seemingly redundant <div> here: since the
         # content in the other cell also is wrapped in a div, some browsers in
         # some configurations seem to mess up the formatting...
-        yield 0, ('<table class="%stable">' % self.cssclass +
-                  '<tr><td class="linenos"><div class="linenodiv"><pre>' +
-                  ls + '</pre></div></td><td class="code">')
+        if nocls:
+            yield 0, ('<table class="%stable">' % self.cssclass +
+                      '<tr><td><div class="linenodiv" '
+                      'style="background-color: #f0f0f0; padding-right: 10px">'
+                      '<pre style="line-height: 125%">' +
+                      ls + '</pre></div></td><td class="code">')
+        else:
+            yield 0, ('<table class="%stable">' % self.cssclass +
+                      '<tr><td class="linenos"><div class="linenodiv"><pre>' +
+                      ls + '</pre></div></td><td class="code">')
         yield 0, dummyoutfile.getvalue()
         yield 0, '</td></tr></table>'
 
         num = self.linenostart
         mw = len(str(len(lines) + num - 1))
 
-        if sp:
+        if self.noclasses:
+            if sp:
+                for t, line in lines:
+                    if num%sp == 0:
+                        style = 'background-color: #ffffc0; padding: 0 5px 0 5px'
+                    else:
+                        style = 'background-color: #f0f0f0; padding: 0 5px 0 5px'
+                    yield 1, '<span style="%s">%*s</span> ' % (
+                        style, mw, (num%st and ' ' or num)) + line
+                    num += 1
+            else:
+                for t, line in lines:
+                    yield 1, ('<span style="background-color: #f0f0f0; '
+                              'padding: 0 5px 0 5px">%*s</span> ' % (
+                              mw, (num%st and ' ' or num)) + line)
+                    num += 1
+        elif sp:
             for t, line in lines:
                 yield 1, '<span class="lineno%s">%*s</span> ' % (
                     num%sp == 0 and ' special' or '', mw,

pygments/lexer.py

 """
 import re
 
-try:
-    set
-except NameError:
-    from sets import Set as set
-
 from pygments.filter import apply_filters, Filter
 from pygments.filters import get_filter_by_name
 from pygments.token import Error, Text, Other, _TokenType
 
 
 __all__ = ['Lexer', 'RegexLexer', 'ExtendedRegexLexer', 'DelegatingLexer',
-           'LexerContext', 'include', 'flags', 'bygroups', 'using', 'this']
+           'LexerContext', 'include', 'bygroups', 'using', 'this']
 
 
 _default_analyse = staticmethod(lambda x: 0.0)
     ``stripall``
         Strip all leading and trailing whitespace from the input
         (default: False).
+    ``ensurenl``
+        Make sure that the input ends with a newline (default: True).  This
+        is required for some lexers that consume input linewise.
+        *New in Pygments 1.3.*
     ``tabsize``
         If given and greater than 0, expand tabs in the input (default: 0).
     ``encoding``
         self.options = options
         self.stripnl = get_bool_opt(options, 'stripnl', True)
         self.stripall = get_bool_opt(options, 'stripall', False)
+        self.ensurenl = get_bool_opt(options, 'ensurenl', True)
         self.tabsize = get_int_opt(options, 'tabsize', 0)
         self.encoding = options.get('encoding', 'latin1')
         # self.encoding = options.get('inencoding', None) or self.encoding
             text = text.strip('\n')
         if self.tabsize > 0:
             text = text.expandtabs(self.tabsize)
-        if not text.endswith('\n'):
+        if self.ensurenl and not text.endswith('\n'):
             text += '\n'
 
         def streamer():
         realpos += len(v) - oldi
 
     # leftover tokens
-    if insleft:
+    while insleft:
         # no normal tokens, set realpos to zero
         realpos = realpos or 0
         for p, t, v in itokens:
             yield realpos, t, v
             realpos += len(v)
+        try:
+            index, itokens = insertions.next()
+        except StopIteration:
+            insleft = False
+            break  # not strictly necessary
+

pygments/lexers/__init__.py

     :copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
+
 import sys
+import types
 import fnmatch
-import types
 from os.path import basename
 
-try:
-    set
-except NameError:
-    from sets import Set as set
-
 from pygments.lexers._mapping import LEXERS
 from pygments.plugin import find_plugin_lexers
 from pygments.util import ClassNotFound, bytes
         raise AttributeError(name)
 
 
-import sys
 oldmod = sys.modules['pygments.lexers']
 newmod = _automodule('pygments.lexers')
 newmod.__dict__.update(oldmod.__dict__)

pygments/lexers/_luabuiltins.py

            'debug.setmetatable',
            'debug.setupvalue',
            'debug.traceback'],
- 'io': ['file:close',
-        'file:flush',
-        'file:lines',
-        'file:read',
-        'file:seek',
-        'file:setvbuf',
-        'file:write',
-        'io.close',
+ 'io': ['io.close',
         'io.flush',
         'io.input',
         'io.lines',
             return name.startswith('math')
 
         def is_in_io_module(name):
-            return name.startswith('io.') or name.startswith('file:')
+            return name.startswith('io.')
 
         def is_in_os_module(name):
             return name.startswith('os.')

pygments/lexers/_mapping.py

     'ABAPLexer': ('pygments.lexers.other', 'ABAP', ('abap',), ('*.abap',), ('text/x-abap',)),
     'ActionScript3Lexer': ('pygments.lexers.web', 'ActionScript 3', ('as3', 'actionscript3'), ('*.as',), ('application/x-actionscript', 'text/x-actionscript', 'text/actionscript')),
     'ActionScriptLexer': ('pygments.lexers.web', 'ActionScript', ('as', 'actionscript'), ('*.as',), ('application/x-actionscript', 'text/x-actionscript', 'text/actionscript')),
+    'AdaLexer': ('pygments.lexers.compiled', 'Ada', ('ada', 'ada95', 'ada2005'), ('*.adb', '*.ads', '*.ada'), ('text/x-ada',)),
     'AntlrActionScriptLexer': ('pygments.lexers.parsers', 'ANTLR With ActionScript Target', ('antlr-as', 'antlr-actionscript'), ('*.G', '*.g'), ()),
     'AntlrCSharpLexer': ('pygments.lexers.parsers', 'ANTLR With C# Target', ('antlr-csharp', 'antlr-c#'), ('*.G', '*.g'), ()),
     'AntlrCppLexer': ('pygments.lexers.parsers', 'ANTLR With CPP Target', ('antlr-cpp',), ('*.G', '*.g'), ()),
     'AsymptoteLexer': ('pygments.lexers.other', 'Asymptote', ('asy', 'asymptote'), ('*.asy',), ('text/x-asymptote',)),
     'BBCodeLexer': ('pygments.lexers.text', 'BBCode', ('bbcode',), (), ('text/x-bbcode',)),
     'BaseMakefileLexer': ('pygments.lexers.text', 'Makefile', ('basemake',), (), ()),
-    'BashLexer': ('pygments.lexers.other', 'Bash', ('bash', 'sh'), ('*.sh', '*.ebuild', '*.eclass'), ('application/x-sh', 'application/x-shellscript')),
+    'BashLexer': ('pygments.lexers.other', 'Bash', ('bash', 'sh', 'ksh'), ('*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass'), ('application/x-sh', 'application/x-shellscript')),
     'BashSessionLexer': ('pygments.lexers.other', 'Bash Session', ('console',), ('*.sh-session',), ('application/x-shell-session',)),
     'BatchLexer': ('pygments.lexers.other', 'Batchfile', ('bat',), ('*.bat', '*.cmd'), ('application/x-dos-batch',)),
     'BefungeLexer': ('pygments.lexers.other', 'Befunge', ('befunge',), ('*.befunge',), ('application/x-befunge',)),
     'CheetahLexer': ('pygments.lexers.templates', 'Cheetah', ('cheetah', 'spitfire'), ('*.tmpl', '*.spt'), ('application/x-cheetah', 'application/x-spitfire')),
     'CheetahXmlLexer': ('pygments.lexers.templates', 'XML+Cheetah', ('xml+cheetah', 'xml+spitfire'), (), ('application/xml+cheetah', 'application/xml+spitfire')),
     'ClojureLexer': ('pygments.lexers.agile', 'Clojure', ('clojure', 'clj'), ('*.clj',), ('text/x-clojure', 'application/x-clojure')),
+    'CoffeeScriptLexer': ('pygments.lexers.web', 'CoffeeScript', ('coffee-script', 'coffeescript'), ('*.coffee',), ('text/coffeescript',)),
     'ColdfusionHtmlLexer': ('pygments.lexers.templates', 'Coldufsion HTML', ('cfm',), ('*.cfm', '*.cfml', '*.cfc'), ('application/x-coldfusion',)),
     'ColdfusionLexer': ('pygments.lexers.templates', 'cfstatement', ('cfs',), (), ()),
     'CommonLispLexer': ('pygments.lexers.functional', 'Common Lisp', ('common-lisp', 'cl'), ('*.cl', '*.lisp', '*.el'), ('text/x-common-lisp',)),
     'MatlabSessionLexer': ('pygments.lexers.math', 'Matlab session', ('matlabsession',), (), ()),
     'MiniDLexer': ('pygments.lexers.agile', 'MiniD', ('minid',), ('*.md',), ('text/x-minidsrc',)),
     'ModelicaLexer': ('pygments.lexers.other', 'Modelica', ('modelica',), ('*.mo',), ('text/x-modelica',)),
+    'Modula2Lexer': ('pygments.lexers.compiled', 'Modula-2', ('modula2', 'm2'), ('*.def', '*.mod'), ('text/x-modula2',)),
     'MoinWikiLexer': ('pygments.lexers.text', 'MoinMoin/Trac Wiki markup', ('trac-wiki', 'moin'), (), ('text/x-trac-wiki',)),
     'MuPADLexer': ('pygments.lexers.math', 'MuPAD', ('mupad',), ('*.mu',), ()),
     'MxmlLexer': ('pygments.lexers.web', 'MXML', ('mxml',), ('*.mxml',), ()),

pygments/lexers/agile.py

 """
 
 import re
-try:
-    set
-except NameError:
-    from sets import Set as set
 
 from pygments.lexer import Lexer, RegexLexer, ExtendedRegexLexer, \
      LexerContext, include, combined, do_insertions, bygroups, using
                  r'(?<=^match\s)|'
                  r'(?<=^if\s)|'
                  r'(?<=^elsif\s)'
-             r')(\s*)(/)(?!=)', bygroups(Text, String.Regex), 'multiline-regex'),
+             r')(\s*)(/)', bygroups(Text, String.Regex), 'multiline-regex'),
             # multiline regex (in method calls)
             (r'(?<=\(|,)/', String.Regex, 'multiline-regex'),
             # multiline regex (this time the funny no whitespace rule)
             (r'@(\\\\|\\\@|[^\@])*@[egimosx]*', String.Regex, '#pop'),
             (r'%(\\\\|\\\%|[^\%])*%[egimosx]*', String.Regex, '#pop'),
             (r'\$(\\\\|\\\$|[^\$])*\$[egimosx]*', String.Regex, '#pop'),
-            (r'!(\\\\|\\!|[^!])*![egimosx]*', String.Regex, '#pop'),
         ],
         'root': [
             (r'\#.*?$', Comment.Single),
             (r's\((\\\\|\\\)|[^\)])*\)\s*', String.Regex, 'balanced-regex'),
 
             (r'm?/(\\\\|\\/|[^/\n])*/[gcimosx]*', String.Regex),
+            (r'm(?=[/!\\{<\[\(@%\$])', String.Regex, 'balanced-regex'),
             (r'((?<==~)|(?<=\())\s*/(\\\\|\\/|[^/])*/[gcimosx]*', String.Regex),
             (r'\s+', Text),
             (r'(abs|accept|alarm|atan2|bind|binmode|bless|caller|chdir|'
             (r'(q|qq|qw|qr|qx)\(', String.Other, 'rb-string'),
             (r'(q|qq|qw|qr|qx)\[', String.Other, 'sb-string'),
             (r'(q|qq|qw|qr|qx)\<', String.Other, 'lt-string'),
-            (r'(q|qq|qw|qr|qx)(.)[.\n]*?\1', String.Other),
+            (r'(q|qq|qw|qr|qx)([^a-zA-Z0-9])(.|\n)*?\2', String.Other),
             (r'package\s+', Keyword, 'modulename'),
             (r'sub\s+', Keyword, 'funcname'),
             (r'(\[\]|\*\*|::|<<|>>|>=|<=|<=>|={3}|!=|=~|'
             (r'\\', String.Other),
             (r'\<', String.Other, 'lt-string'),
             (r'\>', String.Other, '#pop'),
-            (r'[^\<\>]]+', String.Other)
+            (r'[^\<\>]+', String.Other)
         ],
         'end-part': [
             (r'.+', Comment.Preproc, '#pop')
 
     name = 'Lua'
     aliases = ['lua']
-    filenames = ['*.lua']
+    filenames = ['*.lua', '*.wlua']
     mimetypes = ['text/x-lua', 'application/x-lua']
 
     tokens = {
         'root': [
+            # lua allows a file to start with a shebang
+            (r'#!(.*?)$', Comment.Preproc),
+            (r'', Text, 'base'),
+        ],
+        'base': [
             (r'(?s)--\[(=*)\[.*?\]\1\]', Comment.Multiline),
             ('--.*$', Comment.Single),
 
 
             (r'\n', Text),
             (r'[^\S\n]', Text),
-            (r'(?s)\[(=*)\[.*?\]\1\]', String.Multiline),
+            # multiline strings
+            (r'(?s)\[(=*)\[.*?\]\1\]', String),
             (r'[\[\]\{\}\(\)\.,:;]', Punctuation),
 
             (r'(==|~=|<=|>=|\.\.|\.\.\.|[=+\-*/%^<>#])', Operator),
 
             (r'[A-Za-z_][A-Za-z0-9_]*(\.[A-Za-z_][A-Za-z0-9_]*)?', Name),
 
-            # multiline strings
-            (r'(?s)\[(=*)\[(.*?)\]\1\]', String),
             ("'", String.Single, combined('stringescape', 'sqs')),
             ('"', String.Double, combined('stringescape', 'dqs'))
         ],
             include('command'),
             include('basic'),
             include('data'),
+            (r'}', Keyword),  # HACK: somehow we miscounted our braces
         ],
         'command': _gen_command_rules(keyword_cmds_re, builtin_cmds_re),
         'command-in-brace': _gen_command_rules(keyword_cmds_re,
             # strings, symbols and characters
             (r'"(\\\\|\\"|[^"])*"', String),
             (r"'" + valid_name, String.Symbol),
-            (r"\\([()/'\".'_!§$%& ?;=+-]{1}|[a-zA-Z0-9]+)", String.Char),
+            (r"\\([()/'\".'_!§$%& ?;=#+-]{1}|[a-zA-Z0-9]+)", String.Char),
 
             # constants
             (r'(#t|#f)', Name.Constant),

pygments/lexers/asm.py

 """
 
 import re
-try:
-    set
-except NameError:
-    from sets import Set as set
 
 from pygments.lexer import RegexLexer, include, bygroups, using, DelegatingLexer
 from pygments.lexers.compiled import DLexer, CppLexer, CLexer

pygments/lexers/compiled.py

 """
 
 import re
-try:
-    set
-except NameError:
-    from sets import Set as set
 
 from pygments.scanner import Scanner
 from pygments.lexer import Lexer, RegexLexer, include, bygroups, using, \
 __all__ = ['CLexer', 'CppLexer', 'DLexer', 'DelphiLexer', 'JavaLexer',
            'ScalaLexer', 'DylanLexer', 'OcamlLexer', 'ObjectiveCLexer',
            'FortranLexer', 'GLShaderLexer', 'PrologLexer', 'CythonLexer',
-           'ValaLexer', 'OocLexer', 'GoLexer', 'FelixLexer']
+           'ValaLexer', 'OocLexer', 'GoLexer', 'FelixLexer', 'AdaLexer',
+           'Modula2Lexer']
 
 
 class CLexer(RegexLexer):
             (r'0x[0-9a-fA-F]+[Ll]?', Number.Hex),
             (r'0[0-7]+[Ll]?', Number.Oct),
             (r'\d+[Ll]?', Number.Integer),
+            (r'\*/', Error),
             (r'[~!%^&*+=|?:<>/-]', Operator),
             (r'[()\[\],.]', Punctuation),
             (r'\b(case)(.+?)(:)', bygroups(Keyword, using(this), Text)),
             (r'0x[0-9a-fA-F]+[Ll]?', Number.Hex),
             (r'0[0-7]+[Ll]?', Number.Oct),
             (r'\d+[Ll]?', Number.Integer),
+            (r'\*/', Error),
             (r'[~!%^&*+=|?:<>/-]', Operator),
             (r'[()\[\],.;]', Punctuation),
             (r'(asm|auto|break|case|catch|const|const_cast|continue|'
         ],
     }
 
+
 class ScalaLexer(RegexLexer):
     """
     For `Scala <http://www.scala-lang.org>`_ source code.
             (r'__(asm|int8|based|except|int16|stdcall|cdecl|fastcall|int32|'
              r'declspec|finally|int64|try|leave)\b', Keyword.Reserved),
             (r'(TRUE|FALSE|nil|NULL)\b', Name.Builtin),
-            ('[a-zA-Z_][a-zA-Z0-9_]*:(?!:)', Name.Label),
-            ('[a-zA-Z_][a-zA-Z0-9_]*', Name),
+            ('[a-zA-Z$_][a-zA-Z0-9$_]*:(?!:)', Name.Label),
+            ('[a-zA-Z$_][a-zA-Z0-9$_]*', Name),
         ],
         'root': [
             include('whitespace'),
             # functions
             (r'((?:[a-zA-Z0-9_*\s])+?(?:\s|[*]))'    # return arguments
-             r'([a-zA-Z_][a-zA-Z0-9_]*)'             # method name
+             r'([a-zA-Z$_][a-zA-Z0-9$_]*)'           # method name
              r'(\s*\([^;]*?\))'                      # signature
              r'(' + _ws + r')({)',
              bygroups(using(this), Name.Function,
              'function'),
             # function declarations
             (r'((?:[a-zA-Z0-9_*\s])+?(?:\s|[*]))'    # return arguments
-             r'([a-zA-Z_][a-zA-Z0-9_]*)'             # method name
+             r'([a-zA-Z$_][a-zA-Z0-9$_]*)'           # method name
              r'(\s*\([^;]*?\))'                      # signature
              r'(' + _ws + r')(;)',
              bygroups(using(this), Name.Function,
         ],
         'classname' : [
             # interface definition that inherits
-            ('([a-zA-Z_][a-zA-Z0-9_]*)(\s*:\s*)([a-zA-Z_][a-zA-Z0-9_]*)?',
+            ('([a-zA-Z$_][a-zA-Z0-9$_]*)(\s*:\s*)([a-zA-Z$_][a-zA-Z0-9$_]*)?',
              bygroups(Name.Class, Text, Name.Class), '#pop'),
             # interface definition for a category
-            ('([a-zA-Z_][a-zA-Z0-9_]*)(\s*)(\([a-zA-Z_][a-zA-Z0-9_]*\))',
+            ('([a-zA-Z$_][a-zA-Z0-9$_]*)(\s*)(\([a-zA-Z$_][a-zA-Z0-9$_]*\))',
              bygroups(Name.Class, Text, Name.Label), '#pop'),
             # simple interface / implementation
-            ('([a-zA-Z_][a-zA-Z0-9_]*)', Name.Class, '#pop')
+            ('([a-zA-Z$_][a-zA-Z0-9$_]*)', Name.Class, '#pop')
         ],
         'forward_classname' : [
-          ('([a-zA-Z_][a-zA-Z0-9_]*)(\s*,\s*)',
+          ('([a-zA-Z$_][a-zA-Z0-9$_]*)(\s*,\s*)',
            bygroups(Name.Class, Text), 'forward_classname'),
-          ('([a-zA-Z_][a-zA-Z0-9_]*)(\s*;?)',
+          ('([a-zA-Z$_][a-zA-Z0-9$_]*)(\s*;?)',
            bygroups(Name.Class, Text), '#pop')
         ],
         'statement' : [
             include('nl')
         ],
      }
+
+
+class AdaLexer(RegexLexer):
+    """
+    For Ada source code.
+
+    *New in Pygments 1.3.*
+    """
+
+    name = 'Ada'
+    aliases = ['ada', 'ada95', 'ada2005']
+    filenames = ['*.adb', '*.ads', '*.ada']
+    mimetypes = ['text/x-ada']
+
+    flags = re.MULTILINE | re.I  # Ignore case
+
+    _ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
+
+    tokens = {
+        'root': [
+            (r'[^\S\n]+', Text),
+            (r'--.*?\n', Comment.Single),
+            (r'[^\S\n]+', Text),
+            (r'function|procedure|entry', Keyword.Declaration, 'subprogram'),
+            (r'(subtype|type)(\s+)([a-z0-9_]+)',
+             bygroups(Keyword.Declaration, Text, Keyword.Type), 'type_def'),
+            (r'task|protected', Keyword.Declaration),
+            (r'(subtype)(\s+)', bygroups(Keyword.Declaration, Text)),
+            (r'(end)(\s+)', bygroups(Keyword.Reserved, Text), 'end'),
+            (r'(pragma)(\s+)([a-zA-Z0-9_]+)', bygroups(Keyword.Reserved, Text,
+                                                       Comment.Preproc)),
+            (r'(true|false|null)\b', Keyword.Constant),
+            (r'(Byte|Character|Float|Integer|Long_Float|Long_Integer|'
+             r'Long_Long_Float|Long_Long_Integer|Natural|Positive|Short_Float|'
+             r'Short_Integer|Short_Short_Float|Short_Short_Integer|String|'
+             r'Wide_String|Duration)\b', Keyword.Type),
+            (r'(and(\s+then)?|in|mod|not|or(\s+else)?|rem)\b', Operator.Word),
+            (r'generic|private', Keyword.Declaration),
+            (r'package', Keyword.Declaration, 'package'),
+            (r'array\b', Keyword.Reserved, 'array_def'),
+            (r'(with|use)(\s+)', bygroups(Keyword.Namespace, Text), 'import'),
+            (r'([a-z0-9_]+)(\s*)(:)(\s*)(constant)',
+             bygroups(Name.Constant, Text, Punctuation, Text,
+                      Keyword.Reserved)),
+            (r'<<[a-z0-9_]+>>', Name.Label),
+            (r'([a-z0-9_]+)(\s*)(:)(\s*)(declare|begin|loop|for|while)',
+             bygroups(Name.Label, Text, Punctuation, Text, Keyword.Reserved)),
+            (r'\b(abort|abs|abstract|accept|access|aliased|all|array|at|begin|'
+             r'body|case|constant|declare|delay|delta|digits|do|else|elsif|end|'
+             r'entry|exception|exit|interface|for|goto|if|is|limited|loop|new|'
+             r'null|of|or|others|out|overriding|pragma|protected|raise|range|'
+             r'record|renames|requeue|return|reverse|select|separate|subtype|'
+             r'synchronized|task|tagged|terminate|then|type|until|when|while|'
+             r'xor)\b',
+             Keyword.Reserved),
+            (r'"[^"]*"', String),
+            include('attribute'),
+            include('numbers'),
+            (r"'[^']'", String.Character),
+            (r'([a-z0-9_]+)(\s*|[(,])', bygroups(Name, using(this))),
+            (r"(<>|=>|:=|[\(\)\|:;,.'])", Punctuation),
+            (r'[*<>+=/&-]', Operator),
+            (r'\n+', Text),
+        ],
+        'numbers' : [
+            (r'[0-9_]+#[0-9a-f]+#', Number.Hex),
+            (r'[0-9_]+\.[0-9_]*', Number.Float),
+            (r'[0-9_]+', Number.Integer),
+        ],
+        'attribute' : [
+            (r"(')([a-zA-Z0-9_]+)", bygroups(Punctuation, Name.Attribute)),
+        ],
+        'subprogram' : [
+            (r'\(', Punctuation, ('#pop', 'formal_part')),
+            (r';', Punctuation, '#pop'),
+            (r'is\b', Keyword.Reserved, '#pop'),
+            (r'"[^"]+"|[a-z0-9_]+', Name.Function),
+            include('root'),
+        ],
+        'end' : [
+            ('(if|case|record|loop|select)', Keyword.Reserved),
+            ('"[^"]+"|[a-zA-Z0-9_]+', Name.Function),
+            ('[\n\s]+', Text),
+            (';', Punctuation, '#pop'),
+        ],
+        'type_def': [
+            (r';', Punctuation, '#pop'),
+            (r'\(', Punctuation, 'formal_part'),
+            (r'with|and|use', Keyword.Reserved),
+            (r'array\b', Keyword.Reserved, ('#pop', 'array_def')),
+            (r'record\b', Keyword.Reserved, ('formal_part')),
+            include('root'),
+        ],
+        'array_def' : [
+            (r';', Punctuation, '#pop'),
+            (r'([a-z0-9_]+)(\s+)(range)', bygroups(Keyword.Type, Text,
+                                                   Keyword.Reserved)),
+            include('root'),
+        ],
+        'import': [
+            (r'[a-z0-9_.]+', Name.Namespace, '#pop'),
+        ],
+        'formal_part' : [
+            (r'\)', Punctuation, '#pop'),
+            (r'([a-z0-9_]+)(\s*)(,|:[^=])', bygroups(Name.Variable,
+                                                     Text, Punctuation)),
+            (r'(in|not|null|out|access)\b', Keyword.Reserved),
+            include('root'),
+        ],
+        'package': [
+            ('body', Keyword.Declaration),
+            ('is\s+new|renames', Keyword.Reserved),
+            ('is', Keyword.Reserved, '#pop'),
+            (';', Punctuation, '#pop'),
+            ('\(', Punctuation, 'package_instantiation'),
+            ('([a-zA-Z0-9_.]+)', Name.Class),
+            include('root'),
+        ],
+        'package_instantiation': [
+            (r'("[^"]+"|[a-z0-9_]+)(\s+)(=>)', bygroups(Name.Variable,
+                                                        Text, Punctuation)),
+            (r'[a-z0-9._\'"]', Text),
+            (r'\)', Punctuation, '#pop'),
+            include('root'),
+        ],
+    }
+
+
+class Modula2Lexer(RegexLexer):
+    """
+    For `Modula-2 <http://www.modula2.org/>`_ source code.
+
+    Additional options that determine which keywords are highlighted:
+
+    `pim`
+        Select PIM Modula-2 dialect (default: True).
+    `iso`
+        Select ISO Modula-2 dialect (default: False).
+    `objm2`
+        Select Objective Modula-2 dialect (default: False).
+    `gm2ext`
+        Also highlight GNU extensions (default: False).
+
+    *New in Pygments 1.3.*
+    """
+    name = 'Modula-2'
+    aliases = ['modula2', 'm2']
+    filenames = ['*.def', '*.mod']
+    mimetypes = ['text/x-modula2']
+
+    flags = re.MULTILINE | re.DOTALL
+
+    tokens = {
+        'whitespace': [
+            (r'\n+', Text), # blank lines
+            (r'\s+', Text), # whitespace
+        ],
+        'identifiers': [
+            (r'([a-zA-Z_\$][a-zA-Z0-9_\$]*)', Name),
+        ],
+        'numliterals': [
+            (r'[01]+B', Number.Binary),        # binary number (ObjM2)
+            (r'[0-7]+B', Number.Oct),          # octal number (PIM + ISO)
+            (r'[0-7]+C', Number.Oct),          # char code (PIM + ISO)
+            (r'[0-9A-F]+C', Number.Hex),       # char code (ObjM2)
+            (r'[0-9A-F]+H', Number.Hex),       # hexadecimal number
+            (r'[0-9]+\.[0-9]+E[+-][0-9]+', Number.Float), # real number
+            (r'[0-9]+\.[0-9]+', Number.Float), # real number
+            (r'[0-9]+', Number.Integer),       # decimal whole number
+        ],
+        'strings': [
+            (r"'(\\\\|\\'|[^'])*'", String), # single quoted string
+            (r'"(\\\\|\\"|[^"])*"', String), # double quoted string
+        ],
+        'operators': [
+            (r'[*/+=#~&<>\^-]', Operator),
+            (r':=', Operator),   # assignment
+            (r'@', Operator),    # pointer deref (ISO)
+            (r'\.\.', Operator), # ellipsis or range
+            (r'`', Operator),    # Smalltalk message (ObjM2)
+            (r'::', Operator),   # type conversion (ObjM2)
+        ],
+        'punctuation': [
+            (r'[\(\)\[\]{},.:;|]', Punctuation),
+        ],
+        'comments': [
+            (r'//.*?\n', Comment.Single),       # ObjM2
+            (r'/\*(.*?)\*/', Comment.Multiline), # ObjM2
+            (r'\(\*([^\$].*?)\*\)', Comment.Multiline),
+            # TODO: nesting of (* ... *) comments
+        ],
+        'pragmas': [
+            (r'\(\*\$(.*?)\*\)', Comment.Preproc), # PIM
+            (r'<\*(.*?)\*>', Comment.Preproc),     # ISO + ObjM2
+        ],
+        'root': [
+            include('whitespace'),
+            include('comments'),
+            include('pragmas'),
+            include('identifiers'),
+            include('numliterals'),
+            include('strings'),
+            include('operators'),
+            include('punctuation'),
+        ]
+    }
+
+    pim_reserved_words = [
+        # 40 reserved words
+        'AND', 'ARRAY', 'BEGIN', 'BY', 'CASE', 'CONST', 'DEFINITION',
+        'DIV', 'DO', 'ELSE', 'ELSIF', 'END', 'EXIT', 'EXPORT', 'FOR',
+        'FROM', 'IF', 'IMPLEMENTATION', 'IMPORT', 'IN', 'LOOP', 'MOD',
+        'MODULE', 'NOT', 'OF', 'OR', 'POINTER', 'PROCEDURE', 'QUALIFIED',
+        'RECORD', 'REPEAT', 'RETURN', 'SET', 'THEN', 'TO', 'TYPE',
+        'UNTIL', 'VAR', 'WHILE', 'WITH',
+    ]
+
+    pim_pervasives = [
+        # 31 pervasives
+        'ABS', 'BITSET', 'BOOLEAN', 'CAP', 'CARDINAL', 'CHAR', 'CHR', 'DEC',
+        'DISPOSE', 'EXCL', 'FALSE', 'FLOAT', 'HALT', 'HIGH', 'INC', 'INCL',
+        'INTEGER', 'LONGINT', 'LONGREAL', 'MAX', 'MIN', 'NEW', 'NIL', 'ODD',
+        'ORD', 'PROC', 'REAL', 'SIZE', 'TRUE', 'TRUNC', 'VAL',
+    ]
+
+    iso_reserved_words = [
+        # 46 reserved words
+        'AND', 'ARRAY', 'BEGIN', 'BY', 'CASE', 'CONST', 'DEFINITION', 'DIV',
+        'DO', 'ELSE', 'ELSIF', 'END', 'EXCEPT', 'EXIT', 'EXPORT', 'FINALLY',
+        'FOR', 'FORWARD', 'FROM', 'IF', 'IMPLEMENTATION', 'IMPORT', 'IN',
+        'LOOP', 'MOD', 'MODULE', 'NOT', 'OF', 'OR', 'PACKEDSET', 'POINTER',
+        'PROCEDURE', 'QUALIFIED', 'RECORD', 'REPEAT', 'REM', 'RETRY',
+        'RETURN', 'SET', 'THEN', 'TO', 'TYPE', 'UNTIL', 'VAR', 'WHILE',
+        'WITH',
+    ]
+
+    iso_pervasives = [
+        # 42 pervasives
+        'ABS', 'BITSET', 'BOOLEAN', 'CAP', 'CARDINAL', 'CHAR', 'CHR', 'CMPLX',
+        'COMPLEX', 'DEC', 'DISPOSE', 'EXCL', 'FALSE', 'FLOAT', 'HALT', 'HIGH',
+        'IM', 'INC', 'INCL', 'INT', 'INTEGER', 'INTERRUPTIBLE', 'LENGTH',
+        'LFLOAT', 'LONGCOMPLEX', 'LONGINT', 'LONGREAL', 'MAX', 'MIN', 'NEW',
+        'NIL', 'ODD', 'ORD', 'PROC', 'PROTECTION', 'RE', 'REAL', 'SIZE',
+        'TRUE', 'TRUNC', 'UNINTERRUPTIBLE', 'VAL',
+    ]
+
+    objm2_reserved_words = [
+        # base language, 42 reserved words
+        'AND', 'ARRAY', 'BEGIN', 'BY', 'CASE', 'CONST', 'DEFINITION', 'DIV',
+        'DO', 'ELSE', 'ELSIF', 'END', 'ENUM', 'EXIT', 'FOR', 'FROM', 'IF',
+        'IMMUTABLE', 'IMPLEMENTATION', 'IMPORT', 'IN', 'IS', 'LOOP', 'MOD',
+        'MODULE', 'NOT', 'OF', 'OPAQUE', 'OR', 'POINTER', 'PROCEDURE',
+        'RECORD', 'REPEAT', 'RETURN', 'SET', 'THEN', 'TO', 'TYPE',
+        'UNTIL', 'VAR', 'VARIADIC', 'WHILE',
+        # OO extensions, 16 reserved words
+        'BYCOPY', 'BYREF', 'CLASS', 'CONTINUE', 'CRITICAL', 'INOUT', 'METHOD',
+        'ON', 'OPTIONAL', 'OUT', 'PRIVATE', 'PROTECTED', 'PROTOCOL', 'PUBLIC',
+        'SUPER', 'TRY',
+    ]
+
+    objm2_pervasives = [
+        # base language, 38 pervasives
+        'ABS', 'BITSET', 'BOOLEAN', 'CARDINAL', 'CHAR', 'CHR', 'DISPOSE',
+        'FALSE', 'HALT', 'HIGH', 'INTEGER', 'INRANGE', 'LENGTH', 'LONGCARD',
+        'LONGINT', 'LONGREAL', 'MAX', 'MIN', 'NEG', 'NEW', 'NEXTV', 'NIL',
+        'OCTET', 'ODD', 'ORD', 'PRED', 'PROC', 'READ', 'REAL', 'SUCC', 'TMAX',
+        'TMIN', 'TRUE', 'TSIZE', 'UNICHAR', 'VAL', 'WRITE', 'WRITEF',
+        # OO extensions, 3 pervasives
+        'OBJECT', 'NO', 'YES',
+    ]
+
+    gnu_reserved_words = [
+        # 10 additional reserved words
+        'ASM', '__ATTRIBUTE__', '__BUILTIN__', '__COLUMN__', '__DATE__',
+        '__FILE__', '__FUNCTION__', '__LINE__', '__MODULE__', 'VOLATILE',
+    ]
+
+    gnu_pervasives = [
+        # 21 identifiers, actually from pseudo-module SYSTEM
+        # but we will highlight them as if they were pervasives
+        'BITSET8', 'BITSET16', 'BITSET32', 'CARDINAL8', 'CARDINAL16',
+        'CARDINAL32', 'CARDINAL64', 'COMPLEX32', 'COMPLEX64', 'COMPLEX96',
+        'COMPLEX128', 'INTEGER8', 'INTEGER16', 'INTEGER32', 'INTEGER64',
+        'REAL8', 'REAL16', 'REAL32', 'REAL96', 'REAL128', 'THROW',
+    ]
+
+    def __init__(self, **options):
+        self.reserved_words = set()
+        self.pervasives = set()
+        # ISO Modula-2
+        if get_bool_opt(options, 'iso', False):
+            self.reserved_words.update(self.iso_reserved_words)
+            self.pervasives.update(self.iso_pervasives)
+        # Objective Modula-2
+        elif get_bool_opt(options, 'objm2', False):
+            self.reserved_words.update(self.objm2_reserved_words)
+            self.pervasives.update(self.objm2_pervasives)
+        # PIM Modula-2 (DEFAULT)
+        else:
+            self.reserved_words.update(self.pim_reserved_words)
+            self.pervasives.update(self.pim_pervasives)
+        # GNU extensions
+        if get_bool_opt(options, 'gm2ext', False):
+            self.reserved_words.update(self.gnu_reserved_words)
+            self.pervasives.update(self.gnu_pervasives)
+        # initialise
+        RegexLexer.__init__(self, **options)
+
+    def get_tokens_unprocessed(self, text):
+        for index, token, value in \
+            RegexLexer.get_tokens_unprocessed(self, text):
+            # check for reserved words and pervasives
+            if token is Name:
+                if value in self.reserved_words:
+                    token = Keyword.Reserved
+                elif value in self.pervasives:
+                    token = Keyword.Pervasive
+            # return result
+            yield index, token, value

pygments/lexers/functional.py

 """
 
 import re
-try:
-    set
-except NameError:
-    from sets import Set as set
 
 from pygments.lexer import Lexer, RegexLexer, bygroups, include, do_insertions
 from pygments.token import Text, Comment, Operator, Keyword, Name, \
 
         style = self.options.get('litstyle')
         if style is None:
-            style = (text.lstrip()[0] in '%\\') and 'latex' or 'bird'
+            style = (text.lstrip()[0:1] in '%\\') and 'latex' or 'bird'
 
         code = ''
         insertions = []
 
     tokens = {
         'escape-sequence': [
-            (r'\\[\"\'ntbr]', String.Escape),
+            (r'\\[\\\"\'ntbr]', String.Escape),
             (r'\\[0-9]{3}', String.Escape),
             (r'\\x[0-9a-fA-F]{2}', String.Escape),
         ],

pygments/lexers/math.py

 """
 
 import re
-try:
-    set
-except NameError:
-    from sets import Set as set
 
 from pygments.lexer import Lexer, RegexLexer, bygroups, include, do_insertions
 from pygments.token import Comment, String, Punctuation, Keyword, Name, \

pygments/lexers/other.py

 
 class BashLexer(RegexLexer):
     """
-    Lexer for (ba)sh shell scripts.
+    Lexer for (ba|k|)sh shell scripts.
 
     *New in Pygments 0.6.*
     """
 
     name = 'Bash'
-    aliases = ['bash', 'sh']
-    filenames = ['*.sh', '*.ebuild', '*.eclass']
+    aliases = ['bash', 'sh', 'ksh']
+    filenames = ['*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass']
     mimetypes = ['application/x-sh', 'application/x-shellscript']
 
     tokens = {
         ],
         '_parenth_helper' : [
             include('whitespaces'),
+            (r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number),
             (r'[-+*/\\~<>=|&#!?,@%\w+:]+', String.Symbol),
             # literals
             (r'\'[^\']*\'', String),
             (r'\$.', String.Char),
-            (r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number),
             (r'#*\(', String.Symbol, 'inner_parenth'),
         ],
         'parenth' : [

pygments/lexers/parsers.py

 import re
 
 from pygments.lexer import RegexLexer, DelegatingLexer, \
-    include, bygroups, using, this
-from pygments.token import Error, Punctuation, Generic, Other, \
-    Text, Comment, Operator, Keyword, Name, String, Number, Whitespace
+    include, bygroups, using
+from pygments.token import Punctuation, Other, Text, Comment, Operator, \
+     Keyword, Name, String, Number, Whitespace
 from pygments.lexers.compiled import JavaLexer, CLexer, CppLexer, \
     ObjectiveCLexer, DLexer
 from pygments.lexers.dotnet import CSharpLexer
 from pygments.lexers.agile import RubyLexer, PythonLexer, PerlLexer
 from pygments.lexers.web import ActionScriptLexer
-# Use TextLexer during development to just focus on one part of a delegating
-# lexer.
-from pygments.lexers.special import TextLexer
+
 
 __all__ = ['RagelLexer', 'RagelEmbeddedLexer', 'RagelCLexer', 'RagelDLexer',
            'RagelCppLexer', 'RagelObjectiveCLexer', 'RagelRubyLexer',

pygments/lexers/templates.py

 """
 
 import re
-try:
-    set
-except NameError:
-    from sets import Set as set
 
 from pygments.lexers.web import \
      PhpLexer, HtmlLexer, XmlLexer, JavascriptLexer, CssLexer

pygments/lexers/text.py

 """
 
 import re
-try:
-    set
-except NameError:
-    from sets import Set as set
 from bisect import bisect
 
 from pygments.lexer import Lexer, LexerContext, RegexLexer, ExtendedRegexLexer, \
     tokens = {
         'root': [
             # Heading with overline
-            (r'^(=+|-+|`+|:+|\.+|\'+|"+|~+|\^+|_+|\*+|\++|#+)([ \t]*\n)(.+)(\n)(\1)(\n)',
+            (r'^(=+|-+|`+|:+|\.+|\'+|"+|~+|\^+|_+|\*+|\++|#+)([ \t]*\n)'
+             r'(.+)(\n)(\1)(\n)',
              bygroups(Generic.Heading, Text, Generic.Heading,
                       Text, Generic.Heading, Text)),
             # Plain heading
              bygroups(Text, Number, using(this, state='inline'))),
             (r'^(\s*)(\(?[A-Za-z]+\))( .+\n(?:\1  .+\n)+)',
              bygroups(Text, Number, using(this, state='inline'))),
+            # Line blocks
+            (r'^(\s*)(\|)( .+\n(?:\|  .+\n)*)',
+             bygroups(Text, Operator, using(this, state='inline'))),
             # Sourcecode directives
             (r'^( *\.\.)(\s*)((?:source)?code)(::)([ \t]*)([^\n]+)'
              r'(\n[ \t]*\n)([ \t]+)(.*)(\n)((?:(?:\8.*|)\n)+)',
              _handle_sourcecode),
             # A directive
-            (r'^( *\.\.)(\s*)([\w-]+)(::)(?:([ \t]*)(.+))?',
-             bygroups(Punctuation, Text, Operator.Word, Punctuation, Text, Keyword)),
+            (r'^( *\.\.)(\s*)([\w:-]+?)(::)(?:([ \t]*)(.*))',
+             bygroups(Punctuation, Text, Operator.Word, Punctuation, Text,
+                      using(this, state='inline'))),
             # A reference target
             (r'^( *\.\.)(\s*)([\w\t ]+:)(.*?)$',
              bygroups(Punctuation, Text, Name.Tag, using(this, state='inline'))),
             # A footnote target
             (r'^( *\.\.)(\s*)(\[.+\])(.*?)$',
              bygroups(Punctuation, Text, Name.Tag, using(this, state='inline'))),
+            # A substitution def
+            (r'^( *\.\.)(\s*)(\|.+\|)(\s*)([\w:-]+?)(::)(?:([ \t]*)(.*))',
+             bygroups(Punctuation, Text, Name.Tag, Text, Operator.Word,
+                      Punctuation, Text, using(this, state='inline'))),
             # Comments
             (r'^ *\.\..*(\n( +.*\n|\n)+)?', Comment.Preproc),
             # Field list
-            (r'^( *)(:.*?:)([ \t]+)(.*?)$', bygroups(Text, Name.Class, Text,
-                                                     Name.Function)),
+            (r'^( *)(:[a-zA-Z-]+:)(\s*)$', bygroups(Text, Name.Class, Text)),
+            (r'^( *)(:.*?:)([ \t]+)(.*?)$',
+             bygroups(Text, Name.Class, Text, Name.Function)),
             # Definition list
             (r'^([^ ].*(?<!::)\n)((?:(?: +.*)\n)+)',
              bygroups(using(this, state='inline'), using(this, state='inline'))),
         'inline': [
             (r'\\.', Text), # escape
             (r'``', String, 'literal'), # code
-            (r'(`)(.+?)(`__?)',
-             bygroups(Punctuation, using(this), Punctuation)), # reference
-            (r'(`.+?`)(:[a-zA-Z0-9-]+?:)?',
+            (r'(`.+?)(<.+?>)(`__?)',  # reference with inline target
+             bygroups(String, String.Interpol, String)),
+            (r'`.+?`__?', String), # reference
+            (r'(`.+?`)(:[a-zA-Z0-9:-]+?:)?',
              bygroups(Name.Variable, Name.Attribute)), # role
-            (r'(:[a-zA-Z0-9-]+?:)(`.+?`)',
-             bygroups(Name.Attribute, Name.Variable)), # user-defined role
+            (r'(:[a-zA-Z0-9:-]+?:)(`.+?`)',
+             bygroups(Name.Attribute, Name.Variable)), # role (content first)
             (r'\*\*.+?\*\*', Generic.Strong), # Strong emphasis
             (r'\*.+?\*', Generic.Emph), # Emphasis
             (r'\[.*?\]_', String), # Footnote or citation
             (r'[}]', Text),
             (r'[^,]$', Name.Function, '#pop'),
             (r'([\+\.a-zA-Z0-9-][\s\n]*)', Name.Function),
+            (r'\[.*?\]', Name.Entity),
         ],
         'depend_vers': [
             (r'\),', Text, '#pop'),
             (r'[^\s;#{}$]+', String), # catch all
             (r'/[^\s;#]*', Name), # pathname
             (r'\s+', Text),
+            (r'[$;]', Text),  # leftover characters
         ],
     }
 

pygments/lexers/web.py

 """
 
 import re
-try:
-    set
-except NameError:
-    from sets import Set as set
 
-from pygments.lexer import RegexLexer, ExtendedRegexLexer, bygroups, using, include, this
+from pygments.lexer import RegexLexer, ExtendedRegexLexer, bygroups, using, \
+     include, this
 from pygments.token import \
      Text, Comment, Operator, Keyword, Name, String, Number, Other, Punctuation
 from pygments.util import get_bool_opt, get_list_opt, looks_like_xml, \
 
 __all__ = ['HtmlLexer', 'XmlLexer', 'JavascriptLexer', 'CssLexer',
            'PhpLexer', 'ActionScriptLexer', 'XsltLexer', 'ActionScript3Lexer',
-           'MxmlLexer', 'HaxeLexer', 'HamlLexer', 'SassLexer', 'ObjectiveJLexer']
+           'MxmlLexer', 'HaxeLexer', 'HamlLexer', 'SassLexer',
+           'ObjectiveJLexer', 'CoffeeScriptLexer']
 
 
 class JavascriptLexer(RegexLexer):
         ],
         'php': [
             (r'\?>', Comment.Preproc, '#pop'),
-            (r'<<<([a-zA-Z_][a-zA-Z0-9_]*)\n.*?\n\1\;?\n', String),
+            (r'<<<(\'?)([a-zA-Z_][a-zA-Z0-9_]*)\1\n.*?\n\2\;?\n', String),
             (r'\s+', Text),
             (r'#.*?\n', Comment.Single),
             (r'//.*?\n', Comment.Single),
             (r'[~!%^&*+=|:.<>/?@-]+', Operator),
             (r'[\[\]{}();,]+', Punctuation),
             (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
+            (r'(function)(\s*)(?=\()', bygroups(Keyword, Text)),
             (r'(function)(\s+)(&?)(\s*)',
               bygroups(Keyword, Text, Operator, Text), 'functionname'),
             (r'(const)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)',
              r'endif|list|__LINE__|endswitch|new|__sleep|endwhile|not|'
              r'array|__wakeup|E_ALL|NULL|final|php_user_filter|interface|'
              r'implements|public|private|protected|abstract|clone|try|'
-             r'catch|throw|this)\b', Keyword),
+             r'catch|throw|this|use|namespace)\b', Keyword),
             ('(true|false|null)\b', Keyword.Constant),
             (r'\$\{\$+[a-zA-Z_][a-zA-Z0-9_]*\}', Name.Variable),
             (r'\$+[a-zA-Z_][a-zA-Z0-9_]*', Name.Variable),
-            ('[a-zA-Z_][a-zA-Z0-9_]*', Name.Other),
+            (r'[\\a-zA-Z_][\\a-zA-Z0-9_]*', Name.Other),
             (r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
              r"0[xX][0-9a-fA-F]+[Ll]?", Number),
             (r"'([^'\\]*(?:\\.[^'\\]*)*)'", String.Single),
             (r'"', String.Double, 'string'),
         ],
         'classname': [
-            (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop')
+            (r'[a-zA-Z_][\\a-zA-Z0-9_]*', Name.Class, '#pop')
         ],
         'functionname': [
             (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Function, '#pop')
             (r'', Text, '#pop'),
         ],
     }
+
+
+class CoffeeScriptLexer(RegexLexer):
+    """
+    For `CoffeeScript`_ source code.
+
+    .. _CoffeeScript: http://coffeescript.org
+
+    *New in Pygments 1.3.*
+    """
+
+    name = 'CoffeeScript'
+    aliases = ['coffee-script', 'coffeescript']
+    filenames = ['*.coffee']
+    mimetypes = ['text/coffeescript']
+
+    flags = re.DOTALL
+    tokens = {
+        'commentsandwhitespace': [
+            (r'\s+', Text),
+            (r'#.*?\n', Comment.Single),
+        ],
+        'slashstartsregex': [
+            include('commentsandwhitespace'),
+            (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
+             r'([gim]+\b|\B)', String.Regex, '#pop'),
+            (r'(?=/)', Text, ('#pop', 'badregex')),
+            (r'', Text, '#pop'),
+        ],
+        'badregex': [
+            ('\n', Text, '#pop'),
+        ],
+        'root': [
+            (r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
+            include('commentsandwhitespace'),
+            (r'\+\+|--|~|&&|\band\b|\bor\b|\bis\b|\bisnt\b|\bnot\b|\?|:|'
+             r'\|\||\\(?=\n)|(<<|>>>?|==?|!=?|[-<>+*`%&\|\^/])=?',
+             Operator, 'slashstartsregex'),
+            (r'[{(\[;,]', Punctuation, 'slashstartsregex'),
+            (r'[})\].]', Punctuation),
+            (r'(for|in|of|while|break|return|continue|switch|when|then|if|else|'
+             r'throw|try|catch|finally|new|delete|typeof|instanceof|super|'
+             r'extends|this)\b', Keyword, 'slashstartsregex'),
+            (r'(true|false|yes|no|on|off|null|NaN|Infinity|undefined)\b',
+             Keyword.Constant),
+            (r'(Array|Boolean|Date|Error|Function|Math|netscape|'
+             r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
+             r'decodeURIComponent|encodeURI|encodeURIComponent|'
+             r'Error|eval|isFinite|isNaN|parseFloat|parseInt|document|this|'
+             r'window)\b', Name.Builtin),
+            (r'[$a-zA-Z_][a-zA-Z0-9_\.:]*\s*:\s', Name.Variable,
+              'slashstartsregex'),
+            (r'@[$a-zA-Z_][a-zA-Z0-9_\.:]*\s*:\s', Name.Variable.Instance,
+              'slashstartsregex'),
+            (r'@?[$a-zA-Z_][a-zA-Z0-9_]*', Name.Other, 'slashstartsregex'),
+            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
+            (r'0x[0-9a-fA-F]+', Number.Hex),
+            (r'[0-9]+', Number.Integer),
+            (r'"(\\\\|\\"|[^"])*"', String.Double),
+            (r"'(\\\\|\\'|[^'])*'", String.Single),
+        ]
+    }

pygments/token.py

     :copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
-try:
-    set
-except NameError:
-    from sets import Set as set
-
 
 class _TokenType(tuple):
     parent = None
     :copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
+
 import re
 import sys
+import codecs
 
 
 split_path_re = re.compile(r'[/\\ ]')
     import StringIO, cStringIO
     BytesIO = cStringIO.StringIO
     StringIO = StringIO.StringIO
+    uni_open = codecs.open
 else:
     import builtins
     bytes = builtins.bytes
     import io
     BytesIO = io.BytesIO
     StringIO = io.StringIO
+    uni_open = builtins.open

scripts/find_error.py

     import pygments
 except ImportError:
     # try parent path
-    sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
+    sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
 
-from pygments import highlight
+
+from pygments.lexer import RegexLexer
 from pygments.lexers import get_lexer_for_filename, get_lexer_by_name
-from pygments.token import Error
+from pygments.token import Error, Text, _TokenType
 
-def main(fn):
-    try:
-        lx = get_lexer_for_filename(fn)
-    except ValueError:
+
+class DebuggingRegexLexer(RegexLexer):
+    """Make the state stack, position and current match instance attributes."""
+
+    def get_tokens_unprocessed(self, text, stack=('root',)):
+        """
+        Split ``text`` into (tokentype, text) pairs.
+
+        ``stack`` is the initial stack (default: ``['root']``)
+        """
+        self.pos = 0
+        tokendefs = self._tokens
+        self.statestack = list(stack)
+        statetokens = tokendefs[self.statestack[-1]]
+        while 1:
+            for rexmatch, action, new_state in statetokens:
+                self.m = m = rexmatch(text, self.pos)
+                if m:
+                    if type(action) is _TokenType:
+                        yield self.pos, action, m.group()
+                    else:
+                        for item in action(self, m):
+                            yield item
+                    self.pos = m.end()
+                    if new_state is not None:
+                        # state transition
+                        if isinstance(new_state, tuple):
+                            for state in new_state:
+                                if state == '#pop':
+                                    self.statestack.pop()
+                                elif state == '#push':
+                                    self.statestack.append(self.statestack[-1])
+                                else:
+                                    self.statestack.append(state)
+                        elif isinstance(new_state, int):
+                            # pop
+                            del self.statestack[new_state:]
+                        elif new_state == '#push':
+                            self.statestack.append(self.statestack[-1])
+                        else:
+                            assert False, 'wrong state def: %r' % new_state
+                        statetokens = tokendefs[self.statestack[-1]]
+                    break
+            else:
+                try:
+                    if text[self.pos] == '\n':
+                        # at EOL, reset state to 'root'
+                        self.pos += 1
+                        self.statestack = ['root']
+                        statetokens = tokendefs['root']
+                        yield self.pos, Text, u'\n'
+                        continue
+                    yield self.pos, Error, text[self.pos]
+                    self.pos += 1
+                except IndexError:
+                    break
+
+
+def main(fn, lexer=None):
+    if lexer is not None:
+        lx = get_lexer_by_name(lexer)
+    else:
         try:
-            name, rest = fn.split("_", 1)
-            lx = get_lexer_by_name(name)
+            lx = get_lexer_for_filename(os.path.basename(fn))
         except ValueError:
-            raise AssertionError('no lexer found for file %r' % fn)
+            try:
+                name, rest = fn.split('_', 1)
+                lx = get_lexer_by_name(name)
+            except ValueError:
+                raise AssertionError('no lexer found for file %r' % fn)
+    debug_lexer = False
+    # does not work for e.g. ExtendedRegexLexers
+    if lx.__class__.__bases__ == (RegexLexer,):
+        lx.__class__.__bases__ = (DebuggingRegexLexer,)
+        debug_lexer = True
+    lno = 1
     text = file(fn, 'U').read()
     text = text.strip('\n') + '\n'
     text = text.decode('latin1')
-    ntext = []
+    tokens = []
+    states = []
+
+    def show_token(tok, state):
+        reprs = map(repr, tok)
+        print '   ' + reprs[1] + ' ' + ' ' * (29-len(reprs[1])) + reprs[0],
+        if debug_lexer:
+            print ' ' + ' ' * (29-len(reprs[0])) + repr(state),
+        print
+
     for type, val in lx.get_tokens(text):
+        lno += val.count('\n')
         if type == Error:
-            print "Error parsing", fn
-            print "\n".join(['   ' + repr(x) for x in ntext[-num:]])
-            print `val` + "<<<"
+            print 'Error parsing', fn, 'on line', lno
+            print 'Previous tokens' + (debug_lexer and ' and states' or '') + ':'
+            if showall:
+                for tok, state in zip(tokens, states):
+                    show_token(tok, state)
+            else:
+                for i in range(len(tokens) - num, len(tokens)):
+                    show_token(tokens[i], states[i])
+            print 'Error token:'
+            l = len(repr(val))
+            print '   ' + repr(val),
+            if debug_lexer and hasattr(lx, 'statestack'):
+                print ' ' * (60-l) + repr(lx.statestack),
+            print
+            print
             return 1
-        ntext.append((type,val))
+        tokens.append((type,val))
+        if debug_lexer:
+            if hasattr(lx, 'statestack'):
+                states.append(lx.statestack[:])
+            else:
+                states.append(None)
+    if showall:
+        for tok, state in zip(tokens, states):
+            show_token(tok, state)
     return 0
 
 
 num = 10
+showall = False
+lexer = None
 
-if __name__ == "__main__":
-    if sys.argv[1][:2] == '-n':
-        num = int(sys.argv[1][2:])
-        del sys.argv[1]
+if __name__ == '__main__':
+    import getopt
+    opts, args = getopt.getopt(sys.argv[1:], 'n:l:a')
+    for opt, val in opts:
+        if opt == '-n':
+            num = int(val)
+        elif opt == '-a':
+            showall = True
+        elif opt == '-l':
+            lexer = val
     ret = 0
-    for f in sys.argv[1:]:
-        ret += main(f)
+    for f in args:
+        ret += main(f, lexer)
     sys.exit(bool(ret))
 
 try:
     from setuptools import setup, find_packages
+    have_setuptools = True
 except ImportError:
     from distutils.core import setup
     def find_packages():
             'pygments.styles',
             'pygments.filters',
         ]
+    have_setuptools = False
 
 try:
     from distutils.command.build_py import build_py_2to3 as build_py
 except ImportError:
     from distutils.command.build_py import build_py
 
+if have_setuptools:
+    add_keywords = dict(
+        entry_points = {
+            'console_scripts': ['pygmentize = pygments.cmdline:main'],
+        },
+    )
+else:
+    add_keywords = dict(
+        scripts = ['pygmentize'],
+    )
+
 setup(
     name = 'Pygments',
-    version = '1.2.2',
+    version = '1.3.1',
     url = 'http://pygments.org/',
     license = 'BSD License',
     author = 'Georg Brandl',
     long_description = __doc__,
     keywords = 'syntax highlighting',
     packages = find_packages(),
-    scripts = ['pygmentize'],
     platforms = 'any',
     zip_safe = False,
     include_package_data = True,
         'Operating System :: OS Independent',
     ],
     cmdclass = {'build_py': build_py},
+    **add_keywords
 )

tests/examplefiles/Sorting.mod

+IMPLEMENTATION MODULE Sorting;
+
+(* J. Andrea, Dec.16/91 *)
+(* This code may be freely used and distributed, it may not be sold. *)
+
+(* Adapted to ISO Modula-2 by Frank Schoonjans  Feb 2004 *)
+
+FROM Storage IMPORT ALLOCATE;
+
+CONST
+   max_stack = 20;
+   n_small   = 6; (* use a simple sort for this size and smaller *)
+
+VAR
+  rtemp :REAL;
+  ctemp :CARDINAL;
+
+  L, R, n               :INTEGER;
+  top, bottom, lastflip :INTEGER;
+
+  tos            :CARDINAL;
+  Lstack, Rstack :ARRAY [1..max_stack] OF INTEGER;
+
+      (* --------------------------------------------------- *)
+      PROCEDURE CardQSortIndex( x :ARRAY OF CARDINAL; array_len :CARDINAL;
+                                VAR index :ARRAY OF CARDINAL );
+
+      VAR
+        median : CARDINAL;
+        i,j    : INTEGER;
+      BEGIN
+
+        n := VAL(INTEGER,array_len) - 1; (* back to zero offset *)
+
+        (* initialize the index *)
+        FOR i := 0 TO n DO
+          index[i] := VAL(CARDINAL,i);
+        END;
+
+        tos := 0;
+
+        L := 0;  R := n;
+
+        (* PUSH very first set *)
+        tos := tos + 1;  Lstack[tos] := L;  Rstack[tos] := R;
+
+        REPEAT
+
+          (* POP *)
+          L := Lstack[tos];  R := Rstack[tos];  tos := tos - 1;
+
+          IF R - L + 1 > n_small THEN
+
+            REPEAT
+              i := L;  j := R;    median := x[index[( L + R ) DIV 2]];
+
+              REPEAT
+                WHILE x[index[i]] < median DO
+                  i := i + 1;
+                END;
+                WHILE median < x[index[j]] DO
+                  j := j - 1;
+                END;
+
+                IF i <= j THEN (* swap *)
+                  ctemp := index[i];  index[i] := index[j];  index[j] := ctemp;
+                  i := i + 1;  j := j - 1;
+                END;
+              UNTIL i > j;
+
+              IF j - L < R - i THEN
+                IF i < R THEN (* PUSH *)
+                  tos := tos + 1;  Lstack[tos] := i;  Rstack[tos] := R;
+                END;
+                R := j;
+              ELSE
+                IF L < j THEN (* push *)
+                  tos := tos + 1;  Lstack[tos] := L;  Rstack[tos] := j;
+                END;
+                L := i;
+              END;
+
+            UNTIL L >= R;
+
+         ELSE
+
+           (* small sort for small number of values *)
+           FOR i := L TO R - 1 DO
+             FOR j := i TO R DO
+               IF x[index[i]] > x[index[j]] THEN
+                  ctemp    := index[i];
+                  index[i] := index[j];
+                  index[j] := ctemp
+               END;
+             END;
+           END;
+
+         END; (* check for small *)
+
+       UNTIL tos = 0;
+
+      END CardQSortIndex;
+
+      (* --------------------------------------------------- *)
+      PROCEDURE RealQSortIndex( x :ARRAY OF REAL; array_len :CARDINAL;
+                                VAR index :ARRAY OF CARDINAL );
+
+      VAR
+        median :REAL;
+        i,j    :INTEGER;
+      BEGIN
+
+        n := VAL(INTEGER,array_len) - 1; (* back to zero offset *)
+
+        (* initialize the index *)
+        FOR i := 0 TO n DO
+          index[i] := VAL(CARDINAL,i);
+        END;
+
+        tos := 0;
+
+        L := 0;  R := n;
+
+        (* PUSH very first set *)
+        tos := tos + 1;  Lstack[tos] := L;  Rstack[tos] := R;
+
+        REPEAT
+
+          (* POP *)
+          L := Lstack[tos];  R := Rstack[tos];  tos := tos - 1;
+
+          IF R - L + 1 > n_small THEN
+
+            REPEAT
+              i := L;  j := R;    median := x[index[( L + R ) DIV 2]];
+
+              REPEAT
+                WHILE x[index[i]] < median DO
+                  i := i + 1;
+                END;
+                WHILE median < x[index[j]] DO
+                  j := j - 1;
+                END;
+
+                IF i <= j THEN (* swap *)
+                  ctemp := index[i];  index[i] := index[j];  index[j] := ctemp;
+                  i := i + 1;  j := j - 1;
+                END;
+              UNTIL i > j;
+
+              IF j - L < R - i THEN
+                IF i < R THEN (* PUSH *)
+                  tos := tos + 1;  Lstack[tos] := i;  Rstack[tos] := R;
+                END;
+                R := j;
+              ELSE
+                IF L < j THEN (* push *)
+                  tos := tos + 1;  Lstack[tos] := L;  Rstack[tos] := j;
+                END;
+                L := i;
+              END;
+
+            UNTIL L >= R;
+
+         ELSE
+
+           (* small sort for small number of values *)
+           FOR i := L TO R - 1 DO
+             FOR j := i TO R DO
+               IF x[index[i]] > x[index[j]] THEN
+                  ctemp    := index[i];
+                  index[i] := index[j];
+                  index[j] := ctemp
+               END;
+             END;
+           END;
+
+         END; (* check for small *)
+
+       UNTIL tos = 0;
+
+      END RealQSortIndex;
+
+      (* --------------------------------------------------- *)
+      PROCEDURE CardQSort( VAR x :ARRAY OF CARDINAL; array_len :CARDINAL );
+      (*
+       Sort the first array_len elements of x into ascending order, in place.
+       Iterative quicksort: pending partitions are kept on an explicit stack
+       (Lstack/Rstack indexed by tos); partitions of n_small or fewer
+       elements fall through to a simple exchange sort instead.
+       NOTE(review): L, R, tos, ctemp, Lstack, Rstack and n_small are not
+       declared locally — presumably module-level working storage and a
+       tuning constant (confirm in the enclosing declarations).  Their final
+       values are clobbered as a side effect.  Assumes the stacks are large
+       enough for array_len — verify against their declared bounds.
+      *)
+
+      VAR
+        median : CARDINAL;
+        n,i,j  : INTEGER;
+      BEGIN
+
+        n := VAL(INTEGER,array_len) - 1; (* back to zero offset *)
+
+        tos := 0;
+
+        L := 0;  R := n;
+
+        (* PUSH very first set *)
+        tos := tos + 1;  Lstack[tos] := L;  Rstack[tos] := R;
+
+        REPEAT
+
+          (* POP *)
+          L := Lstack[tos];  R := Rstack[tos];  tos := tos - 1;
+
+          IF R - L + 1 > n_small THEN
+
+            REPEAT
+              (* pivot = middle element of x[L..R]; partition around it *)
+              i := L;  j := R;    median := x[( L + R ) DIV 2];
+
+              REPEAT
+                WHILE x[i] < median DO
+                  i := i + 1;
+                END;
+                WHILE median < x[j] DO
+                  j := j - 1;
+                END;
+
+                IF i <= j THEN (* swap *)
+                  ctemp := x[i];  x[i] := x[j];  x[j] := ctemp;
+                  i := i + 1;  j := j - 1;
+                END;
+              UNTIL i > j;
+
+              (* push the larger half for later, continue with the smaller *)
+              IF j - L < R - i THEN
+                IF i < R THEN (* PUSH *)
+                  tos := tos + 1;  Lstack[tos] := i;  Rstack[tos] := R;
+                END;
+                R := j;
+              ELSE
+                IF L < j THEN (* push *)
+                  tos := tos + 1;  Lstack[tos] := L;  Rstack[tos] := j;
+                END;
+                L := i;
+              END;
+
+            UNTIL L >= R;
+
+         ELSE
+
+           (* small sort for small number of values: O(n^2) exchange sort *)
+           FOR i := L TO R - 1 DO
+             FOR j := i TO R DO
+               IF x[i] > x[j] THEN
+                  ctemp := x[i];
+                  x[i]  := x[j];
+                  x[j]  := ctemp
+               END;
+             END;
+           END;
+
+         END; (* check for small *)
+
+       UNTIL tos = 0;
+
+      END CardQSort;
+
+      (* ----------------------------------------------------- *)
+      PROCEDURE CardBSort( VAR x :ARRAY OF CARDINAL; array_len :CARDINAL );
+      (*
+       Sort the first array_len elements of x into ascending order, in place,
+       using a bidirectional bubble ("shaker") sort: a forward pass bubbles
+       large values toward the end, a backward pass bubbles small values
+       toward the front, and lastflip (position of the final swap in a pass)
+       shrinks the active range [top..bottom] from both ends.
+       NOTE(review): top, bottom, lastflip and ctemp are not declared locally
+       (unlike RealBSort, which declares bottom/top itself) — presumably
+       module-level working storage whose final values leak out; confirm.
+      *)
+      VAR i,j : INTEGER;
+      BEGIN
+        top    := 0;      (* open arrays are zero offset *)
+        bottom := VAL(INTEGER,array_len) - 1;
+
+        WHILE top < bottom DO
+
+          lastflip := top;
+
+          (* forward pass: largest remaining value ends up at bottom *)
+          FOR i := top TO bottom-1 DO
+             IF x[i] > x[i+1] THEN    (* flip *)
+               ctemp  := x[i];
+               x[i]   := x[i+1];
+               x[i+1] := ctemp;
+               lastflip := i;
+             END;
+          END;
+
+          (* everything past the last swap is already sorted *)
+          bottom := lastflip;
+
+          IF bottom > top THEN
+
+             (* backward pass: walk i from bottom-1 down to top *)
+             i := bottom - 1;
+             FOR j := top TO bottom-1 DO
+               IF x[i] > x[i+1] THEN    (* flip *)
+                 ctemp  := x[i];
+                 x[i]   := x[i+1];
+                 x[i+1] := ctemp;
+                 lastflip := i;
+               END;
+               i := i - 1;
+             END;
+
+             top := lastflip + 1;
+
+          ELSE
+             (* force a loop failure *)
+             top := bottom + 1;
+          END;
+
+       END;
+
+      END CardBSort;
+
+
+      (* ----------------------------------------------------- *)
+      PROCEDURE RealBSort( VAR x :ARRAY OF REAL; array_len :CARDINAL );
+      (*
+       Sort the first array_len elements of x into ascending order, in place.
+       Same bidirectional bubble ("shaker") sort as CardBSort, but for REAL
+       values: alternating forward and backward passes, with lastflip (the
+       position of the final swap in a pass) tightening the active range
+       [top..bottom] from both ends.
+       NOTE(review): lastflip and rtemp are not declared locally — presumably
+       module-level working storage clobbered as a side effect; confirm in
+       the enclosing declarations.
+      *)
+      VAR bottom,top : INTEGER;
+          i,j        : INTEGER;
+      BEGIN
+        top    := 0;      (* open arrays are zero offset *)
+        bottom := VAL(INTEGER,array_len) - 1;
+
+        WHILE top < bottom DO
+
+          lastflip := top;
+
+          (* forward pass: largest remaining value ends up at bottom *)
+          FOR i := top TO bottom-1 DO
+             IF x[i] > x[i+1] THEN    (* flip *)
+               rtemp  := x[i];
+               x[i]   := x[i+1];
+               x[i+1] := rtemp;
+               lastflip := i;
+             END;
+          END;
+
+          (* everything past the last swap is already sorted *)
+          bottom := lastflip;
+
+          IF bottom > top THEN
+
+             (* backward pass: walk i from bottom-1 down to top *)
+             i := bottom - 1;
+             FOR j := top TO bottom-1 DO
+               IF x[i] > x[i+1] THEN    (* flip *)
+                 rtemp  := x[i];
+                 x[i]   := x[i+1];
+                 x[i+1] := rtemp;
+                 lastflip := i;
+               END;
+               i := i - 1;
+             END;
+
+             top := lastflip + 1;
+
+          ELSE
+             (* force a loop failure *)
+             top := bottom + 1;
+          END;
+
+       END;
+
+      END RealBSort;
+
+
+    (* ----------------------------------------------------- *)
+    PROCEDURE TopoSort( x, y :ARRAY OF CARDINAL; n_pairs :CARDINAL;
+                       VAR solution :ARRAY OF CARDINAL; VAR n_solution :CARDINAL;
+                       VAR error, sorted :BOOLEAN );
+    (*
+     This procedure needs some garbage collection added; I've tried, but
+     with little success. J. Andrea, Dec.18/91
+    *)
+
+    TYPE
+      LPtr = POINTER TO Leader;
+      TPtr = POINTER TO Trailer;
+
+      Leader = RECORD
+                  key   :CARDINAL;
+                  count :INTEGER;
+                  trail :TPtr;
+                  next  :LPtr;
+               END;
+
+      Trailer = RECORD
+                  id   :LPtr;
+                  next :TPtr;
+                END;
+
+    VAR
+      p, q, head, tail :LPtr;
+      t                :TPtr;