Radomir Dopieralski avatar Radomir Dopieralski committed 495d9a9

pep8 and pyflakes fixes

Comments (0)

Files changed (11)

hatta/__main__.py

 sys.stdout = sys.__stdout__
 sys.stderr = sys.__stderr__
 
+
 def application(env, start):
     """Detect that we are being run as WSGI application."""
 
     application = wiki.application
     return application(env, start)
 
+
 def main(config=None, wiki=None):
     """Start a standalone WSGI server."""
 
             return
     apps = [('', app)]
     name = wiki.site_name
-    server = wsgiserver.CherryPyWSGIServer((host, port), apps, server_name=name)
+    server = wsgiserver.CherryPyWSGIServer((host, port), apps,
+                                           server_name=name)
     try:
         server.start()
     except KeyboardInterrupt:
 
 if __name__ == "__main__":
     main()
-
 
 import os
 
+
 OPTIONS = []
 VALID_NAMES = set()
 
+
 def _add(short, long, dest, help, default=None, metavar=None,
          action=None, type=None):
     """Helper for building the list of options."""
     action="store_true",
     help='Convert all text pages to UNIX-style CR newlines')
 
+
 class WikiConfig(object):
     """
     Responsible for reading and storing site configuration. Contains the
         import optparse
 
         parser = optparse.OptionParser()
-        for short, long, dest, help, default, metavar, action, type in self.options:
+        for (short, long, dest, help, default, metavar, action,
+             type) in self.options:
             parser.add_option(short, long, dest=dest, help=help, type=type,
                               default=default, metavar=metavar, action=action)
 
     config.parse_files()
     # config.sanitize()
     return config
-
 
 import base64
 
+
 icon = base64.b64decode(
 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABHNCSVQICAgIfAhki'
 'AAAAAlwSFlzAAAEnQAABJ0BfDRroQAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBo'
 
 class ServiceUnavailableErr(WikiError):
     code = 503
-

hatta/hg_integration.py

 from config import WikiConfig
 from __main__ import main
 
+
 def run_wiki(ui, repo, directory=None, **opts):
     """Start serving Hatta in the provided repository."""
 
 
 import werkzeug
 import werkzeug.contrib.atom
-import jinja2
 
+pygments = None
 try:
     import pygments
     import pygments.util
     import pygments.formatters
     import pygments.styles
 except ImportError:
-    pygments = None
+    pass
 
+Image = None
 try:
     import Image
 except ImportError:
-    Image = None
+    pass
 
 import parser
 import error
     'archive/gzip'
     """
 
-    addr = title.encode('utf-8') # the encoding doesn't relly matter here
+    addr = title.encode('utf-8')  # the encoding doesn't really matter here
     mime, encoding = mimetypes.guess_type(addr, strict=False)
     if encoding:
         mime = 'archive/%s' % encoding
         mime = 'text/x-wiki'
     return mime
 
+
 def date_html(date_time):
     """
     Create HTML for a date, according to recommendation at
     return date_time.strftime(
         '<abbr class="date" title="%Y-%m-%dT%H:%M:%SZ">%Y-%m-%d %H:%M</abbr>')
 
+
 class WikiPage(object):
     """Everything needed for rendering a page."""
 
         else:
             self.aliases = {}
 
-
     def link_alias(self, addr):
         """Find a target address for an alias."""
 
                 classes.append('mail')
                 text = text.replace('@', '&#64;').replace('.', '&#46;')
                 href = werkzeug.escape(addr,
-                                       quote=True).replace('@',
-                                                           '%40').replace('.',
-                                                                          '%2E')
+                    quote=True).replace('@', '%40').replace('.', '%2E')
             else:
                 href = werkzeug.escape(addr, quote=True)
         else:
                     classes.append('nonexistent')
         class_ = ' '.join(classes) or None
         return werkzeug.html.a(image or text, href=href, class_=class_,
-                               title=addr+chunk)
+                               title=addr + chunk)
 
     def wiki_image(self, addr, alt, class_='wiki', lineno=0):
         """Create HTML for a wiki image."""
 
         return self.storage.page_text(self.title)
 
-
     def view_content(self, lines=None):
-        """Read the page content from storage or preview and return iterator."""
+        """
+        Read the page content from storage or preview and return iterator.
+        """
 
         if lines is None:
             f = self.storage.open_page(self.title)
         for old_line, new_line, changed in diff:
             old_no, old_text = old_line
             new_no, new_text = new_line
-            line_no = (new_no or old_no or 1)-1
+            line_no = (new_no or old_no or 1) - 1
             if changed:
                 yield u'<div class="change" id="line_%d">' % line_no
                 old_iter = infiniter(mark_re.finditer(old_text))
                     line_no, werkzeug.escape(old_text))
         yield u'</pre>'
 
+
 class WikiPageColorText(WikiPageText):
     """Text pages, but displayed colorized with pygments"""
 
             im = Image.open(page_file)
             im = im.convert('RGBA')
             im.thumbnail((128, 128), Image.ANTIALIAS)
-            im.save(cache_file,'PNG')
+            im.save(cache_file, 'PNG')
         except IOError:
             raise error.UnsupportedMediaTypeErr('Image corrupted')
         cache_file.close()
         except csv.Error, e:
             yield u'</table>'
             yield werkzeug.html.p(werkzeug.html(
-                _(u'Error parsing CSV file %{file}s on line %{line}d: %{error}s')
-                % {'file': html_title, 'line': reader.line_num, 'error': e}))
+                _(u'Error parsing CSV file %{file}s on '
+                  u'line %{line}d: %{error}s') %
+                {'file': html_title, 'line': reader.line_num, 'error': e}))
         finally:
             csv_file.close()
         yield u'</table>'
             raise error.NotFoundErr()
         return self.content_iter(lines)
 
+
 class WikiPageRST(WikiPageText):
     """
     Display ReStructured Text.
 mimetypes.add_type('application/x-python', '.wsgi')
 mimetypes.add_type('application/x-javascript', '.js')
 mimetypes.add_type('text/x-rst', '.rst')
-
 import itertools
 import werkzeug
 
-EXTERNAL_URL_RE = re.compile(ur'^[a-z]+://|^mailto:', re.I|re.U)
+
+EXTERNAL_URL_RE = re.compile(ur'^[a-z]+://|^mailto:', re.I | re.U)
+
 
 def external_link(addr):
     """
 
     return EXTERNAL_URL_RE.match(addr)
 
+
 class WikiParser(object):
     r"""
     Responsible for generating HTML markup from the wiki markup.
         "empty": (40, ur"^\s*$"),
         "heading": (50, heading_pat),
         "indent": (60, ur"^[ \t]+"),
-        "macro":(70, ur"^<<\w+\s*$"),
+        "macro": (70, ur"^<<\w+\s*$"),
         "quote": (80, quote_pat),
         "rule": (90, ur"^\s*---+\s*$"),
         "syntax": (100, ur"^\{\{\{\#![\w+#.-]+\s*$"),
         "code": (20, ur"[{][{][{](?P<code_text>([^}]|[^}][}]|[^}][}][}])"
                 ur"*[}]*)[}][}][}]"),
         "free_link": (30, ur"""[a-zA-Z]+://\S+[^\s.,:;!?()'"=+<>-]"""),
-        "italic": (40 , ur"//"),
+        "italic": (40, ur"//"),
         "link": (50, ur"\[\[(?P<link_target>([^|\]]|\][^|\]])+)"
                 ur"(\|(?P<link_text>([^\]]|\][^\]])+))?\]\]"),
         "image": (60, image_pat),
         "math": (100, ur"\$\$(?P<math_text>[^$]+)\$\$"),
         "mono": (110, ur"##"),
         "newline": (120, ur"\n"),
-        "punct": (130, ur'(^|\b|(?<=\s))(%s)((?=[\s.,:;!?)/&=+"\'—-])|\b|$)' %
+        "punct": (130,
+                  ur'(^|\b|(?<=\s))(%s)((?=[\s.,:;!?)/&=+"\'—-])|\b|$)' %
                   ur"|".join(re.escape(k) for k in punct)),
         "table": (140, ur"=?\|=?"),
         "text": (150, ur".+?"),
     }
 
-
     def __init__(self, lines, wiki_link, wiki_image,
                  wiki_syntax=None, wiki_math=None, smilies=None):
         self.wiki_link = wiki_link
     @classmethod
     def extract_links(cls, text):
         links = []
-        def link(addr, label=None, class_=None, image=None, alt=None, lineno=0):
+
+        def link(addr, label=None, class_=None, image=None, alt=None,
+                 lineno=0):
             addr = addr.strip()
             if external_link(addr) or addr.startswith(':'):
                 # Don't index external links and aliases
     def _block_heading(self, block):
         for self.line_no, line in block:
             level = min(len(self.heading_re.match(line).group(0).strip()), 5)
-            self.headings[level-1] = self.headings.get(level-1, 0)+1
+            self.headings[level - 1] = self.headings.get(level - 1, 0) + 1
             label = u"-".join(str(self.headings.get(i, 0))
                               for i in range(level))
             yield werkzeug.html.a(name="head-%s" % label)
             yield u"".join(self.parse_line(content))
         if in_p:
             yield '%s</p>' % self.pop_to("")
-        yield '</blockquote>'*level
+        yield '</blockquote>' * level
 
     def _block_conflict(self, block):
         for self.line_no, part in block:
     markup = dict(WikiParser.markup)
     camel_link = ur"\w+[%s]\w+" % re.escape(
         u''.join(unichr(i) for i in xrange(sys.maxunicode)
-        if unicodedata.category(unichr(i))=='Lu'))
+        if unicodedata.category(unichr(i)) == 'Lu'))
     markup["camel_link"] = (105, camel_link)
     markup["camel_nolink"] = (106, ur"[!~](?P<camel_text>%s)" % camel_link)
 
 
     def _line_camel_nolink(self, groups):
         return werkzeug.escape(groups["camel_text"])
-
-
         con.execute('CREATE INDEX IF NOT EXISTS index2 '
                          'ON words (word);')
         con.execute('CREATE TABLE IF NOT EXISTS links '
-                '(src INTEGER, target INTEGER, label VARCHAR, number INTEGER);')
+            '(src INTEGER, target INTEGER, label VARCHAR, number INTEGER);')
         con.commit()
 
     @property
     def count_words(self, words):
         count = {}
         for word in words:
-            count[word] = count.get(word, 0)+1
+            count[word] = count.get(word, 0) + 1
         return count
 
     def title_id(self, title, con):
         finally:
             con.commit()
 
-
     def page_backlinks(self, title):
         """Gives a list of pages linking to specified page."""
 
-        con = self.con # sqlite3.connect(self.filename)
+        con = self.con  # sqlite3.connect(self.filename)
         try:
             sql = ('SELECT DISTINCT(titles.title) '
                    'FROM links, titles '
     def page_links(self, title):
         """Gives a list of links on specified page."""
 
-        con = self.con # sqlite3.connect(self.filename)
+        con = self.con  # sqlite3.connect(self.filename)
         try:
             title_id = self.title_id(title, con)
             sql = 'SELECT target FROM links WHERE src=? ORDER BY number;'
         finally:
             con.commit()
 
-    def page_links_and_labels (self, title):
-        con = self.con # sqlite3.connect(self.filename)
+    def page_links_and_labels(self, title):
+        con = self.con  # sqlite3.connect(self.filename)
         try:
             title_id = self.title_id(title, con)
-            sql = 'SELECT target, label FROM links WHERE src=? ORDER BY number;'
+            sql = ('SELECT target, label FROM links '
+                   'WHERE src=? ORDER BY number;')
             for link, label in con.execute(sql, (title_id,)):
                 yield unicode(link), unicode(label)
         finally:
             # Check for the rest of words
             for title_id, title, first_count in first_counts:
                 # Score for the first word
-                score = float(first_count)/first_rank
+                score = float(first_count) / first_rank
                 for rank, word in rest:
                     sql = ('SELECT SUM(count) FROM words '
                            'WHERE page=? AND word LIKE ?;')
                         # If page misses any of the words, its score is 0
                         score = 0
                         break
-                    score += float(count)/rank
+                    score += float(count) / rank
                 if score > 0:
-                    yield int(100*score), unicode(title)
+                    yield int(100 * score), unicode(title)
         finally:
             con.commit()
 
                 text = None
                 title_id = self.title_id(title, cursor)
                 if not list(self.page_backlinks(title)):
-                    cursor.execute("DELETE FROM titles WHERE id=?;", (title_id,))
+                    cursor.execute("DELETE FROM titles WHERE id=?;",
+                                   (title_id,))
         extract_links = getattr(page, 'extract_links', None)
         if extract_links and text:
             links = extract_links(text)
 
         # We use % here because the sqlite3's substitiution doesn't work
         # We store revision 0 as 1, 1 as 2, etc. because 0 means "no revision"
-        self.con.execute('PRAGMA USER_VERSION=%d;' % (int(rev+1),))
+        self.con.execute('PRAGMA USER_VERSION=%d;' % (int(rev + 1),))
 
     def get_last_revision(self):
         """Retrieve the last indexed repository revision."""
         c = con.execute('PRAGMA USER_VERSION;')
         rev = c.fetchone()[0]
         # -1 means "no revision", 1 means revision 0, 2 means revision 1, etc.
-        return rev-1
+        return rev - 1
 
     def update(self, wiki):
         """Reindex al pages that changed since last indexing."""
         self.reindex(wiki, changed)
         rev = self.storage.repo_revision()
         self.set_last_revision(rev)
-
 import mercurial.hgweb
 import mercurial.commands
 
+import error
 
-import error
 
 def locked_repo(func):
     """A decorator for locking the repository when calling a method."""
 
     return new_func
 
+
 def _find_repo_path(path):
     """Go up the directory tree looking for a repository."""
 
             return None
     return path
 
+
 def _get_ui():
     try:
         ui = mercurial.ui.ui(report_untrusted=False,
     change history, using Mercurial repository as the storage method.
     """
 
-    def __init__(self, path, charset=None, _=lambda x:x, unix_eol=False):
+    def __init__(self, path, charset=None, _=lambda x: x, unix_eol=False):
         """
         Takes the path to the directory where the pages are to be kept.
-        If the directory doen't exist, it will be created. If it's inside
+        If the directory doesn't exist, it will be created. If it's inside
         a Mercurial repository, that repository will be used, otherwise
         a new repository will be created in it.
         """
             self._repos[thread_id] = repo
             return repo
 
-
     def _check_path(self, path):
         """
         Ensure that the path is within allowed bounds.
         return os.path.join(self.repo_prefix, filename)
 
     def _file_to_title(self, filepath):
+        _ = self._
         if not filepath.startswith(self.repo_prefix):
             raise error.ForbiddenErr(
                 _(u"Can't read or write outside of the pages repository"))
         name = filepath[len(self.repo_prefix):].strip('/')
-        # Unescape special windows filenames and dot files
-        if name.startswith('_') and len(name)>1:
+        # Un-escape special windows filenames and dot files
+        if name.startswith('_') and len(name) > 1:
             name = name[1:]
         return werkzeug.url_unquote(name)
 
         return msg
 
     @locked_repo
-    def save_file(self, title, file_name, author=u'', comment=u'', parent=None):
+    def save_file(self, title, file_name, author=u'', comment=u'',
+                  parent=None):
         """Save an existing file as specified page."""
 
         _ = self._
         try:
             mercurial.util.rename(file_name, file_path)
         except OSError, e:
-            if e.errno == errno.ENAMETOOLONG: # "File name too long"
+            if e.errno == errno.ENAMETOOLONG:
+                # "File name too long"
                 raise error.RequestURITooLarge()
             else:
                 raise
 
         return self._changectx().rev()
 
-
     def _changectx(self):
         """Get the changectx of the tip."""
 
             return
         maxrev = filectx_tip.filerev()
         minrev = 0
-        for rev in range(maxrev, minrev-1, -1):
+        for rev in range(maxrev, minrev - 1, -1):
             filectx = filectx_tip.filectx(rev)
             date = datetime.datetime.fromtimestamp(filectx.date()[0])
             author = unicode(filectx.user(), "utf-8",
         changectx = self._changectx()
         maxrev = changectx.rev()
         minrev = 0
-        for wiki_rev in range(maxrev, minrev-1, -1):
+        for wiki_rev in range(maxrev, minrev - 1, -1):
             change = self.repo.changectx(wiki_rev)
             date = datetime.datetime.fromtimestamp(change.date()[0])
             author = unicode(change.user(), "utf-8",
                 yield werkzeug.url_unquote(filename)
 
     def changed_since(self, rev):
-        """Return all pages that changed since specified repository revision."""
+        """
+        Return all pages that changed since specified repository revision.
+        """
 
         try:
             last = self.repo.lookup(int(rev))
         current = self.repo.lookup('tip')
         status = self.repo.status(current, last)
         modified, added, removed, deleted, unknown, ignored, clean = status
-        for filename in modified+added+removed+deleted:
+        for filename in modified + added + removed + deleted:
             if filename.startswith(self.repo_prefix):
                 yield self._file_to_title(filename)
 
             mercurial.commands.rename(self.ui, self.repo, path, temp_path)
             os.makedirs(path)
             index_path = os.path.join(path, self.index)
-            mercurial.commands.rename(self.ui, self.repo, temp_path, index_path)
+            mercurial.commands.rename(self.ui, self.repo, temp_path,
+                                      index_path)
         finally:
             try:
                 os.rmdir(temp_dir)
             except OSError:
                 pass
-        self._commit([index_path, path], _(u"made subdirectory page"), "<wiki>")
+        self._commit([index_path, path], _(u"made subdirectory page"),
+                     "<wiki>")
 
     @locked_repo
-    def save_file(self, title, file_name, author=u'', comment=u'', parent=None):
+    def save_file(self, title, file_name, author=u'', comment=u'',
+                  parent=None):
         """Save the file and make the subdirectories if needed."""
 
         path = self._file_path(title)
         try:
             os.makedirs(os.path.join(self.repo_path, dir_path))
         except OSError, e:
-            if e.errno != errno.EEXIST: # "File exists"
+            if e.errno != errno.EEXIST:
+                # "File exists"
                 raise
         super(WikiSubdirectoryStorage, self).save_file(title, file_name,
                                                        author, comment, parent)
         commit after removing empty directories.
         """
 
-        super(WikiSubdirectoryStorage, self).delete_page(title, author, comment)
+        super(WikiSubdirectoryStorage, self).delete_page(title, author,
+                                                         comment)
         file_path = self._file_path(title)
         self._check_path(file_path)
         dir_path = os.path.dirname(file_path)
         try:
             os.removedirs(dir_path)
         except OSError, e:
-            if e.errno != errno.ENOTEMPTY: # "Directory not empty"
+            if e.errno != errno.ENOTEMPTY:
+                # "Directory not empty"
                 raise
 
     def all_pages(self):
         """
 
         for (dirpath, dirnames, filenames) in os.walk(self.path):
-            path = dirpath[len(self.path)+1:]
+            path = dirpath[len(self.path) + 1:]
             for name in filenames:
                 if os.path.basename(name) == self.index:
                     filename = os.path.join(path, os.path.dirname(name))
                     if (os.path.isfile(os.path.join(self.path, filename))
                         and not filename.startswith('.')):
                         yield werkzeug.url_unquote(filename)
-
-
-
 import werkzeug.routing
 import jinja2
 
+pygments = None
 try:
     import pygments
 except ImportError:
-    pygments = None
+    pass
 
 import hatta
 import storage
 import error
 import data
 
-import mercurial # import it after storage!
+import mercurial  # import it after storage!
+
 
 class WikiResponse(werkzeug.BaseResponse, werkzeug.ETagResponseMixin,
                    werkzeug.CommonResponseDescriptorsMixin):
             auth = werkzeug.url_unquote(self.environ.get('REMOTE_USER', ""))
         except UnicodeError:
             auth = None
-        author = (self.form.get("author") or cookie or auth or self.remote_addr)
+        author = (self.form.get("author") or cookie or auth or
+                  self.remote_addr)
         return author
 
     def _get_file_stream(self, total_content_length=None, content_type=None,
         self.unix_eol = self.config.get_bool('unix_eol', False)
         if self.subdirectories:
             self.storage = storage.WikiSubdirectoryStorage(self.path,
-                                                self.page_charset, self.gettext)
+                                                           self.page_charset,
+                                                           self.gettext)
         else:
             self.storage = self.storage_class(self.path, self.page_charset,
                                               self.gettext, self.unix_eol)
         self.index.update(self)
         self.url_rules = URL.rules(self)
         self.url_map = werkzeug.routing.Map(self.url_rules, converters={
-            'title':WikiTitleConverter,
-            'all':WikiAllConverter
+            'title': WikiTitleConverter,
+            'all': WikiAllConverter,
         })
 
     def add_url_rule(self, rule):
 
         self.url_rules.append(rule)
         self.url_map = werkzeug.routing.Map(self.url_rules, converters={
-            'title':WikiTitleConverter,
-            'all':WikiAllConverter
+            'title': WikiTitleConverter,
+            'all': WikiAllConverter,
         })
 
     def get_page(self, request, title):
         response = WikiResponse(content, mimetype=mime)
         if rev is None:
             inode, _size, mtime = self.storage.page_file_meta(title)
-            response.set_etag(u'%s/%s/%d-%d' % (etag, werkzeug.url_quote(title),
+            response.set_etag(u'%s/%s/%d-%d' % (etag,
+                                                werkzeug.url_quote(title),
                                                 inode, mtime))
             if size == -1:
                 size = _size
             werkzeug.html.p(
                 werkzeug.html(
                     _(u'Content of revision %(rev)d of page %(title)s:'))
-                % {'rev': rev, 'title': link }),
+                % {'rev': rev, 'title': link}),
             werkzeug.html.pre(werkzeug.html(text)),
         ]
         special_title = _(u'Revision of "%(title)s"') % {'title': title}
                     self.storage.delete_page(title, author, comment)
                     url = request.get_url(self.front_page)
                 else:
-                    self.storage.save_text(title, text, author, comment, parent)
+                    self.storage.save_text(title, text, author, comment,
+                                           parent)
             else:
                 text = u''
                 upload = request.files['data']
 
     @URL('/+edit/<title:title>', methods=['GET'])
     def edit(self, request, title, preview=None):
-        _ = self.gettext
         self._check_lock(title)
         exists = title in self.storage
         if exists:
             if rev > 0:
                 url = request.adapter.build(self.diff, {
                     'title': title,
-                    'from_rev': rev-1,
-                    'to_rev': rev
+                    'from_rev': rev - 1,
+                    'to_rev': rev,
                 }, force_external=True)
             else:
                 url = request.adapter.build(self.revision, {
         try:
             wrap_file = werkzeug.wrap_file
         except AttributeError:
-            wrap_file = lambda x, y:y
+            wrap_file = lambda x, y: y
         f = wrap_file(request.environ, self.storage.open_page(title))
         response = self.response(request, title, f, '/download', mime, size=-1)
         response.direct_passthrough = True
         try:
             wrap_file = werkzeug.wrap_file
         except AttributeError:
-            wrap_file = lambda x, y:y
+            wrap_file = lambda x, y: y
         f = wrap_file(request.environ, open(cache_file))
         response = self.response(request, title, f, '/render', cache_mime,
                                  size=cache_size)
             else:
                 comment = _(u'Undo of change %(rev)d of page %(title)s') % {
                     'rev': rev, 'title': title}
-                data = self.storage.page_revision(title, rev-1)
+                data = self.storage.page_revision(title, rev - 1)
                 self.storage.save_data(title, data, author, comment, parent)
             page = self.get_page(request, title)
             self.index.update_page(page, title, data=data)
                 max_rev = rev
             if rev > 0:
                 date_url = request.adapter.build(self.diff, {
-                    'title': title, 'from_rev': rev-1, 'to_rev': rev})
+                    'title': title, 'from_rev': rev - 1, 'to_rev': rev})
             else:
                 date_url = request.adapter.build(self.revision, {
                     'title': title, 'rev': rev})
     def recent_changes(self, request):
         """Serve the recent changes page."""
 
-        _ = self.gettext
         def _changes_list():
             last = {}
             lastrev = {}
                 if rev > 0:
                     date_url = request.adapter.build(self.diff, {
                         'title': title,
-                        'from_rev': rev-1,
-                        'to_rev': lastrev.get(title, rev)
+                        'from_rev': rev - 1,
+                        'to_rev': lastrev.get(title, rev),
                     })
                 elif rev == 0:
                     date_url = request.adapter.build(self.revision, {
         response = WikiResponse(html, mimetype='text/html')
         return response
 
-
     @URL('/+index')
     def all_pages(self, request):
         """Show index of all pages in the wiki."""
     def wanted(self, request):
         """Show all pages that don't exist yet, but are linked."""
 
-        _ = self.gettext
         def _wanted_pages_list():
             for refs, title in self.index.wanted_pages():
                 if not (parser.external_link(title) or title.startswith('+')):
         """Serve the search results page."""
 
         _ = self.gettext
+
         def search_snippet(title, words):
             """Extract a snippet of text for search results."""
 
             except error.NotFoundErr:
                 return u''
             regexp = re.compile(u"|".join(re.escape(w) for w in words),
-                                re.U|re.I)
+                                re.U | re.I)
             match = regexp.search(text)
             if match is None:
                 return u""
             h = werkzeug.html
             self.storage.reopen()
             self.index.update(self)
-            result = sorted(self.index.find(words), key=lambda x:-x[0])
+            result = sorted(self.index.find(words), key=lambda x: -x[0])
             yield werkzeug.html.p(h(_(u'%d page(s) containing all words:')
                                   % len(result)))
             yield u'<ol class="search">'
                 yield h.li(h.b(page.wiki_link(title)), u' ', h.i(str(score)),
                            h.div(search_snippet(title, words),
                                  _class="snippet"),
-                           id_="search-%d" % (number+1))
+                           id_="search-%d" % (number + 1))
             yield u'</ol>'
 
         query = request.values.get('q', u'').strip()
     def backlinks(self, request, title):
         """Serve the page with backlinks."""
 
-        _ = self.gettext
         self.storage.reopen()
         self.index.update(self)
         page = self.get_page(request, title)
                   'Disallow: /+feed\r\n'
                   'Disallow: /+history\r\n'
                   'Disallow: /+search\r\n'
-                  'Disallow: /+hg\r\n'
-                 )
+                  'Disallow: /+hg\r\n')
         return self._serve_default(request, 'robots.txt', robots,
                                    'text/plain')
 
     @URL('/+hg<all:path>', methods=['GET', 'POST', 'HEAD'])
     def hgweb(self, request, path=None):
-        """Serve the pages repository on the web like a normal hg repository."""
+        """
+        Serve the pages repository on the web like a normal hg repository.
+        """
 
         _ = self.gettext
         if not self.config.get_bool('hgweb', False):
             raise error.ForbiddenErr(_(u'Repository access disabled.'))
         app = mercurial.hgweb.request.wsgiapplication(
             lambda: mercurial.hgweb.hgweb(self.storage.repo, self.site_name))
+
         def hg_app(env, start):
             env = request.environ
             prefix = '/+hg'
 
         _ = self.gettext
         if not request.remote_addr.startswith('127.'):
-            raise error.ForbiddenErr(_(u'This URL can only be called locally.'))
+            raise error.ForbiddenErr(
+                _(u'This URL can only be called locally.'))
+
         def agony():
             yield u'Oh dear!'
             self.dead = True
             request.cleanup()
             del request
             del adapter
-

File contents unchanged.

Tip: Filter by directory path e.g. /media app.js to search for public/media/app.js.
Tip: Use camelCasing e.g. ProjME to search for ProjectModifiedEvent.java.
Tip: Filter by extension type e.g. /repo .js to search for all .js files in the /repo directory.
Tip: Separate your search with spaces e.g. /ssh pom.xml to search for src/ssh/pom.xml.
Tip: Use ↑ and ↓ arrow keys to navigate and return to view the file.
Tip: You can also navigate files with Ctrl+j (next) and Ctrl+k (previous) and view the file with Ctrl+o.
Tip: You can also navigate files with Alt+j (next) and Alt+k (previous) and view the file with Alt+o.