Commits

Anonymous committed 40781df Merge

Merge main dev

Comments (0)

Files changed (20)

MANIFEST

-hatta.py
-hatta_qticon.py
-hatta_gtkicon.py
-resources/hatta.svg
-resources/hatta.png
-resources/hatta.ico
-resources/hatta.desktop
-examples/hatta.wsgi
-examples/hatta.fcg
-examples/extend_parser.py
-locale/ar/LC_MESSAGES/hatta.mo
-locale/da/LC_MESSAGES/hatta.mo
-locale/de/LC_MESSAGES/hatta.mo
-locale/es/LC_MESSAGES/hatta.mo
-locale/fr/LC_MESSAGES/hatta.mo
-locale/ja/LC_MESSAGES/hatta.mo
-locale/pl/LC_MESSAGES/hatta.mo
-locale/sv/LC_MESSAGES/hatta.mo
+include hatta/*.py
+include hatta_qticon.py
+include hatta_gtkicon.py
+include resources/hatta.svg
+include resources/hatta.png
+include resources/hatta.ico
+include resources/hatta.desktop
+recursive-include examples *
+recursive-include locale/ar/LC_MESSAGES *
+recursive-include hatta/templates *.html

hatta/__main__.py

 sys.stdout = sys.__stdout__
 sys.stderr = sys.__stderr__
 
+
 def application(env, start):
     """Detect that we are being run as WSGI application."""
 
     application = wiki.application
     return application(env, start)
 
+
 def main(config=None, wiki=None):
     """Start a standalone WSGI server."""
 
             return
     apps = [('', app)]
     name = wiki.site_name
-    server = wsgiserver.CherryPyWSGIServer((host, port), apps, server_name=name)
+    server = wsgiserver.CherryPyWSGIServer((host, port), apps,
+                                           server_name=name)
     try:
         server.start()
     except KeyboardInterrupt:
 
 if __name__ == "__main__":
     main()
-
 
 import os
 
+
 OPTIONS = []
 VALID_NAMES = set()
 
+
 def _add(short, long, dest, help, default=None, metavar=None,
          action=None, type=None):
     """Helper for building the list of options."""
 _add('-D', '--subdirectories', dest='subdirectories',
     action="store_true",
     help='Store subpages as subdirectories in the filesystem')
+_add('-E', '--extension', dest='extension',
+    help='Extension to add to wiki page files')
 _add('-U', '--unix-eol', dest='unix_eol',
     action="store_true",
     help='Convert all text pages to UNIX-style CR newlines')
 
     def __init__(self, **kw):
         self.config = dict(kw)
+        self.valid_names = set(VALID_NAMES)
         self.parse_environ()
         self.options = list(OPTIONS)
-        self.valid_names = set(VALID_NAMES)
 
     def sanitize(self):
         """
         import optparse
 
         parser = optparse.OptionParser()
-        for short, long, dest, help, default, metavar, action, type in self.options:
+        for (short, long, dest, help, default, metavar, action,
+             type) in self.options:
             parser.add_option(short, long, dest=dest, help=help, type=type,
                               default=default, metavar=metavar, action=action)
 
     config.parse_files()
     # config.sanitize()
     return config
-
 
 import base64
 
+
 icon = base64.b64decode(
 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABHNCSVQICAgIfAhki'
 'AAAAAlwSFlzAAAEnQAABJ0BfDRroQAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBo'
 abbr.date {border:none}
 dt {font-weight: bold; float: left; }
 dd {font-style: italic; }
+@media print {
+ body {background:white;color:black;font-size:100%;font-family:serif;}
+ #hatta-search, #hatta-menu, #hatta-footer {display:none;}
+ a:link, a:visited {color:#520;font-weight:bold;text-decoration:underline;}
+ #hatta-content {width:auto;}
+ #hatta-content a:link:after,
+ #hatta-content a:visited:after{content:" ["attr(href)"] ";font-size:90%;}
+}
 """
 
 
 class ServiceUnavailableErr(WikiError):
     code = 503
-

hatta/hg_integration.py

 from config import WikiConfig
 from __main__ import main
 
+
 def run_wiki(ui, repo, directory=None, **opts):
     """Start serving Hatta in the provided repository."""
 
 
 import werkzeug
 import werkzeug.contrib.atom
-import jinja2
 
+pygments = None
 try:
     import pygments
     import pygments.util
     import pygments.formatters
     import pygments.styles
 except ImportError:
-    pygments = None
+    pass
 
+Image = None
 try:
     import Image
 except ImportError:
-    Image = None
+    pass
 
 import parser
 import error
     'archive/gzip'
     """
 
-    addr = title.encode('utf-8') # the encoding doesn't relly matter here
+    addr = title.encode('utf-8')  # the encoding doesn't really matter here
     mime, encoding = mimetypes.guess_type(addr, strict=False)
     if encoding:
         mime = 'archive/%s' % encoding
     
     return mime
 
+
 def date_html(date_time):
     """
     Create HTML for a date, according to recommendation at
     return date_time.strftime(
         '<abbr class="date" title="%Y-%m-%dT%H:%M:%SZ">%Y-%m-%d %H:%M</abbr>')
 
+
 class WikiPage(object):
     """Everything needed for rendering a page."""
 
-    template_name = 'page.html'
-
     def __init__(self, wiki, request, title, mime):
         self.request = request
         self.title = title
         else:
             self.aliases = {}
 
-
     def link_alias(self, addr):
         """Find a target address for an alias."""
 
                 classes.append('mail')
                 text = text.replace('@', '&#64;').replace('.', '&#46;')
                 href = werkzeug.escape(addr,
-                                       quote=True).replace('@',
-                                                           '%40').replace('.',
-                                                                          '%2E')
+                    quote=True).replace('@', '%40').replace('.', '%2E')
             else:
-                href = werkzeug.escape(addr, quote=True)
+                href = werkzeug.escape(werkzeug.url_fix(addr), quote=True)
         else:
             if '#' in addr:
                 addr, chunk = addr.split('#', 1)
-                chunk = '#' + chunk
+                chunk = '#' + werkzeug.url_fix(chunk)
             if addr.startswith(':'):
                 alias = self.link_alias(addr[1:])
-                href = werkzeug.url_fix(alias + chunk)
+                href = werkzeug.escape(werkzeug.url_fix(alias) + chunk, True)
                 classes.append('external')
                 classes.append('alias')
             elif addr.startswith('+'):
                                  '+' + werkzeug.escape(addr[1:], quote=True)])
                 classes.append('special')
             elif addr == u'':
-                href = chunk
+                href = werkzeug.escape(chunk, True)
                 classes.append('anchor')
             else:
                 classes.append('wiki')
-                href = self.get_url(addr) + werkzeug.url_quote(chunk)
+                href = werkzeug.escape(self.get_url(addr) + chunk, True)
                 if addr not in self.storage:
                     classes.append('nonexistent')
-        class_ = ' '.join(classes) or None
-        return werkzeug.html.a(image or text, href=href, class_=class_,
-                               title=addr+chunk)
+        class_ = werkzeug.escape(' '.join(classes) or '', True)
+        # We need to output HTML on our own to prevent escaping of href
+        return '<a href="%s" class="%s" title="%s">%s</a>' % (
+                href, class_, werkzeug.escape(addr + chunk, True),
+                image or text)
 
     def wiki_image(self, addr, alt, class_='wiki', lineno=0):
         """Create HTML for a wiki image."""
 class WikiPageSpecial(WikiPage):
     """Special pages, like recent changes, index, etc."""
 
-    template_name = 'page_special.html'
-
 
 class WikiPageText(WikiPage):
     """Pages of mime type text/* use this for display."""
 
         return self.storage.page_text(self.title)
 
-
     def view_content(self, lines=None):
-        """Read the page content from storage or preview and return iterator."""
+        """
+        Read the page content from storage or preview and return iterator.
+        """
 
         if lines is None:
             f = self.storage.open_page(self.title)
         for old_line, new_line, changed in diff:
             old_no, old_text = old_line
             new_no, new_text = new_line
-            line_no = (new_no or old_no or 1)-1
+            line_no = (new_no or old_no or 1) - 1
             if changed:
                 yield u'<div class="change" id="line_%d">' % line_no
                 old_iter = infiniter(mark_re.finditer(old_text))
                     line_no, werkzeug.escape(old_text))
         yield u'</pre>'
 
+
 class WikiPageColorText(WikiPageText):
     """Text pages, but displayed colorized with pygments"""
 
             im = Image.open(page_file)
             im = im.convert('RGBA')
             im.thumbnail((128, 128), Image.ANTIALIAS)
-            im.save(cache_file,'PNG')
+            im.save(cache_file, 'PNG')
         except IOError:
             raise error.UnsupportedMediaTypeErr('Image corrupted')
         cache_file.close()
         except csv.Error, e:
             yield u'</table>'
             yield werkzeug.html.p(werkzeug.html(
-                _(u'Error parsing CSV file %{file}s on line %{line}d: %{error}s')
-                % {'file': html_title, 'line': reader.line_num, 'error': e}))
+                _(u'Error parsing CSV file %(file)s on '
+                  u'line %(line)d: %(error)s') %
+                {'file': html_title, 'line': reader.line_num, 'error': e}))
         finally:
             csv_file.close()
         yield u'</table>'
             raise error.NotFoundErr()
         return self.content_iter(lines)
 
+
 class WikiPageRST(WikiPageText):
     """
     Display ReStructured Text.
 mimetypes.add_type('application/x-javascript', '.js')
 mimetypes.add_type('text/x-rst', '.rst')
 mimetypes.add_type('text/x-wiki', '.wiki')
-
 mime_allowed_default = set(['plain','x-wiki','x-rst'])
 import itertools
 import werkzeug
 
-EXTERNAL_URL_RE = re.compile(ur'^[a-z]+://|^mailto:', re.I|re.U)
+
+EXTERNAL_URL_RE = re.compile(ur'^[a-z]+://|^mailto:', re.I | re.U)
+
 
 def external_link(addr):
     """
 
     return EXTERNAL_URL_RE.match(addr)
 
+
 class WikiParser(object):
     r"""
     Responsible for generating HTML markup from the wiki markup.
         "empty": (40, ur"^\s*$"),
         "heading": (50, heading_pat),
         "indent": (60, ur"^[ \t]+"),
-        "macro":(70, ur"^<<\w+\s*$"),
+        "macro": (70, ur"^<<\w+\s*$"),
         "quote": (80, quote_pat),
         "rule": (90, ur"^\s*---+\s*$"),
         "syntax": (100, ur"^\{\{\{\#![\w+#.-]+\s*$"),
         "code": (20, ur"[{][{][{](?P<code_text>([^}]|[^}][}]|[^}][}][}])"
                 ur"*[}]*)[}][}][}]"),
         "free_link": (30, ur"""[a-zA-Z]+://\S+[^\s.,:;!?()'"=+<>-]"""),
-        "italic": (40 , ur"//"),
+        "italic": (40, ur"//"),
         "link": (50, ur"\[\[(?P<link_target>([^|\]]|\][^|\]])+)"
                 ur"(\|(?P<link_text>([^\]]|\][^\]])+))?\]\]"),
         "image": (60, image_pat),
         "math": (100, ur"\$\$(?P<math_text>[^$]+)\$\$"),
         "mono": (110, ur"##"),
         "newline": (120, ur"\n"),
-        "punct": (130, ur'(^|\b|(?<=\s))(%s)((?=[\s.,:;!?)/&=+"\'—-])|\b|$)' %
+        "punct": (130,
+                  ur'(^|\b|(?<=\s))(%s)((?=[\s.,:;!?)/&=+"\'—-])|\b|$)' %
                   ur"|".join(re.escape(k) for k in punct)),
         "table": (140, ur"=?\|=?"),
         "text": (150, ur".+?"),
     }
 
-
     def __init__(self, lines, wiki_link, wiki_image,
                  wiki_syntax=None, wiki_math=None, smilies=None):
         self.wiki_link = wiki_link
     @classmethod
     def extract_links(cls, text):
         links = []
-        def link(addr, label=None, class_=None, image=None, alt=None, lineno=0):
+
+        def link(addr, label=None, class_=None, image=None, alt=None,
+                 lineno=0):
             addr = addr.strip()
-            if external_link(addr) or addr.startswith(':'):
-                # Don't index external links and aliases
+            if external_link(addr):
+                # Don't index external links
                 return u''
             if '#' in addr:
                 addr, chunk = addr.split('#', 1)
     def _block_heading(self, block):
         for self.line_no, line in block:
             level = min(len(self.heading_re.match(line).group(0).strip()), 5)
-            self.headings[level-1] = self.headings.get(level-1, 0)+1
+            self.headings[level - 1] = self.headings.get(level - 1, 0) + 1
             label = u"-".join(str(self.headings.get(i, 0))
                               for i in range(level))
             yield werkzeug.html.a(name="head-%s" % label)
                 in_ul = True
                 level += 1
             while nest < level:
-                yield '</li></ul>'
+                yield '</li></%s>' % kind
                 in_ul = False
                 level -= 1
             if nest == level and not in_ul:
             yield u"".join(self.parse_line(content))
         if in_p:
             yield '%s</p>' % self.pop_to("")
-        yield '</blockquote>'*level
+        yield '</blockquote>' * level
 
     def _block_conflict(self, block):
         for self.line_no, part in block:
     markup = dict(WikiParser.markup)
     camel_link = ur"\w+[%s]\w+" % re.escape(
         u''.join(unichr(i) for i in xrange(sys.maxunicode)
-        if unicodedata.category(unichr(i))=='Lu'))
+        if unicodedata.category(unichr(i)) == 'Lu'))
     markup["camel_link"] = (105, camel_link)
     markup["camel_nolink"] = (106, ur"[!~](?P<camel_text>%s)" % camel_link)
 
 
     def _line_camel_nolink(self, groups):
         return werkzeug.escape(groups["camel_text"])
-
-
         con.execute('CREATE INDEX IF NOT EXISTS index2 '
                          'ON words (word);')
         con.execute('CREATE TABLE IF NOT EXISTS links '
-                '(src INTEGER, target INTEGER, label VARCHAR, number INTEGER);')
+            '(src INTEGER, target INTEGER, label VARCHAR, number INTEGER);')
         con.commit()
 
     @property
     def count_words(self, words):
         count = {}
         for word in words:
-            count[word] = count.get(word, 0)+1
+            count[word] = count.get(word, 0) + 1
         return count
 
     def title_id(self, title, con):
         finally:
             con.commit()
 
-
     def page_backlinks(self, title):
         """Gives a list of pages linking to specified page."""
 
-        con = self.con # sqlite3.connect(self.filename)
+        con = self.con  # sqlite3.connect(self.filename)
         try:
             sql = ('SELECT DISTINCT(titles.title) '
                    'FROM links, titles '
     def page_links(self, title):
         """Gives a list of links on specified page."""
 
-        con = self.con # sqlite3.connect(self.filename)
+        con = self.con  # sqlite3.connect(self.filename)
         try:
             title_id = self.title_id(title, con)
             sql = 'SELECT target FROM links WHERE src=? ORDER BY number;'
         finally:
             con.commit()
 
-    def page_links_and_labels (self, title):
-        con = self.con # sqlite3.connect(self.filename)
+    def page_links_and_labels(self, title):
+        con = self.con  # sqlite3.connect(self.filename)
         try:
             title_id = self.title_id(title, con)
-            sql = 'SELECT target, label FROM links WHERE src=? ORDER BY number;'
+            sql = ('SELECT target, label FROM links '
+                   'WHERE src=? ORDER BY number;')
             for link, label in con.execute(sql, (title_id,)):
                 yield unicode(link), unicode(label)
         finally:
             # Check for the rest of words
             for title_id, title, first_count in first_counts:
                 # Score for the first word
-                score = float(first_count)/first_rank
+                score = float(first_count) / first_rank
                 for rank, word in rest:
                     sql = ('SELECT SUM(count) FROM words '
                            'WHERE page=? AND word LIKE ?;')
                         # If page misses any of the words, its score is 0
                         score = 0
                         break
-                    score += float(count)/rank
+                    score += float(count) / rank
                 if score > 0:
-                    yield int(100*score), unicode(title)
+                    yield int(100 * score), unicode(title)
         finally:
             con.commit()
 
                 text = None
                 title_id = self.title_id(title, cursor)
                 if not list(self.page_backlinks(title)):
-                    cursor.execute("DELETE FROM titles WHERE id=?;", (title_id,))
+                    cursor.execute("DELETE FROM titles WHERE id=?;",
+                                   (title_id,))
         extract_links = getattr(page, 'extract_links', None)
         if extract_links and text:
             links = extract_links(text)
 
         # We use % here because the sqlite3's substitiution doesn't work
         # We store revision 0 as 1, 1 as 2, etc. because 0 means "no revision"
-        self.con.execute('PRAGMA USER_VERSION=%d;' % (int(rev+1),))
+        self.con.execute('PRAGMA USER_VERSION=%d;' % (int(rev + 1),))
 
     def get_last_revision(self):
         """Retrieve the last indexed repository revision."""
         c = con.execute('PRAGMA USER_VERSION;')
         rev = c.fetchone()[0]
         # -1 means "no revision", 1 means revision 0, 2 means revision 1, etc.
-        return rev-1
+        return rev - 1
 
     def update(self, wiki):
         """Reindex al pages that changed since last indexing."""
         self.reindex(wiki, changed)
         rev = self.storage.repo_revision()
         self.set_last_revision(rev)
-
 import mercurial.util
 import mercurial.hgweb
 import mercurial.commands
-
+import mercurial.merge
 
 import error
+import page
+
 
 def locked_repo(func):
     """A decorator for locking the repository when calling a method."""
 
     return new_func
 
+
 def _find_repo_path(path):
     """Go up the directory tree looking for a repository."""
 
             return None
     return path
 
+
 def _get_ui():
     try:
         ui = mercurial.ui.ui(report_untrusted=False,
     change history, using Mercurial repository as the storage method.
     """
 
-    def __init__(self, path, charset=None, _=lambda x:x, unix_eol=False):
+    def __init__(self, path, charset=None, _=lambda x: x, unix_eol=False,
+                 extension=None):
         """
         Takes the path to the directory where the pages are to be kept.
-        If the directory doen't exist, it will be created. If it's inside
+        If the directory doesn't exist, it will be created. If it's inside
         a Mercurial repository, that repository will be used, otherwise
         a new repository will be created in it.
         """
         self._ = _
         self.charset = charset or 'utf-8'
         self.unix_eol = unix_eol
+        self.extension = extension
         self.path = os.path.abspath(path)
         if not os.path.exists(self.path):
             os.makedirs(self.path)
             self._repos[thread_id] = repo
             return repo
 
-
     def _check_path(self, path):
         """
         Ensure that the path is within allowed bounds.
         if (filename.split('.')[0].upper() in _windows_device_files or
             filename.startswith('_') or filename.startswith('.')):
             filename = '_' + filename
+        if page.page_mime(title) == 'text/x-wiki' and self.extension:
+            filename += self.extension
         return os.path.join(self.repo_prefix, filename)
 
     def _file_to_title(self, filepath):
+        _ = self._
         if not filepath.startswith(self.repo_prefix):
             raise error.ForbiddenErr(
                 _(u"Can't read or write outside of the pages repository"))
         name = filepath[len(self.repo_prefix):].strip('/')
-        # Unescape special windows filenames and dot files
-        if name.startswith('_') and len(name)>1:
+        # Un-escape special windows filenames and dot files
+        if name.startswith('_') and len(name) > 1:
             name = name[1:]
+        if self.extension and name.endswith(self.extension):
+            name = name[:-len(self.extension)]
         return werkzeug.url_unquote(name)
 
     def __contains__(self, title):
         self.repo.dirstate.setparents(tip_node, node)
         # Mercurial 1.1 and later need updating the merge state
         try:
-            mercurial.merge.mergestate(self.repo).mark(repo_file, "r")
-        except (AttributeError, KeyError):
+            mergestate = mercurial.merge.mergestate
+        except AttributeError:
             pass
+        else:
+            state = mergestate(self.repo)
+            try:
+                state.mark(repo_file, "r")
+            except KeyError:
+                # There were no conflicts to mark
+                pass
+            else:
+                # Mercurial 1.7+ needs a commit
+                try:
+                    commit = state.commit
+                except AttributeError:
+                    pass
+                else:
+                    commit()
         return msg
 
     @locked_repo
-    def save_file(self, title, file_name, author=u'', comment=u'', parent=None):
+    def save_file(self, title, file_name, author=u'', comment=u'',
+                  parent=None):
         """Save an existing file as specified page."""
 
         _ = self._
         try:
             mercurial.util.rename(file_name, file_path)
         except OSError, e:
-            if e.errno == errno.ENAMETOOLONG: # "File name too long"
+            if e.errno == errno.ENAMETOOLONG:
+                # "File name too long"
                 raise error.RequestURITooLarge()
             else:
                 raise
         data = text.encode(self.charset)
         if self.unix_eol:
             data = data.replace('\r\n', '\n')
-            print 'replacing newlines...'
         self.save_data(title, data, author, comment, parent)
 
     def page_text(self, title):
 
         return self._changectx().rev()
 
-
     def _changectx(self):
         """Get the changectx of the tip."""
 
             return
         maxrev = filectx_tip.filerev()
         minrev = 0
-        for rev in range(maxrev, minrev-1, -1):
+        for rev in range(maxrev, minrev - 1, -1):
             filectx = filectx_tip.filectx(rev)
             date = datetime.datetime.fromtimestamp(filectx.date()[0])
             author = unicode(filectx.user(), "utf-8",
         changectx = self._changectx()
         maxrev = changectx.rev()
         minrev = 0
-        for wiki_rev in range(maxrev, minrev-1, -1):
+        for wiki_rev in range(maxrev, minrev - 1, -1):
             change = self.repo.changectx(wiki_rev)
             date = datetime.datetime.fromtimestamp(change.date()[0])
             author = unicode(change.user(), "utf-8",
                 yield werkzeug.url_unquote(filename)
 
     def changed_since(self, rev):
-        """Return all pages that changed since specified repository revision."""
+        """
+        Return all pages that changed since specified repository revision.
+        """
 
         try:
             last = self.repo.lookup(int(rev))
         current = self.repo.lookup('tip')
         status = self.repo.status(current, last)
         modified, added, removed, deleted, unknown, ignored, clean = status
-        for filename in modified+added+removed+deleted:
+        for filename in modified + added + removed + deleted:
             if filename.startswith(self.repo_prefix):
                 yield self._file_to_title(filename)
 
         path = os.path.join(self.repo_prefix, escaped)
         if os.path.isdir(os.path.join(self.repo_path, path)):
             path = os.path.join(path, self.index)
+        if page.page_mime(title) == 'text/x-wiki' and self.extension:
+            path += self.extension
         return path
 
     def _file_to_title(self, filepath):
             mercurial.commands.rename(self.ui, self.repo, path, temp_path)
             os.makedirs(path)
             index_path = os.path.join(path, self.index)
-            mercurial.commands.rename(self.ui, self.repo, temp_path, index_path)
+            mercurial.commands.rename(self.ui, self.repo, temp_path,
+                                      index_path)
         finally:
             try:
                 os.rmdir(temp_dir)
             except OSError:
                 pass
-        self._commit([index_path, path], _(u"made subdirectory page"), "<wiki>")
+        def repo_path(path):
+            return path[len(self.repo_path)+1:]
+        files = [repo_path(index_path), repo_path(path)]
+        self._commit(files, _(u"made subdirectory page"), "<wiki>")
 
     @locked_repo
-    def save_file(self, title, file_name, author=u'', comment=u'', parent=None):
+    def save_file(self, title, file_name, author=u'', comment=u'',
+                  parent=None):
         """Save the file and make the subdirectories if needed."""
 
         path = self._file_path(title)
         try:
             os.makedirs(os.path.join(self.repo_path, dir_path))
         except OSError, e:
-            if e.errno != errno.EEXIST: # "File exists"
+            if e.errno != errno.EEXIST:
+                # "File exists"
                 raise
         super(WikiSubdirectoryStorage, self).save_file(title, file_name,
                                                        author, comment, parent)
         commit after removing empty directories.
         """
 
-        super(WikiSubdirectoryStorage, self).delete_page(title, author, comment)
+        super(WikiSubdirectoryStorage, self).delete_page(title, author,
+                                                         comment)
         file_path = self._file_path(title)
         self._check_path(file_path)
         dir_path = os.path.dirname(file_path)
-        try:
-            os.removedirs(dir_path)
-        except OSError, e:
-            if e.errno != errno.ENOTEMPTY: # "Directory not empty"
-                raise
+        if dir_path != self.repo_path:
+            try:
+                os.removedirs(dir_path)
+            except OSError, e:
+                if e.errno != errno.ENOTEMPTY:
+                    # "Directory not empty"
+                    raise
 
     def all_pages(self):
         """
         """
 
         for (dirpath, dirnames, filenames) in os.walk(self.path):
-            path = dirpath[len(self.path)+1:]
+            path = dirpath[len(self.path) + 1:]
             for name in filenames:
                 if os.path.basename(name) == self.index:
                     filename = os.path.join(path, os.path.dirname(name))
                     if (os.path.isfile(os.path.join(self.path, filename))
                         and not filename.startswith('.')):
                         yield werkzeug.url_unquote(filename)
-
-
-

hatta/templates/base.html

 {% endblock %}
 
 {% block search %}
-    <form action="/+search" id="hatta-search" method="GET"
+    <form action="{{ url(None, wiki.search) }}" id="hatta-search" method="GET"
       ><div><input
         id="hatta-search" name="q"><input
         class="button" type="submit" value="Search"

hatta/templates/changes.html

 {% extends "page_special.html" %} 
 
-{% block page_title %}{{ _("Recent changes") }}{% endblock %}
+{% block page_title %}<h1>{{ _("Recent changes") }}</h1>{% endblock %}
 {% block title %}{{ _("Recent changes") }} - {{ wiki.site_name }}{% endblock %}
 
 {% block content %}
     {% for date, date_url, title, author, comment in changes %}
         <li><a href="{{ date_url }}">{{ date_html(date)|safe }}</a>
             <b>{{ page.wiki_link(title)|safe }}</b> . . . .
-            <i>{{ page.wiki_link('~%s' % author, author)|safe }}</i>
+            <i>{{ page.wiki_link("~%s" % author, author)|safe }}</i>
             <div class="comment">{{ comment }}</div>
         </li>
     {% endfor %}

hatta/templates/edit_file.html

 {% extends "page.html" %}
 
-{% block page_title %}<h1>{{ _("Editing \"%(title)s\"", title=title) }}</h1>{% endblock %}
+{% block page_title %}<h1>{{ _("Editing \"%(title)s\"",
+                             title=title) }}</h1>{% endblock %}
 {% block title %}{{ _("Editing \"%(title)s\"", title=title) }}{% endblock %}
 
 {% block content %}
     <p>{{ _("This is a binary file, it can't be edited on a wiki. "
             "Please upload a new version instead.") }}</p>
-    <form action="" method="POST" class="editor" enctype="multipart/form-data"><div>
+    <form action="" method="POST" class="editor"
+          enctype="multipart/form-data"><div>
         <div class="upload"><input type="file" name="data"></div>
         <label class="comment">{{ _("Comment") }} <input
             name="comment" value="{{ comment }}"></label>

hatta/templates/page_special.html

 {% block title %}{{ special_title }} - {{ wiki.site_name }}{% endblock %}
 
 {% block footer %}
-    <a href="/+history/" class="changes">Changes</a>
-    <a href="/+index" class="index">Index</a>
-    <a href="/+orphaned" class="orphaned">Orphaned</a>
-    <a href="/+wanted" class="wanted">Wanted</a>
+    <a href="{{ url(None, wiki.recent_changes) }}" class="changes">Changes</a>
+    <a href="{{ url(None, wiki.all_pages) }}" class="index">Index</a>
+    <a href="{{ url(None, wiki.orphaned) }}" class="orphaned">Orphaned</a>
+    <a href="{{ url(None, wiki.wanted) }}" class="wanted">Wanted</a>
 {% endblock %}
 import werkzeug.routing
 import jinja2
 
+pygments = None
 try:
     import pygments
 except ImportError:
-    pygments = None
+    pass
 
 import hatta
 import storage
 import error
 import data
 
-import mercurial # import it after storage!
+import mercurial  # import it after storage!
+
 
 class WikiResponse(werkzeug.BaseResponse, werkzeug.ETagResponseMixin,
                    werkzeug.CommonResponseDescriptorsMixin):
             auth = werkzeug.url_unquote(self.environ.get('REMOTE_USER', ""))
         except UnicodeError:
             auth = None
-        author = (self.form.get("author") or cookie or auth or self.remote_addr)
+        author = (self.form.get("author") or cookie or auth or
+                  self.remote_addr)
         return author
 
     def _get_file_stream(self, total_content_length=None, content_type=None,
         self.alias_page = self.config.get('alias_page', 'Alias')
         self.pygments_style = self.config.get('pygments_style', 'tango')
         self.subdirectories = self.config.get_bool('subdirectories', False)
+        self.extension = self.config.get('extension', None)
         self.unix_eol = self.config.get_bool('unix_eol', False)
         if self.subdirectories:
             self.storage = storage.WikiSubdirectoryStorage(self.path,
-                                                self.page_charset, self.gettext)
+                                                           self.page_charset,
+                                                           self.gettext,
+                                                           self.unix_eol,
+                                                           self.extension)
         else:
             self.storage = self.storage_class(self.path, self.page_charset,
-                                              self.gettext, self.unix_eol)
+                                              self.gettext, self.unix_eol,
+                                              self.extension)
         self.cache = os.path.abspath(config.get('cache_path',
                                      os.path.join(self.storage.repo_path,
                                                   '.hg', 'hatta', 'cache')))
         self.index.update(self)
         self.url_rules = URL.rules(self)
         self.url_map = werkzeug.routing.Map(self.url_rules, converters={
-            'title':WikiTitleConverter,
-            'all':WikiAllConverter
+            'title': WikiTitleConverter,
+            'all': WikiAllConverter,
         })
 
     def add_url_rule(self, rule):
 
         self.url_rules.append(rule)
         self.url_map = werkzeug.routing.Map(self.url_rules, converters={
-            'title':WikiTitleConverter,
-            'all':WikiAllConverter
+            'title': WikiTitleConverter,
+            'all': WikiAllConverter,
         })
 
     def get_page(self, request, title):
         response = WikiResponse(content, mimetype=mime)
         if rev is None:
             inode, _size, mtime = self.storage.page_file_meta(title)
-            response.set_etag(u'%s/%s/%d-%d' % (etag, werkzeug.url_quote(title),
+            response.set_etag(u'%s/%s/%d-%d' % (etag,
+                                                werkzeug.url_quote(title),
                                                 inode, mtime))
             if size == -1:
                 size = _size
         except error.NotFoundErr:
             url = request.get_url(title, self.edit, external=True)
             return werkzeug.routing.redirect(url, code=303)
-        html = page.template(page.template_name, content=content)
+        html = page.template("page.html", content=content)
         dependencies = page.dependencies()
         etag = '/(%s)' % u','.join(dependencies)
         return self.response(request, title, html, etag=etag)
             werkzeug.html.p(
                 werkzeug.html(
                     _(u'Content of revision %(rev)d of page %(title)s:'))
-                % {'rev': rev, 'title': link }),
+                % {'rev': rev, 'title': link}),
             werkzeug.html.pre(werkzeug.html(text)),
         ]
         special_title = _(u'Revision of "%(title)s"') % {'title': title}
                     self.storage.delete_page(title, author, comment)
                     url = request.get_url(self.front_page)
                 else:
-                    self.storage.save_text(title, text, author, comment, parent)
+                    self.storage.save_text(title, text, author, comment,
+                                           parent)
             else:
                 text = u''
                 upload = request.files['data']
 
     @URL('/+edit/<title:title>', methods=['GET'])
     def edit(self, request, title, preview=None):
-        _ = self.gettext
         self._check_lock(title)
         exists = title in self.storage
         if exists:
             if rev > 0:
                 url = request.adapter.build(self.diff, {
                     'title': title,
-                    'from_rev': rev-1,
-                    'to_rev': rev
+                    'from_rev': rev - 1,
+                    'to_rev': rev,
                 }, force_external=True)
             else:
                 url = request.adapter.build(self.revision, {
         try:
             wrap_file = werkzeug.wrap_file
         except AttributeError:
-            wrap_file = lambda x, y:y
+            wrap_file = lambda x, y: y
         f = wrap_file(request.environ, self.storage.open_page(title))
         response = self.response(request, title, f, '/download', mime, size=-1)
         response.direct_passthrough = True
         try:
             wrap_file = werkzeug.wrap_file
         except AttributeError:
-            wrap_file = lambda x, y:y
+            wrap_file = lambda x, y: y
         f = wrap_file(request.environ, open(cache_file))
         response = self.response(request, title, f, '/render', cache_mime,
                                  size=cache_size)
             else:
                 comment = _(u'Undo of change %(rev)d of page %(title)s') % {
                     'rev': rev, 'title': title}
-                data = self.storage.page_revision(title, rev-1)
+                data = self.storage.page_revision(title, rev - 1)
                 self.storage.save_data(title, data, author, comment, parent)
             page = self.get_page(request, title)
             self.index.update_page(page, title, data=data)
                 max_rev = rev
             if rev > 0:
                 date_url = request.adapter.build(self.diff, {
-                    'title': title, 'from_rev': rev-1, 'to_rev': rev})
+                    'title': title, 'from_rev': rev - 1, 'to_rev': rev})
             else:
                 date_url = request.adapter.build(self.revision, {
                     'title': title, 'rev': rev})
     def recent_changes(self, request):
         """Serve the recent changes page."""
 
-        _ = self.gettext
         def _changes_list():
             last = {}
             lastrev = {}
                 if rev > 0:
                     date_url = request.adapter.build(self.diff, {
                         'title': title,
-                        'from_rev': rev-1,
-                        'to_rev': lastrev.get(title, rev)
+                        'from_rev': rev - 1,
+                        'to_rev': lastrev.get(title, rev),
                     })
                 elif rev == 0:
                     date_url = request.adapter.build(self.revision, {
         response = WikiResponse(html, mimetype='text/html')
         return response
 
-
     @URL('/+index')
     def all_pages(self, request):
         """Show index of all pages in the wiki."""
     def wanted(self, request):
         """Show all pages that don't exist yet, but are linked."""
 
-        _ = self.gettext
         def _wanted_pages_list():
             for refs, title in self.index.wanted_pages():
-                if not (parser.external_link(title) or title.startswith('+')):
+                if not (parser.external_link(title) or title.startswith('+')
+                        or title.startswith(':')):
                     yield refs, title
 
         page = self.get_page(request, '')
         """Serve the search results page."""
 
         _ = self.gettext
+
         def search_snippet(title, words):
             """Extract a snippet of text for search results."""
 
             except error.NotFoundErr:
                 return u''
             regexp = re.compile(u"|".join(re.escape(w) for w in words),
-                                re.U|re.I)
+                                re.U | re.I)
             match = regexp.search(text)
             if match is None:
                 return u""
             h = werkzeug.html
             self.storage.reopen()
             self.index.update(self)
-            result = sorted(self.index.find(words), key=lambda x:-x[0])
+            result = sorted(self.index.find(words), key=lambda x: -x[0])
             yield werkzeug.html.p(h(_(u'%d page(s) containing all words:')
                                   % len(result)))
             yield u'<ol class="search">'
                 yield h.li(h.b(page.wiki_link(title)), u' ', h.i(str(score)),
                            h.div(search_snippet(title, words),
                                  _class="snippet"),
-                           id_="search-%d" % (number+1))
+                           id_="search-%d" % (number + 1))
             yield u'</ol>'
 
         query = request.values.get('q', u'').strip()
     def backlinks(self, request, title):
         """Serve the page with backlinks."""
 
-        _ = self.gettext
         self.storage.reopen()
         self.index.update(self)
         page = self.get_page(request, title)
                   'Disallow: /+feed\r\n'
                   'Disallow: /+history\r\n'
                   'Disallow: /+search\r\n'
-                  'Disallow: /+hg\r\n'
-                 )
+                  'Disallow: /+hg\r\n')
         return self._serve_default(request, 'robots.txt', robots,
                                    'text/plain')
 
     @URL('/+hg<all:path>', methods=['GET', 'POST', 'HEAD'])
     def hgweb(self, request, path=None):
-        """Serve the pages repository on the web like a normal hg repository."""
+        """
+        Serve the pages repository on the web like a normal hg repository.
+        """
 
         _ = self.gettext
         if not self.config.get_bool('hgweb', False):
             raise error.ForbiddenErr(_(u'Repository access disabled.'))
         app = mercurial.hgweb.request.wsgiapplication(
             lambda: mercurial.hgweb.hgweb(self.storage.repo, self.site_name))
+
         def hg_app(env, start):
             env = request.environ
             prefix = '/+hg'
 
         _ = self.gettext
         if not request.remote_addr.startswith('127.'):
-            raise error.ForbiddenErr(_(u'This URL can only be called locally.'))
+            raise error.ForbiddenErr(
+                _(u'This URL can only be called locally.'))
+
         def agony():
             yield u'Oh dear!'
             self.dead = True
             request.cleanup()
             del request
             del adapter
-
     description=hatta.project_description,
     long_description=hatta.__doc__,
     keywords='wiki wsgi web mercurial repository',
-    py_modules=['hatta'],
+    packages=['hatta'],
     data_files=[
         ('share/locale/ar/LC_MESSAGES', ['locale/ar/LC_MESSAGES/hatta.mo']),
         ('share/locale/da/LC_MESSAGES', ['locale/da/LC_MESSAGES/hatta.mo']),
             'examples/extend_parser.py'
         ]),
     ],
+    include_package_data=True,
     platforms='any',
-    requires=['werkzeug (>=0.3)', 'mercurial (>=1.0)',
-             'pybonjour (>=1.1.1)'],
-    setup_requires=['pybonjour'],
+    requires=['werkzeug (>=0.3)', 'mercurial (>=1.0)'],
     classifiers=[
         'License :: OSI Approved :: GNU General Public License (GPL)',
         'Intended Audience :: Developers',
 
 if __name__ == '__main__':
     setup(**config)
-    try:
-        import pybonjour
-    except ImportError:
-        print u'*** Warning ***'
-        print u'Please install pybonjour to build a full-featured binary.'

tests/test_hatta.py

 import hatta
 import werkzeug
 import os
+import py.test
 import lxml.doctestcompare
 from test_parser import HTML
 
         assert request.get_url('title', wiki.edit) == u'/+edit/title'
         assert request.get_url(None, wiki.favicon_ico) == u'/favicon.ico'
 
+    @py.test.mark.xfail
     def test_html_page(self, req):
         wiki, request = req
         content = ["some &lt;content&gt;"]

tests/test_parser.py

             </ol>
         """
 
+    def test_very_nested_numbers(self):
+        html = parse(u'# 1\n# 2\n## 2.1\n### 2.1.1\n# 3')
+        assert html == """
+            <ol id="line_0">
+                <li>1</li>
+                <li>2<ol id="line_2">
+                    <li>2.1<ol id="line_3">
+                        <li>2.1.1</li>
+                    </ol></li>
+                </ol></li>
+                <li>3</li>
+            </ol>
+        """
+
     def test_mixed_numbers_bullets(self):
         html = parse(u'# test line one\n* test line two\n*# Nested item')
         assert html == """

tests/test_repo.py

 
 import hatta
 import py
+import py.test
 import werkzeug
 
 # Patch for no gettext
         exists = os.path.exists(dirpath)
         assert not exists
 
+    def test_root_delete(self, subdir_repo):
+        """
+        Check if deleting a non-subdirectory page works.
+        """
+
+        title = u'ziew'
+        filepath = os.path.join(subdir_repo.path, 'ziew')
+        subdir_repo.save_text(title, self.text, self.author, self.comment,
+                              parent=-1)
+        exists = os.path.exists(filepath)
+        assert exists
+        subdir_repo.delete_page(title, self.author, self.comment)
+        exists = os.path.exists(filepath)
+        assert not exists
+
+    def test_nonexistent_root_delete(self, subdir_repo):
+        """
+        Check if deleting a non-existing non-subdirectory page works.
+        """
+
+        title = u'ziew2'
+        filepath = os.path.join(subdir_repo.path, 'ziew2')
+        exists = os.path.exists(filepath)
+        assert not exists
+        subdir_repo.delete_page(title, self.author, self.comment)
+        exists = os.path.exists(filepath)
+        assert not exists
+
     def test_create_parent(self, subdir_repo):
         """
-        Make sure you can create a parent page of existsing page.
+        Make sure you can create a parent page of existing page.
         """
 
         subdir_repo.save_text(u'xxx/yyy', self.text, self.author, self.comment,
 
     def test_create_subpage(self, subdir_repo):
         """
-        Make sure you can create a subpage of existsing page.
+        Make sure you can create a subpage of existing page.
         """
 
         subdir_repo.save_text(u'xxx', self.text, self.author, self.comment,
         assert os.path.exists(os.path.join(subdir_repo.path, 'xxx'))
         assert os.path.exists(os.path.join(subdir_repo.path, 'xxx/yyy'))
         assert os.path.exists(os.path.join(subdir_repo.path, 'xxx/Index'))
+        tracked = subdir_repo._changectx()['xxx/Index']
+        assert tracked
 
     def test_create_subsubpage(self, subdir_repo):
         """
-        Make sure you can create a subpage of existsing page.
+        Make sure you can create a subpage of existing page.
         """
 
         subdir_repo.save_text(u'xxx', self.text, self.author, self.comment,
Tip: Filter by directory path e.g. /media app.js to search for public/media/app.js.
Tip: Use camelCasing e.g. ProjME to search for ProjectModifiedEvent.java.
Tip: Filter by extension type e.g. /repo .js to search for all .js files in the /repo directory.
Tip: Separate your search with spaces e.g. /ssh pom.xml to search for src/ssh/pom.xml.
Tip: Use ↑ and ↓ arrow keys to navigate and return to view the file.
Tip: You can also navigate files with Ctrl+j (next) and Ctrl+k (previous) and view the file with Ctrl+o.
Tip: You can also navigate files with Alt+j (next) and Alt+k (previous) and view the file with Alt+o.