Commits

Anonymous committed 84ce5ed

Start refactoring with the aim of simplifying the code.
For now, just move some code around.


Files changed (14)

friendpaste/application.py

 from friendpaste.urls import map, all_views
 from friendpaste.utils import local, local_manager
 from friendpaste.template import do_highlight, do_timesince, ALL_COLORSHEME, \
-    datetimeformat, do_blockdiff
-from friendpaste.http import Request
-
+    datetimeformat, do_blockdiff, url_for
+from friendpaste.http import FPRequest
 
 
 class Friendpaste(object):
 
         env = Environment(loader=FileSystemLoader(self.templates_path))
         env.template_charset = 'utf-8'
-        env.filters['highlight'] = do_highlight
-        env.filters['timesince'] = do_timesince
-        env.filters['tabular'] = do_blockdiff
-        env.filters['datetimeformat'] = datetimeformat
-        env.globals['ALL_COLORSHEME'] = ALL_COLORSHEME 
+        env.filters.update({
+            'highlight': do_highlight,
+            'timesince': do_timesince,
+            'tabular': do_blockdiff,
+            'datetimeformat': datetimeformat
+        })
+        env.globals.update({
+            'ALL_COLORSHEME': ALL_COLORSHEME,
+            'url': url_for
+        })
         self.template_env = env
 
         self.initialized = True
     def dispatch_request(self, environ, start_response):
         local.application = self 
         local.url_adapter = adapter  = map.bind_to_environ(environ)
-        request = object.__new__(Request)
+        request = object.__new__(FPRequest)
         local.request = request
         request.__init__(self, environ)
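
The hunk above stops before the end of dispatch_request. Under werkzeug's routing API the missing tail would typically match the bound adapter and look the endpoint up in all_views from friendpaste/urls.py; the sketch below is an assumption about that shape, not code from this commit:

    # Hypothetical tail of dispatch_request, written as a standalone helper.
    from werkzeug.exceptions import HTTPException, NotFound

    from friendpaste.http import FPResponse
    from friendpaste.urls import all_views

    def _dispatch(adapter, request, environ, start_response):
        try:
            # match() returns the endpoint name and the URL values,
            # e.g. ('paste/view', {'id': 'aBc123'})
            endpoint, values = adapter.match()
            response = all_views[endpoint](request, **values)
        except NotFound:
            response = FPResponse('Not Found', status=404)
        except HTTPException, e:
            # werkzeug HTTP exceptions are WSGI applications themselves
            response = e
        return response(environ, start_response)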
         

friendpaste/generic.py

-# -*- coding: utf-8 -
-# Copyright 2008 by Benoît Chesneau <benoitc@e-engura.com>
-# 
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from friendpaste.template import render_response
-
-def about(request):
-    return render_response("about.html")
-
-def services(request, page=None):
-    if page is None:
-        return render_response("services.html")
-
-    return render_response("services/%s.html" % page)

friendpaste/http.py

 # limitations under the License.
 
 
-from werkzeug import BaseRequest, BaseResponse, AcceptMixin
+from werkzeug import Request, Response
 
-class Request(BaseRequest, AcceptMixin):
+class FPRequest(Request):
     charset = 'utf-8'
 
     def __init__(self, app, environ):
-        super(Request, self).__init__(environ)
+        super(FPRequest, self).__init__(environ)
         self.app = app
 
         
-class Response(BaseResponse):
+class FPResponse(Response):
     """
     An utf-8 response, with text/html as default mimetype.
     """

friendpaste/models.py

+# -*- coding: utf-8 -
+# Copyright 2008 by Benoît Chesneau <benoitc@e-engura.com>
+# 
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from datetime import datetime
+import binascii
+import base64
+
+# compatibility with python 2.4
+try:
+    from hashlib import sha1 as _sha
+except:
+    import sha
+    _sha = sha.new
+
+
+from couchdb.client import ResourceNotFound
+from couchdb.schema import *
+import simplejson
+
+from friendpaste.utils import local
+
+hex = binascii.hexlify
+
+def hash(text, p):
+    """generate a hash from the given text and its parent hashes
+
+    This hash combines both the current file contents and its history
+    """
+    s = _sha(p)
+    s.update(text.encode('utf-8'))
+    return s.digest()
+
+def short(node):
+    return hex(node[:6])
+
+class Snippet(Document):
+    title=TextField()
+    type=TextField(default='snippet')
+    parent=TextField(default='')
+    revid = TextField()
+    snippet=TextField()
+    language=TextField()
+    created=DateTimeField()
+    updated=DateTimeField()
+
+    def _genid(self):
+        charset = 'abcdefghjkmnpqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ23456789'
+        from random import choice
+        return ''.join([choice(charset) for i in range(8)])
+
+    def store(self, db):
+        """
+        override store procedure to generate small id
+        """
+        self.updated = datetime.utcnow()
+        if getattr(self._data, 'id', None) is None:
+            self.created = datetime.utcnow()
+            node = hash(self.snippet+self.title+self.language,'')
+            self.revid = short(node)
+            stored=False
+            docid=None
+            while 1:
+                id = self._genid()
+                try:
+                    docid=db.resource.put(content=self._data, path='/%s/' % str(id))['id']
+                except:
+                    continue
+                if docid is not None:
+                    break
+            self._data = db.get(docid)
+        else:
+            old_data = db.get(self._data.id)
+            self.created = datetime.utcnow()
+            old_hash = hash(old_data['snippet']+old_data['title']+old_data['language'], '')
+            new_hash = hash(self.snippet+self.title+self.language, '')
+            if old_hash != new_hash:
+                #no need to save changes if there isn't
+                old_data['type']='revision'
+                old_data['parent']=self._data.id
+                db.create(old_data)
+
+                # get new revid
+                node = hash(self.snippet+self.title+self.language, old_data['revid'])
+                self.revid = short(node)
+            
+                # save changes
+                db[self._data.id] = self._data
+        
+        return self
+
+
+    
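
The store() override above gives each new snippet a short random document id and a revid derived from a SHA-1 over its content and parent revision. A minimal usage sketch; the server URL and database name are assumptions:

    from couchdb.client import Server
    from friendpaste.models import Snippet

    server = Server('http://localhost:5984/')   # assumed CouchDB location
    db = server['friendpaste']                  # assumed database name

    s = Snippet(title='hello', snippet='print 1', language='python')
    s = s.store(db)       # picks a random 8-char id, sets revid via hash()/short()
    print s.id, s.revid

    s.snippet = 'print 2'
    s.store(db)           # content changed: the old version is kept as a 'revision' doc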

friendpaste/paste/__init__.py

Empty file removed.

friendpaste/paste/diff.py

-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2004-2006 Edgewall Software
-# Copyright (C) 2004-2006 Christopher Lenz <cmlenz@gmx.de>
-# All rights reserved.
-#
-# This software is licensed as described in the file COPYING, which
-# you should have received as part of this distribution. The terms
-# are also available at http://trac.edgewall.org/wiki/TracLicense.
-#
-# This software consists of voluntary contributions made by many
-# individuals. For the exact contribution history, see the revision
-# history and logs, available at http://trac.edgewall.org/log/.
-#
-# Author: Christopher Lenz <cmlenz@gmx.de>
-#
-# Adapted for friendpaste By Benoît Chesneau <benoitc@e-engura.com>
-
-from difflib import SequenceMatcher
-import re
-
-
-def expandtabs(s, tabstop=8, ignoring=None):
-    if '\t' not in s: return s
-    if ignoring is None: return s.expandtabs(tabstop)
-
-    outlines = []
-    for line in s.split('\n'):
-        if '\t' not in line:
-            outlines.append(line)
-            continue
-        p = 0
-        s = []
-        for c in line:
-            if c == '\t':
-                n = tabstop-p%tabstop
-                s.append(' '*n)
-                p+=n
-            elif not ignoring or c not in ignoring:
-                p += 1
-                s.append(c)
-            else:
-                s.append(c)
-        outlines.append(''.join(s))
-    return '\n'.join(outlines)
-
-
-__all__ = ['get_diff_options', 'hdf_diff', 'diff_blocks', 'unified_diff']
-
-
-def _get_change_extent(str1, str2):
-    """
-    Determines the extent of differences between two strings. Returns a tuple
-    containing the offset at which the changes start, and the negative offset
-    at which the changes end. If the two strings have neither a common prefix
-    nor a common suffix, (0, 0) is returned.
-    """
-    start = 0
-    limit = min(len(str1), len(str2))
-    while start < limit and str1[start] == str2[start]:
-        start += 1
-    end = -1
-    limit = limit - start
-    while -end <= limit and str1[end] == str2[end]:
-        end -= 1
-    return (start, end + 1)
-
-def _get_opcodes(fromlines, tolines, ignore_blank_lines=False,
-                 ignore_case=False, ignore_space_changes=False):
-    """
-    Generator built on top of SequenceMatcher.get_opcodes().
-    
-    This function detects line changes that should be ignored and emits them
-    as tagged as 'equal', possibly joined with the preceding and/or following
-    'equal' block.
-    """
-
-    def is_ignorable(tag, fromlines, tolines):
-        if tag == 'delete' and ignore_blank_lines:
-            if ''.join(fromlines) == '':
-                return True
-        elif tag == 'insert' and ignore_blank_lines:
-            if ''.join(tolines) == '':
-                return True
-        elif tag == 'replace' and (ignore_case or ignore_space_changes):
-            if len(fromlines) != len(tolines):
-                return False
-            def f(str):
-                if ignore_case:
-                    str = str.lower()
-                if ignore_space_changes:
-                    str = ' '.join(str.split())
-                return str
-            for i in range(len(fromlines)):
-                if f(fromlines[i]) != f(tolines[i]):
-                    return False
-            return True
-
-    matcher = SequenceMatcher(None, fromlines, tolines)
-    previous = None
-    for tag, i1, i2, j1, j2 in matcher.get_opcodes():
-        if tag == 'equal':
-            if previous:
-                previous = (tag, previous[1], i2, previous[3], j2)
-            else:
-                previous = (tag, i1, i2, j1, j2)
-        else:
-            if is_ignorable(tag, fromlines[i1:i2], tolines[j1:j2]):
-                if previous:
-                    previous = 'equal', previous[1], i2, previous[3], j2
-                else:
-                    previous = 'equal', i1, i2, j1, j2
-                continue
-            if previous:
-                yield previous
-            yield tag, i1, i2, j1, j2
-            previous = None
-
-    if previous:
-        yield previous
-
-def _group_opcodes(opcodes, n=3):
-    """
-    Python 2.2 doesn't have SequenceMatcher.get_grouped_opcodes(), so let's
-    provide equivalent here. The opcodes parameter can be any iterable or
-    sequence.
-
-    This function can also be used to generate full-context diffs by passing 
-    None for the parameter n.
-    """
-    # Full context produces all the opcodes
-    if n is None:
-        yield list(opcodes)
-        return
-
-    # Otherwise we leave at most n lines with the tag 'equal' before and after
-    # every change
-    nn = n + n
-    group = []
-    for idx, (tag, i1, i2, j1, j2) in enumerate(opcodes):
-        if idx == 0 and tag == 'equal': # Fixup leading unchanged block
-            i1, j1 = max(i1, i2 - n), max(j1, j2 - n)
-        elif tag == 'equal' and i2 - i1 > nn:
-            group.append((tag, i1, min(i2, i1 + n), j1, min(j2, j1 + n)))
-            yield group
-            group = []
-            i1, j1 = max(i1, i2 - n), max(j1, j2 - n)
-        group.append((tag, i1, i2, j1 ,j2))
-
-    if group and not (len(group) == 1 and group[0][0] == 'equal'):
-        if group[-1][0] == 'equal': # Fixup trailing unchanged block
-            tag, i1, i2, j1, j2 = group[-1]
-            group[-1] = tag, i1, min(i2, i1 + n), j1, min(j2, j1 + n)
-        yield group
-
-def hdf_diff(*args, **kwargs):
-    return diff_blocks(*args, **kwargs)
-
-def diff_blocks(fromlines, tolines, context=None, tabwidth=8,
-                ignore_blank_lines=0, ignore_case=0, ignore_space_changes=0):
-    """Return an array that is adequate for adding to the data dictionary
-
-    See the diff_div.html template.
-    """
-
-    type_map = {'replace': 'mod', 'delete': 'rem', 'insert': 'add',
-                'equal': 'unmod'}
-
-    space_re = re.compile(' ( +)|^ ')
-    def htmlify(match):
-        div, mod = divmod(len(match.group(0)), 2)
-        return div * '&nbsp; ' + mod * '&nbsp;'
-
-    def markup_intraline_changes(opcodes):
-        for tag, i1, i2, j1, j2 in opcodes:
-            if tag == 'replace' and i2 - i1 == j2 - j1:
-                for i in range(i2 - i1):
-                    fromline, toline = fromlines[i1 + i], tolines[j1 + i]
-                    (start, end) = _get_change_extent(fromline, toline)
-                    if start != 0 or end != 0:
-                        last = end+len(fromline)
-                        fromlines[i1+i] = fromline[:start] + '\0' + fromline[start:last] + \
-                                       '\1' + fromline[last:]
-                        last = end+len(toline)
-                        tolines[j1+i] = toline[:start] + '\0' + toline[start:last] + \
-                                     '\1' + toline[last:]
-            yield tag, i1, i2, j1, j2
-
-    changes = []
-    opcodes = _get_opcodes(fromlines, tolines, ignore_blank_lines, ignore_case,
-                           ignore_space_changes)
-    for group in _group_opcodes(opcodes, context):
-        blocks = []
-        last_tag = None
-        for tag, i1, i2, j1, j2 in markup_intraline_changes(group):
-            if tag != last_tag:
-                blocks.append({'type': type_map[tag],
-                               'base': {'offset': i1, 'lines': []},
-                               'changed': {'offset': j1, 'lines': []}})
-            if tag == 'equal':
-                for line in fromlines[i1:i2]:
-                    line = line.expandtabs(tabwidth)
-                    line = space_re.sub(htmlify, line)
-                    blocks[-1]['base']['lines'].append(unicode(line))
-                for line in tolines[j1:j2]:
-                    line = line.expandtabs(tabwidth)
-                    line = space_re.sub(htmlify, line)
-                    blocks[-1]['changed']['lines'].append(unicode(line))
-            else:
-                if tag in ('replace', 'delete'):
-                    for line in fromlines[i1:i2]:
-                        line = expandtabs(line, tabwidth, '\0\1')
-                        line = line
-                        line = '<del>'.join([space_re.sub(htmlify, seg)
-                                             for seg in line.split('\0')])
-                        line = line.replace('\1', '</del>')
-                        blocks[-1]['base']['lines'].append(
-                            unicode(line))
-                if tag in ('replace', 'insert'):
-                    for line in tolines[j1:j2]:
-                        line = expandtabs(line, tabwidth, '\0\1')
-                        line = line
-                        line = '<ins>'.join([space_re.sub(htmlify, seg)
-                                             for seg in line.split('\0')])
-                        line = line.replace('\1', '</ins>')
-                        blocks[-1]['changed']['lines'].append(
-                            unicode(line))
-        changes.append(blocks)
-    return changes
-
-def unified_diff(fromlines, tolines, context=None, ignore_blank_lines=0,
-                 ignore_case=0, ignore_space_changes=0):
-    opcodes = _get_opcodes(fromlines, tolines, ignore_blank_lines, ignore_case,
-                           ignore_space_changes)
-    for group in _group_opcodes(opcodes, context):
-        i1, i2, j1, j2 = group[0][1], group[-1][2], group[0][3], group[-1][4]
-        if i1 == 0 and i2 == 0:
-            i1, i2 = -1, -1 # support for 'A'dd changes
-        yield '@@ -%d,%d +%d,%d @@' % (i1 + 1, i2 - i1, j1 + 1, j2 - j1)
-        for tag, i1, i2, j1, j2 in group:
-            if tag == 'equal':
-                for line in fromlines[i1:i2]:
-                    yield ' ' + line
-            else:
-                if tag in ('replace', 'delete'):
-                    for line in fromlines[i1:i2]:
-                        yield '-' + line
-                if tag in ('replace', 'insert'):
-                    for line in tolines[j1:j2]:
-                        yield '+' + line
-
-def get_diff_options(req):
-    options_data = {}
-    data = {'options': options_data}
-    
-    def get_bool_option(name, default=0):
-        pref = int(req.session.get('diff_' + name, default))
-        arg = int(req.args.has_key(name))
-        if req.args.has_key('update') and arg != pref:
-            req.session['diff_' + name] = arg
-        else:
-            arg = pref
-        return arg
-
-    pref = req.session.get('diff_style', 'inline')
-    style = req.args.get('style', pref)
-    if req.args.has_key('update') and style != pref:
-        req.session['diff_style'] = style
-    data['style'] = style
-
-    pref = int(req.session.get('diff_contextlines', 2))
-    try:
-        arg = int(req.args.get('contextlines', pref))
-    except ValueError:
-        arg = -1
-    if req.args.has_key('update') and arg != pref:
-        req.session['diff_contextlines'] = arg
-    options = ['-U%d' % arg]
-    options_data['contextlines'] = arg
-
-    arg = get_bool_option('ignoreblanklines')
-    if arg:
-        options.append('-B')
-    options_data['ignoreblanklines'] = arg
-
-    arg = get_bool_option('ignorecase')
-    if arg:
-        options.append('-i')
-    options_data['ignorecase'] = arg
-
-    arg = get_bool_option('ignorewhitespace')
-    if arg:
-        options.append('-b')
-    options_data['ignorewhitespace'] = arg
-
-    return (style, options, data)

friendpaste/paste/models.py

-# -*- coding: utf-8 -
-# Copyright 2008 by Benoît Chesneau <benoitc@e-engura.com>
-# 
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from datetime import datetime
-import binascii
-import base64
-
-# compatibility with python 2.4
-try:
-    from hashlib import sha1 as _sha
-except:
-    import sha
-    _sha = sha.new
-
-
-from couchdb.client import ResourceNotFound
-from couchdb.schema import *
-import simplejson
-
-from friendpaste.utils import local
-
-hex = binascii.hexlify
-
-def hash(text, p):
-    """generate a hash from the given text and its parent hashes
-
-    This hash combines both the current file contents and its history
-    """
-    s = _sha(p)
-    s.update(text.encode('utf-8'))
-    return s.digest()
-
-def short(node):
-    return hex(node[:6])
-
-class Snippet(Document):
-    title=TextField()
-    type=TextField(default='snippet')
-    parent=TextField(default='')
-    revid = TextField()
-    snippet=TextField()
-    language=TextField()
-    created=DateTimeField()
-    updated=DateTimeField()
-
-    def _genid(self):
-        charset = 'abcdefghjkmnpqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ23456789'
-        from random import choice
-        return ''.join([choice(charset) for i in range(8)])
-
-    def store(self, db):
-        """
-        override store procedure to generate small id
-        """
-        self.updated = datetime.utcnow()
-        if getattr(self._data, 'id', None) is None:
-            self.created = datetime.utcnow()
-            node = hash(self.snippet+self.title+self.language,'')
-            self.revid = short(node)
-            stored=False
-            docid=None
-            while 1:
-                id = self._genid()
-                try:
-                    docid=db.resource.put(content=self._data, path='/%s/' % str(id))['id']
-                except:
-                    continue
-                if docid is not None:
-                    break
-            self._data = db.get(docid)
-        else:
-            old_data = db.get(self._data.id)
-            self.created = datetime.utcnow()
-            old_hash = hash(old_data['snippet']+old_data['title']+old_data['language'], '')
-            new_hash = hash(self.snippet+self.title+self.language, '')
-            if old_hash != new_hash:
-                #no need to save changes if there isn't
-                old_data['type']='revision'
-                old_data['parent']=self._data.id
-                db.create(old_data)
-
-                # get new revid
-                node = hash(self.snippet+self.title+self.language, old_data['revid'])
-                self.revid = short(node)
-            
-                # save changes
-                db[self._data.id] = self._data
-        
-        return self
-
-
-    

friendpaste/paste/views.py

-# -*- coding: utf-8 -
-# Copyright 2008 by Benoît Chesneau <benoitc@e-engura.com>
-# 
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os.path
-import time
-
-from wtforms import Form, TextField, TextAreaField, SelectField, ValidationError, \
-        validators
-from pygments.lexers import get_all_lexers, get_lexer_for_filename
-from pygments.styles import get_all_styles
-from pygments.formatters import HtmlFormatter
-from werkzeug import redirect
-from werkzeug.utils import url_quote
-from werkzeug.routing import NotFound
-
-import simplejson as json
-
-import friendpaste.settings as settings
-from friendpaste.utils import local, local_manager, datetimestr_topython, strptime
-from friendpaste.http import Response
-from friendpaste.template import render_response, highlighter
-from friendpaste.paste.models import Snippet
-from friendpaste.paste.diff import unified_diff, diff_blocks
-from friendpaste.feeds import RssFeed, RssFeedEntry 
-
-
-class FoundRev(Exception):
-    pass
-
-def _get_lexers():
-    lexers = get_all_lexers()
-    top_lexers = None
-    nl = []
-    ret=[]
-
-    try:
-        top_lexers = settings.TOP_LEXERS
-    except:
-        pass
-    if top_lexers is not None: 
-        nl = [l[0] for l in top_lexers]
-    
-    for l in lexers:
-        if l[1][0] not in nl:
-            ret.append((l[1][0],l[0]))
-    ret.sort()
-    if top_lexers is not None and top_lexers: 
-        ret = top_lexers + [('text', '------------')] + ret
-    return ret
-
-LEXERS_CHOICE = [('text', 'Plain text')] + _get_lexers()
-ALL_LEXERS=get_all_lexers()
-
-ALL_COLORSHEME = list(get_all_styles())
-
-class PasteForm(Form):
-    title=TextField('Title')
-    snippet=TextAreaField('Paste', validators.length(min=3, max=500000))
-    language = SelectField(u'Programming Language', choices=LEXERS_CHOICE)
-
-def create_snippet(request):
-    mimetypes = request.accept_mimetypes
-    if request.method=='POST' and 'application/json' in mimetypes:
-        d=json.loads(request.data)
-        lang=request.values.get("lang", None)
-        language=d.get('language', 'text')
-        if lang == "ext":
-            try:
-                l = get_lexer_for_filename("test."+language)
-                language = l.aliases[0]
-            except:
-                language = 'text'
-        try:
-            s = Snippet(
-                    title=d.get('title', ""),
-                    snippet=d.get('snippet'),
-                    language=language
-            )
-        except:
-            return send_json({'ok': False, 'reason': 'you should provide snippet code'})
-        try:
-            s.store(local.db)
-        except:
-            return send_json({'ok': False, 'reason': 'something wrong happend while saving data'})
-
-        return send_json({
-            'ok': True,
-            'id': s.id,
-            'revid': s.revid,
-            'url': '%s/%s' % (settings.SITE_URI, s.id)
-        })
-
-    form = PasteForm(request.form, prefix='paste')
-    if request.method=='POST' and form.validate():
-        s = Snippet(title=form.data['title'], snippet=form.data['snippet'], language=form.data['language'])
-        s.store(local.db)
-        print "id created : %s" % s.id
-        return redirect ('/%s' % s.id)
-
-    return render_response('paste/index.html', form=form)
-
-def edit_snippet(request, id):
-    s = Snippet.load(local.db, id)
-    if not s:
-        raise NotFound
-    form = PasteForm(request.form, prefix='paste', **{
-        'title': s.title,
-        'snippet': s.snippet,
-        'language': str(s.language)
-    })
-
-    if request.method=='POST' or request.method=='PUT' and form.validate():
-        old_revid = s.revid
-        s.title=form.data['title']
-        s.snippet = form.data['snippet']
-        s.language = form.data['language']
-        s.store(local.db)
-        if s.revid == old_revid:
-            return redirect ('/%s?msg=%s' % (s.id, url_quote("No changes detected.")))           
-        return redirect ('/%s' % s.id)
-    return render_response('paste/edit.html', form=form, snippet=s) 
-
-def _get_theme(request):
-    theme = request.cookies.get('theme', 'default')
-    highlight_css = "%s/css/%s.css" % (settings.SHARED_DATA, theme)
-    if not os.path.exists(highlight_css):
-        css = HtmlFormatter(style=theme).get_style_defs('.highlight')
-        f = open(highlight_css, 'w')
-        f.write(css)
-        f.close()
-    return theme
-
-def send_json(Body):
-    resp = Response(json.dumps(Body))
-    resp.headers['content-type'] = 'application/json'
-    return resp
-
-def _get_snippet_revisions(id):
-    
-    def _fetch():
-        r = local.db.view('_view/paste/by_id', startkey=[str(id)], endkey=[str(id),10])
-        return r, len(r)
-    
-    try:
-        res, l = _fetch()
-    except:
-        try:
-            res, l = _fetch()
-        except:
-            raise NotFound
-
-    results = []
-    for r in res.__iter__():
-        results.append(r)
-    return results
-
-def get_snippet(id, rev=None):
-    revisions = {}
-    res = _get_snippet_revisions(id)
-    if not res:
-        raise NotFound 
-    # set revisions
-    if len(res) > 1:
-        revisions = res[1:]
-        # reorder revisions
-        revisions.sort(lambda a,b: cmp(a.key[1], b.key[1]))
-        revisions.reverse()
-
-
-    # set snippet (maybe better to use a new view ?) 
-    data = {}
-    if rev and revisions and rev!=res[0].value['revid']:
-        try:
-            for r in revisions:
-                if r.value['revid']==rev:
-                    data = r.value
-                    data['_id']=r.value['parent']
-                    raise FoundRev
-        except FoundRev:
-            pass
-        if not data:
-            data = res[0].value
-            data['_id'] = res[0].id 
-    else:
-        data = res[0].value
-        data['_id'] = res[0].id
-    
-    snippet= Snippet.wrap(data)
-    return snippet, revisions
-
-def get_revision(snippetid, revid):
-    revision = local.db.view('_view/paste/by_id', key=[str(snippetid), str(revid)])
-    data = revision.value
-    data['_id'] = r.value['parent']
-    return Snippet.wrap(data)
-
-
-def view_snippet(request, id):
-    mimetypes = request.accept_mimetypes
-    
-    if 'rss+xml' in mimetypes:
-        return view_rss(request, id)
-    elif 'application/json' in mimetypes and request.method=='PUT':
-        try:
-            s = Snippet.load(local.db, id)
-        except:
-            raise NotFound
-        if not s:
-            raise NotFound
-        d=json.loads(request.data)
-        lang=request.values.get("lang", None)
-        language=d.get('language', 'text')
-        if lang == "ext":
-            try:
-                l = get_lexer_for_filename("test."+language)
-                language = l.aliases[0]
-            except:
-                language = 'text'
-        old_revid = s.revid
-        try:
-            s.title=d.get('title')
-            s.snippet = d.get('snippet')
-            s.language = language
-            s.store(local.db)
-        except:
-            return send_json({'ok': False, 'reason': 'you should provide snippet code'})
-        if s.revid == old_revid:
-            return send_json({'ok': False, 'reason': 'no changes detected'})
-        return send_json({
-            'ok': True,
-            'id': s.id,
-            'revid': s.revid,
-            'url': '%s/%s' % (settings.SITE_URI, s.id)
-        })
-
-    rev = request.values.get('rev', None)
-    if rev is None:
-        try:
-            s = Snippet.load(local.db, id)
-        except:
-            raise NotFound
-    else:
-        s, revisions = get_snippet(id, rev)
-
-    if not s:
-        raise NotFound
-
-    if 'application/json' in mimetypes:
-        if request.method == 'GET':
-            if s is None:
-                return send_json({'ok': False})
-            snippet_hash = {
-                'id': s.id,
-                'revid': s.revid,
-                'title': s.title,
-                'snippet': s.snippet,
-                'language': str(s.language)
-            }
-            return send_json(snippet_hash)
-    
-    form = PasteForm(request.form, prefix='paste', **{
-        'title': s.title,
-        'snippet': s.snippet,
-        'language': str(s.language)
-    })
-
-    # get theme
-    theme = _get_theme(request)
-    return render_response('paste/view.html', snippet=s, theme=theme, form=form)
-
-
-
-def view_rss(request, id):
-    rev = request.values.get('rev', None)
-    s, revisions = get_snippet(id, rev)
-
-    if not s:
-        raise NotFound
-    feed = RssFeed(
-        title="Revisions to %s on Friendpaste" % (s.title and s.title or "snippet #%s" % s.id),
-        description = '',
-        link = "%s/%s" % (settings.SITE_URI, s.id)
-    
-    )
-    feed.add_entry(RssFeedEntry(
-        title="%s revision %s" % (s.title,s.revid),
-        link= "%s/%s?rev=%s" % (settings.SITE_URI, s.id, s.revid),
-        description="",
-        **{ 
-                'published': s.created,
-                'updated':s.updated
-        }    
-    ))
- 
-    for revision in revisions:
-        feed.add_entry(RssFeedEntry(
-            title="%s revision %s" % (revision.value['title'], revision.value['revid']),
-            link= "%s/%s?rev=%s" % (settings.SITE_URI, s.id, revision.value['revid']),
-            description="",
-            **{ 
-                'published': datetimestr_topython(revision.value['created']),
-                'updated': datetimestr_topython(revision.value['updated'])
-            }
-        ))
-     
-    return feed.get_response()
-
-def view_revisions(request, id):
-    revs = local.db.view('_view/paste/revisions', startkey=[str(id), "0"], endkey=[str(id), "9"])
-    revisions = []
-    s = None
-    if len(revs) > 0:
-        for rev in revs:
-            if rev.value['_id'] == id:
-                s = Snippet.wrap(rev.value)
-                rev.value['parent']=rev.value['_id']
-            revisions.append(rev.value)
-            revisions.sort(lambda a,b: cmp(a['updated'], b['updated']), reverse=True)
-     
-    if 'application/json' in request.accept_mimetypes:
-        if len(revisions) <= 0:
-            return send_json([])
-        return send_json(revisions)
-
-    if s is None:
-        raise NotFound
-    return render_response('paste/revisions.html', snippet=s, revisions=revisions)
-
-def view_rawsnippet(request, id, rev):
-    s, revisions = get_snippet(id, rev)
-
-    response = Response(s.snippet)
-    response.headers['content-type'] = 'text/plain'
-    return response
-
-def view_original(request, id, rev):
-    s, revisions = get_snippet(id, rev)
-    from pygments import lexers
-    lexer = lexers.get_lexer_by_name(s.language)
-    response = Response(s.snippet)
-    response.headers['content-type'] = lexer.mimetypes[0]
-    if lexer.filenames and len(lexer.filenames)>0:
-        response.headers['content-disposition'] =  "filename=%s%s" % (
-            s.id, lexer.filenames[0][1:])
-    else:
-        response.headers['content-disposition'] =  "filename=%s.txt" % s.id
-    return response
-
-def get_changeset(id, rev=None):
-    res = _get_snippet_revisions(id)
-    if not res:
-        raise NotFound
-    snippet = res[0].value
-    snippet['_id'] = res[0].id
-    diff_to = ""
-    diff_from = ""
-    old_rev=rev
-    after=False
-    if snippet['revid']==rev:
-        diff_to=snippet['snippet']
-        after = True
-
-    revisions = res[1:]
-    if revisions and len(revisions)>0:
-        revisions.reverse()          
-        try:
-            for r in revisions:
-                if not after and r.value['revid']==rev:
-                    if not diff_to:
-                        diff_to = r.value['snippet']
-                    after=True
-                elif after:
-                    diff_from = r.value['snippet']
-                    old_rev=r.value['revid']
-                    raise FoundRev
-        except FoundRev:
-            pass
-            
-    unidiff = '--- Revision %s\n+++ Revision %s\n' % (old_rev, rev) + \
-            '\n'.join(unified_diff (diff_from.splitlines(), diff_to.splitlines(), 3))
-    tabular = diff_blocks(diff_from.splitlines(), diff_to.splitlines(), 3)
-    
-    
-    return snippet, rev, old_rev, unidiff, tabular
-
-def view_changeset(request, id):
-    snippet, rev, old_rev, unidiff, tabular = get_changeset(id, request.values.get('rev', None))
-    mimetypes = request.accept_mimetypes
-    if 'application/json' in mimetypes:
-        return send_json({
-            'id': snippet['_id'], 
-            'rev': rev,
-            'changeset': unidiff
-        })
-
-    format = request.values.get('format', None)
-    if format is not None:
-        response = Response(unidiff)
-        response.headers['content-type'] = 'text/plain'
-        return response
-        
-    theme = _get_theme(request)
-    return render_response('paste/diff.html', unidiff=unidiff, difft=tabular, theme=theme,
-            snippet=snippet, rev=rev, old_rev=old_rev)
-
-def get_all_languages(request):
-    lexers = get_all_lexers()
-  
-    languages=[(l[1][0],l[0]) for l in lexers]
-    languages.sort()
-
-    return send_json(dict(languages))
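
For context, create_snippet (removed here and, judging by the new urls.py, re-homed in friendpaste/views.py) also accepts JSON posts when the client asks for application/json. A hedged client-side sketch, with the host a placeholder for settings.SITE_URI:

    import urllib2
    import simplejson as json

    payload = json.dumps({'title': 'hello', 'snippet': 'print 1', 'language': 'python'})
    req = urllib2.Request('http://friendpaste.example/', payload, {
        'Content-Type': 'application/json',
        'Accept': 'application/json',   # the view dispatches on the Accept header
    })
    result = json.loads(urllib2.urlopen(req).read())
    # on success: {'ok': True, 'id': ..., 'revid': ..., 'url': ...}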

friendpaste/template.py

 from pygments import highlight, lexers, formatters
 from pygments.styles import get_all_styles
 
-from friendpaste.http import Request, Response
+from friendpaste.http import FPResponse
 from friendpaste.utils import local, timesince, datetimestr_topython
 
+
+
 def render_response(template_name, **kwargs):
-    return Response(render_template(template_name, **kwargs))
+    return FPResponse(render_template(template_name, **kwargs))
 
 def render_template(template_name, _stream=False, **kwargs):
     tmpl = local.application.template_env.get_template(template_name)
     if _stream:
         return tmpl.stream(kwargs)
     return tmpl.render(kwargs)
 
+def url_for(endpoint, _external=False, **values):
+    return local.url_adapter.build(endpoint, values, force_external=_external)
+
 class SnippetHtmlFormatter(formatters.HtmlFormatter):
     """
     formatter to have better rendering with wrapped line.
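
url_for above is what application.py now registers as the 'url' template global, so templates can write {{ url('paste/view', id=snippet.id) }} instead of hard-coding paths. From Python it behaves roughly as follows; the '/<id>' rule is an assumption, since the URL map itself is not shown in this changeset:

    from friendpaste.template import url_for

    # only works inside a request, once local.url_adapter has been bound
    view_url = url_for('paste/view', id='aBc123')         # e.g. '/aBc123'
    about_url = url_for('generic/about', _external=True)  # absolute URL with scheme and host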

friendpaste/urls.py

 
 from werkzeug.routing import Map, Rule, RequestRedirect,Submount
 
-from friendpaste.paste import views as paste
-from friendpaste.generic import about, services
+from friendpaste import views
 
 all_views = {
-        'paste/create': paste.create_snippet,
-        'paste/view': paste.view_snippet,
-        'paste/raw': paste.view_rawsnippet,
-        'paste/original': paste.view_original,
-        'paste/edit': paste.edit_snippet,
-        'paste/changeset': paste.view_changeset,
-        'paste/revisions': paste.view_revisions,
-        'paste/rss': paste.view_rss,
-        'paste/all_languages': paste.get_all_languages,
-        'generic/about': about,
-        'generic/services': services
+        'paste/create': views.create_snippet,
+        'paste/view': views.view_snippet,
+        'paste/raw': views.view_rawsnippet,
+        'paste/original': views.view_original,
+        'paste/edit': views.edit_snippet,
+        'paste/changeset': views.view_changeset,
+        'paste/revisions': views.view_revisions,
+        'paste/rss': views.view_rss,
+        'paste/all_languages': views.get_all_languages,
+        'generic/about': views.about,
+        'generic/services': views.services
 }
 
 

friendpaste/utils.py

-# -*- coding: utf-8 -
-# Copyright 2008 by Benoît Chesneau <benoitc@e-engura.com>
-# 
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from calendar import timegm
-from datetime import datetime, timedelta, time, tzinfo
-from time import strptime, localtime, struct_time
-from gettext import gettext, ngettext
-import thread, threading
-
-from werkzeug import Local, LocalManager
-
-
-local = Local()
-local_manager = LocalManager([local])
-
-
-def ungettext(singular, plural, number):
-    """ stupid wrapper yet waiting internationalisation """
-    return ngettext(singular, plural, number)
-
-def ugettext(message):
-    """ stupid wrapper yet waiting internationalisation """
-    return gettext(message)
-
-class LocalTimezone(tzinfo):
-    "Proxy timezone information from time module."
-    def __init__(self, dt):
-        tzinfo.__init__(self, dt)
-        self._tzname = self.tzname(dt)
-
-    def __repr__(self):
-        return self._tzname
-
-    def utcoffset(self, dt):
-        if self._isdst(dt):
-            return timedelta(seconds=-time.altzone)
-        else:
-            return timedelta(seconds=-time.timezone)
-
-    def dst(self, dt):
-        if self._isdst(dt):
-            return timedelta(seconds=-time.altzone) - timedelta(seconds=-time.timezone)
-        else:
-            return timedelta(0)
-
-    def tzname(self, dt):
-        try:
-            return smart_unicode(time.tzname[self._isdst(dt)], DEFAULT_ENCODING)
-        except UnicodeDecodeError:
-            return None
-
-    def _isdst(self, dt):
-        tt = (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.weekday(), 0, -1)
-        try:
-            stamp = time.mktime(tt)
-        except OverflowError:
-            # 32 bit systems can't handle dates after Jan 2038, so we fake it
-            # in that case (since we only care about the DST flag here).
-            tt = (2037,) + tt[1:]
-            stamp = time.mktime(tt)
-        tt = time.localtime(stamp)
-        return tt.tm_isdst > 0
-
-def timesince(d, now=None):
-    """
-    Takes two datetime objects and returns the time between d and now
-    as a nicely formatted string, e.g. "10 minutes".  If d occurs after now,
-    then "0 minutes" is returned.
-
-    Units used are years, months, weeks, days, hours, and minutes.
-    Seconds and microseconds are ignored.  Up to two adjacent units will be
-    displayed.  For example, "2 weeks, 3 days" and "1 year, 3 months" are
-    possible outputs, but "2 weeks, 3 hours" and "1 year, 5 days" are not.
-
-    Adapted from http://blog.natbat.co.uk/archive/2003/Jun/14/time_since
-    """
-    chunks = (
-      (60 * 60 * 24 * 365, lambda n: ungettext('year', 'years', n)),
-      (60 * 60 * 24 * 30, lambda n: ungettext('month', 'months', n)),
-      (60 * 60 * 24 * 7, lambda n : ungettext('week', 'weeks', n)),
-      (60 * 60 * 24, lambda n : ungettext('day', 'days', n)),
-      (60 * 60, lambda n: ungettext('hour', 'hours', n)),
-      (60, lambda n: ungettext('minute', 'minutes', n))
-    )
-    
-    # Convert datetime.date to datetime.datetime for comparison
-    if d.__class__ is not datetime:
-        type= d.__class__
-        d = datetime(d.year, d.month, d.day)
-    if now:
-        t = now.timetuple()
-    else:
-        t = localtime()
-    if d.tzinfo:
-        tz = LocalTimezone(d)
-    else:
-        tz = None
-    tz = None
-    now = datetime(t[0], t[1], t[2], t[3], t[4], t[5], tzinfo=tz).utcnow()
-
-    # ignore microsecond part of 'd' since we removed it from 'now'
-    delta = now - (d - timedelta(0, 0, d.microsecond))
-    since = delta.days * 24 * 60 * 60 + delta.seconds
-    if since <= 0:
-        # d is in the future compared to now, stop processing.
-        return u'0 ' + ugettext('minutes')
-    for i, (seconds, name) in enumerate(chunks):
-        count = since // seconds
-        if count != 0:
-            break
-    s = ugettext('%(number)d %(type)s') % {'number': count, 'type': name(count)}
-    if i + 1 < len(chunks):
-        # Now get the second item
-        seconds2, name2 = chunks[i + 1]
-        count2 = (since - (seconds * count)) // seconds2
-        if count2 != 0:
-            s += ugettext(', %(number)d %(type)s') % {'number': count2, 'type': name2(count2)}
-    return s
-
-def utf8(text):
-    """Encodes text in utf-8.
-        
-        >> utf8(u'\u1234') # doctest doesn't seem to like utf-8
-        '\xe1\x88\xb4'
-
-        >>> utf8('hello')
-        'hello'
-        >>> utf8(42)
-        '42'
-    """
-    if isinstance(text, unicode):
-        return text.encode('utf-8')
-    elif isinstance(text, str):
-        return text
-    else:
-        return str(text)
-
-def to_str(s):
-    """
-    return a bytestring version of s, encoded in utf8
-    """
-
-    if not isinstance(s, basestring):
-        try:
-            return str(s)
-        except UnicodeEncodeError:
-            return unicode(s).encode('utf-8', 'strict')
-    elif isinstance(s, unicode):
-        return s.encode('utf-8', 'strict')
-    else:
-        return s
-
-
-def iri_to_uri(iri):
-    """
-    Convert an Internationalized Resource Identifier (IRI) portion to a URI
-    portion that is suitable for inclusion in a URL.
-
-    Returns an ASCII string containing the encoded result.
-    code from djangoprohect.
-    """
-    # The list of safe characters here is constructed from the printable ASCII
-    # characters that are not explicitly excluded by the list at the end of
-    # section 3.1 of RFC 3987.
-    if iri is None:
-        return iri
-
-    return urllib.quote(to_str(iri), safe='/#%[]=:;$&()+,!?*')
-
-def datetimestr_topython(value):
-    if isinstance(value, basestring):
-        try:
-            value = value.split('.', 1)[0] # strip out microseconds
-            value = value.rstrip('Z') # remove timezone separator
-            timestamp = timegm(strptime(value, '%Y-%m-%dT%H:%M:%S'))
-            value = datetime.utcfromtimestamp(timestamp)
-        except ValueError, e:
-            raise ValueError('Invalid ISO date/time %r' % value)
-    return value
-
-
-def datetime_tojson(value):
-    if isinstance(value, struct_time):
-        value = datetime.utcfromtimestamp(timegm(value))
-    elif not isinstance(value, datetime):
-        value = datetime.combine(value, time(0))
-    return value.replace(microsecond=0).isoformat() + 'Z'

friendpaste/utils/__init__.py

+# -*- coding: utf-8 -
+# Copyright 2008 by Benoît Chesneau <benoitc@e-engura.com>
+# 
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from calendar import timegm
+from datetime import datetime, timedelta, time, tzinfo
+from time import strptime, localtime, struct_time
+from gettext import gettext, ngettext
+import thread, threading
+
+from werkzeug import Local, LocalManager
+
+
+local = Local()
+local_manager = LocalManager([local])
+
+
+def ungettext(singular, plural, number):
+    """ stupid wrapper yet waiting internationalisation """
+    return ngettext(singular, plural, number)
+
+def ugettext(message):
+    """ stupid wrapper yet waiting internationalisation """
+    return gettext(message)
+
+class LocalTimezone(tzinfo):
+    "Proxy timezone information from time module."
+    def __init__(self, dt):
+        tzinfo.__init__(self, dt)
+        self._tzname = self.tzname(dt)
+
+    def __repr__(self):
+        return self._tzname
+
+    def utcoffset(self, dt):
+        if self._isdst(dt):
+            return timedelta(seconds=-time.altzone)
+        else:
+            return timedelta(seconds=-time.timezone)
+
+    def dst(self, dt):
+        if self._isdst(dt):
+            return timedelta(seconds=-time.altzone) - timedelta(seconds=-time.timezone)
+        else:
+            return timedelta(0)
+
+    def tzname(self, dt):
+        try:
+            return smart_unicode(time.tzname[self._isdst(dt)], DEFAULT_ENCODING)
+        except UnicodeDecodeError:
+            return None
+
+    def _isdst(self, dt):
+        tt = (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.weekday(), 0, -1)
+        try:
+            stamp = time.mktime(tt)
+        except OverflowError:
+            # 32 bit systems can't handle dates after Jan 2038, so we fake it
+            # in that case (since we only care about the DST flag here).
+            tt = (2037,) + tt[1:]
+            stamp = time.mktime(tt)
+        tt = time.localtime(stamp)
+        return tt.tm_isdst > 0
+
+def timesince(d, now=None):
+    """
+    Takes two datetime objects and returns the time between d and now
+    as a nicely formatted string, e.g. "10 minutes".  If d occurs after now,
+    then "0 minutes" is returned.
+
+    Units used are years, months, weeks, days, hours, and minutes.
+    Seconds and microseconds are ignored.  Up to two adjacent units will be
+    displayed.  For example, "2 weeks, 3 days" and "1 year, 3 months" are
+    possible outputs, but "2 weeks, 3 hours" and "1 year, 5 days" are not.
+
+    Adapted from http://blog.natbat.co.uk/archive/2003/Jun/14/time_since
+    """
+    chunks = (
+      (60 * 60 * 24 * 365, lambda n: ungettext('year', 'years', n)),
+      (60 * 60 * 24 * 30, lambda n: ungettext('month', 'months', n)),
+      (60 * 60 * 24 * 7, lambda n : ungettext('week', 'weeks', n)),
+      (60 * 60 * 24, lambda n : ungettext('day', 'days', n)),
+      (60 * 60, lambda n: ungettext('hour', 'hours', n)),
+      (60, lambda n: ungettext('minute', 'minutes', n))
+    )
+    
+    # Convert datetime.date to datetime.datetime for comparison
+    if d.__class__ is not datetime:
+        type= d.__class__
+        d = datetime(d.year, d.month, d.day)
+    if now:
+        t = now.timetuple()
+    else:
+        t = localtime()
+    if d.tzinfo:
+        tz = LocalTimezone(d)
+    else:
+        tz = None
+    tz = None
+    now = datetime(t[0], t[1], t[2], t[3], t[4], t[5], tzinfo=tz).utcnow()
+
+    # ignore microsecond part of 'd' since we removed it from 'now'
+    delta = now - (d - timedelta(0, 0, d.microsecond))
+    since = delta.days * 24 * 60 * 60 + delta.seconds
+    if since <= 0:
+        # d is in the future compared to now, stop processing.
+        return u'0 ' + ugettext('minutes')
+    for i, (seconds, name) in enumerate(chunks):
+        count = since // seconds
+        if count != 0:
+            break
+    s = ugettext('%(number)d %(type)s') % {'number': count, 'type': name(count)}
+    if i + 1 < len(chunks):
+        # Now get the second item
+        seconds2, name2 = chunks[i + 1]
+        count2 = (since - (seconds * count)) // seconds2
+        if count2 != 0:
+            s += ugettext(', %(number)d %(type)s') % {'number': count2, 'type': name2(count2)}
+    return s
+    
+
+def utf8(text):
+    """Encodes text in utf-8.
+        
+        >> utf8(u'\u1234') # doctest doesn't seem to like utf-8
+        '\xe1\x88\xb4'
+
+        >>> utf8('hello')
+        'hello'
+        >>> utf8(42)
+        '42'
+    """
+    if isinstance(text, unicode):
+        return text.encode('utf-8')
+    elif isinstance(text, str):
+        return text
+    else:
+        return str(text)
+
+def to_str(s):
+    """
+    return a bytestring version of s, encoded in utf8
+    """
+
+    if not isinstance(s, basestring):
+        try:
+            return str(s)
+        except UnicodeEncodeError:
+            return unicode(s).encode('utf-8', 'strict')
+    elif isinstance(s, unicode):
+        return s.encode('utf-8', 'strict')
+    else:
+        return s
+
+
+def iri_to_uri(iri):
+    """
+    Convert an Internationalized Resource Identifier (IRI) portion to a URI
+    portion that is suitable for inclusion in a URL.
+
+    Returns an ASCII string containing the encoded result.
+    code from djangoprohect.
+    """
+    # The list of safe characters here is constructed from the printable ASCII
+    # characters that are not explicitly excluded by the list at the end of
+    # section 3.1 of RFC 3987.
+    if iri is None:
+        return iri
+
+    return urllib.quote(to_str(iri), safe='/#%[]=:;$&()+,!?*')
+
+def datetimestr_topython(value):
+    if isinstance(value, basestring):
+        try:
+            value = value.split('.', 1)[0] # strip out microseconds
+            value = value.rstrip('Z') # remove timezone separator
+            timestamp = timegm(strptime(value, '%Y-%m-%dT%H:%M:%S'))
+            value = datetime.utcfromtimestamp(timestamp)
+        except ValueError, e:
+            raise ValueError('Invalid ISO date/time %r' % value)
+    return value
+
+
+def datetime_tojson(value):
+    if isinstance(value, struct_time):
+        value = datetime.utcfromtimestamp(timegm(value))
+    elif not isinstance(value, datetime):
+        value = datetime.combine(value, time(0))
+    return value.replace(microsecond=0).isoformat() + 'Z'
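
The datetime helpers that moved here round-trip CouchDB's ISO-8601 strings; for example (the outputs in the comments are illustrative):

    from datetime import datetime
    from friendpaste.utils import datetime_tojson, datetimestr_topython, timesince

    d = datetime(2008, 5, 1, 12, 30, 45, 123456)
    s = datetime_tojson(d)          # '2008-05-01T12:30:45Z' (microseconds dropped)
    d2 = datetimestr_topython(s)    # back to a naive datetime, without microseconds
    print timesince(d2)             # e.g. '2 weeks, 3 days', relative to now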

friendpaste/utils/diff.py

+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2004-2006 Edgewall Software
+# Copyright (C) 2004-2006 Christopher Lenz <cmlenz@gmx.de>
+# All rights reserved.
+#
+# This software is licensed as described in the file COPYING, which
+# you should have received as part of this distribution. The terms
+# are also available at http://trac.edgewall.org/wiki/TracLicense.
+#
+# This software consists of voluntary contributions made by many
+# individuals. For the exact contribution history, see the revision
+# history and logs, available at http://trac.edgewall.org/log/.
+#
+# Author: Christopher Lenz <cmlenz@gmx.de>
+#
+# Adapted for friendpaste By Benoît Chesneau <benoitc@e-engura.com>
+
+from difflib import SequenceMatcher
+import re
+
+
+def expandtabs(s, tabstop=8, ignoring=None):
+    if '\t' not in s: return s
+    if ignoring is None: return s.expandtabs(tabstop)
+
+    outlines = []
+    for line in s.split('\n'):
+        if '\t' not in line:
+            outlines.append(line)
+            continue
+        p = 0
+        s = []
+        for c in line:
+            if c == '\t':
+                n = tabstop-p%tabstop
+                s.append(' '*n)
+                p+=n
+            elif not ignoring or c not in ignoring:
+                p += 1
+                s.append(c)
+            else:
+                s.append(c)
+        outlines.append(''.join(s))
+    return '\n'.join(outlines)
+
+
+__all__ = ['get_diff_options', 'hdf_diff', 'diff_blocks', 'unified_diff']
+
+
+def _get_change_extent(str1, str2):
+    """
+    Determines the extent of differences between two strings. Returns a tuple
+    containing the offset at which the changes start, and the negative offset
+    at which the changes end. If the two strings have neither a common prefix
+    nor a common suffix, (0, 0) is returned.
+    """
+    start = 0
+    limit = min(len(str1), len(str2))
+    while start < limit and str1[start] == str2[start]:
+        start += 1
+    end = -1
+    limit = limit - start
+    while -end <= limit and str1[end] == str2[end]:
+        end -= 1
+    return (start, end + 1)
+
+def _get_opcodes(fromlines, tolines, ignore_blank_lines=False,
+                 ignore_case=False, ignore_space_changes=False):
+    """
+    Generator built on top of SequenceMatcher.get_opcodes().
+    
+    This function detects line changes that should be ignored and emits them
+    as tagged as 'equal', possibly joined with the preceding and/or following
+    'equal' block.
+    """
+
+    def is_ignorable(tag, fromlines, tolines):
+        if tag == 'delete' and ignore_blank_lines:
+            if ''.join(fromlines) == '':
+                return True
+        elif tag == 'insert' and ignore_blank_lines:
+            if ''.join(tolines) == '':
+                return True
+        elif tag == 'replace' and (ignore_case or ignore_space_changes):
+            if len(fromlines) != len(tolines):
+                return False
+            def f(str):
+                if ignore_case:
+                    str = str.lower()
+                if ignore_space_changes:
+                    str = ' '.join(str.split())
+                return str
+            for i in range(len(fromlines)):
+                if f(fromlines[i]) != f(tolines[i]):
+                    return False
+            return True
+
+    matcher = SequenceMatcher(None, fromlines, tolines)
+    previous = None
+    for tag, i1, i2, j1, j2 in matcher.get_opcodes():
+        if tag == 'equal':
+            if previous:
+                previous = (tag, previous[1], i2, previous[3], j2)
+            else:
+                previous = (tag, i1, i2, j1, j2)
+        else:
+            if is_ignorable(tag, fromlines[i1:i2], tolines[j1:j2]):
+                if previous:
+                    previous = 'equal', previous[1], i2, previous[3], j2
+                else:
+                    previous = 'equal', i1, i2, j1, j2
+                continue
+            if previous:
+                yield previous
+            yield tag, i1, i2, j1, j2
+            previous = None
+
+    if previous:
+        yield previous
+
+def _group_opcodes(opcodes, n=3):
+    """
+    Python 2.2 doesn't have SequenceMatcher.get_grouped_opcodes(), so let's
+    provide equivalent here. The opcodes parameter can be any iterable or
+    sequence.
+
+    This function can also be used to generate full-context diffs by passing 
+    None for the parameter n.
+    """
+    # Full context produces all the opcodes
+    if n is None:
+        yield list(opcodes)
+        return
+
+    # Otherwise we leave at most n lines with the tag 'equal' before and after
+    # every change
+    nn = n + n
+    group = []
+    for idx, (tag, i1, i2, j1, j2) in enumerate(opcodes):
+        if idx == 0 and tag == 'equal': # Fixup leading unchanged block
+            i1, j1 = max(i1, i2 - n), max(j1, j2 - n)
+        elif tag == 'equal' and i2 - i1 > nn:
+            group.append((tag, i1, min(i2, i1 + n), j1, min(j2, j1 + n)))
+            yield group
+            group = []
+            i1, j1 = max(i1, i2 - n), max(j1, j2 - n)
+        group.append((tag, i1, i2, j1 ,j2))
+
+    if group and not (len(group) == 1 and group[0][0] == 'equal'):
+        if group[-1][0] == 'equal': # Fixup trailing unchanged block
+            tag, i1, i2, j1, j2 = group[-1]
+            group[-1] = tag, i1, min(i2, i1 + n), j1, min(j2, j1 + n)
+        yield group
+
+def hdf_diff(*args, **kwargs):
+    return diff_blocks(*args, **kwargs)
+
+def diff_blocks(fromlines, tolines, context=None, tabwidth=8,
+                ignore_blank_lines=0, ignore_case=0, ignore_space_changes=0):
+    """Return an array that is adequate for adding to the data dictionary
+
+    See the diff_div.html template.
+    """
+
+    type_map = {'replace': 'mod', 'delete': 'rem', 'insert': 'add',
+                'equal': 'unmod'}
+
+    space_re = re.compile(' ( +)|^ ')
+    def htmlify(match):
+        div, mod = divmod(len(match.group(0)), 2)
+        return div * '&nbsp; ' + mod * '&nbsp;'
+
+    def markup_intraline_changes(opcodes):
+        for tag, i1, i2, j1, j2 in opcodes:
+            if tag == 'replace' and i2 - i1 == j2 - j1:
+                for i in range(i2 - i1):
+                    fromline, toline = fromlines[i1 + i], tolines[j1 + i]
+                    (start, end) = _get_change_extent(fromline, toline)
+                    if start != 0 or end != 0:
+                        last = end + len(fromline)
+                        fromlines[i1 + i] = fromline[:start] + '\0' + \
+                            fromline[start:last] + '\1' + fromline[last:]
+                        last = end + len(toline)
+                        tolines[j1 + i] = toline[:start] + '\0' + \
+                            toline[start:last] + '\1' + toline[last:]
+            yield tag, i1, i2, j1, j2
+
+    changes = []
+    opcodes = _get_opcodes(fromlines, tolines, ignore_blank_lines, ignore_case,
+                           ignore_space_changes)
+    for group in _group_opcodes(opcodes, context):
+        blocks = []
+        last_tag = None
+        for tag, i1, i2, j1, j2 in markup_intraline_changes(group):
+            if tag != last_tag:
+                blocks.append({'type': type_map[tag],
+                               'base': {'offset': i1, 'lines': []},
+                               'changed': {'offset': j1, 'lines': []}})
+            last_tag = tag
+            if tag == 'equal':
+                for line in fromlines[i1:i2]:
+                    line = line.expandtabs(tabwidth)
+                    line = space_re.sub(htmlify, line)
+                    blocks[-1]['base']['lines'].append(unicode(line))
+                for line in tolines[j1:j2]:
+                    line = line.expandtabs(tabwidth)
+                    line = space_re.sub(htmlify, line)
+                    blocks[-1]['changed']['lines'].append(unicode(line))
+            else:
+                if tag in ('replace', 'delete'):
+                    for line in fromlines[i1:i2]:
+                        line = expandtabs(line, tabwidth, '\0\1')
+                        line = '<del>'.join([space_re.sub(htmlify, seg)
+                                             for seg in line.split('\0')])
+                        line = line.replace('\1', '</del>')
+                        blocks[-1]['base']['lines'].append(
+                            unicode(line))
+                if tag in ('replace', 'insert'):
+                    for line in tolines[j1:j2]:
+                        line = expandtabs(line, tabwidth, '\0\1')
+                        line = '<ins>'.join([space_re.sub(htmlify, seg)
+                                             for seg in line.split('\0')])
+                        line = line.replace('\1', '</ins>')
+                        blocks[-1]['changed']['lines'].append(
+                            unicode(line))
+        changes.append(blocks)
+    return changes
+
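The return value is a list of groups, each group a list of blocks keyed by 'type', 'base' and 'changed'. A minimal sketch of walking that structure (note that diff_blocks marks up intraline changes by rewriting the input lists in place, so pass copies if the originals are still needed):

    from friendpaste.utils.diff import diff_blocks

    old = ['one', 'two', 'three']
    new = ['one', 'two!', 'three']
    for group in diff_blocks(old[:], new[:], context=1):
        for block in group:
            print block['type'], len(block['base']['lines']), len(block['changed']['lines'])
    # prints: unmod 1 1 / mod 1 1 / unmod 1 1
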
+def unified_diff(fromlines, tolines, context=None, ignore_blank_lines=0,
+                 ignore_case=0, ignore_space_changes=0):
+    opcodes = _get_opcodes(fromlines, tolines, ignore_blank_lines, ignore_case,
+                           ignore_space_changes)
+    for group in _group_opcodes(opcodes, context):
+        i1, i2, j1, j2 = group[0][1], group[-1][2], group[0][3], group[-1][4]
+        if i1 == 0 and i2 == 0:
+            i1, i2 = -1, -1 # support for 'A'dd changes
+        yield '@@ -%d,%d +%d,%d @@' % (i1 + 1, i2 - i1, j1 + 1, j2 - j1)
+        for tag, i1, i2, j1, j2 in group:
+            if tag == 'equal':
+                for line in fromlines[i1:i2]:
+                    yield ' ' + line
+            else:
+                if tag in ('replace', 'delete'):
+                    for line in fromlines[i1:i2]:
+                        yield '-' + line
+                if tag in ('replace', 'insert'):
+                    for line in tolines[j1:j2]:
+                        yield '+' + line
+
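A minimal sketch of the generated output for a one-line change with full surrounding context:

    from friendpaste.utils.diff import unified_diff

    old = ['one', 'two', 'three']
    new = ['one', 'two!', 'three']
    print '\n'.join(unified_diff(old, new, context=3))
    # @@ -1,3 +1,3 @@
    #  one
    # -two
    # +two!
    #  three
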
+def get_diff_options(req):
+    options_data = {}
+    data = {'options': options_data}
+    
+    def get_bool_option(name, default=0):
+        pref = int(req.session.get('diff_' + name, default))
+        arg = int(req.args.has_key(name))
+        if req.args.has_key('update') and arg != pref:
+            req.session['diff_' + name] = arg
+        else:
+            arg = pref
+        return arg
+
+    pref = req.session.get('diff_style', 'inline')
+    style = req.args.get('style', pref)
+    if req.args.has_key('update') and style != pref:
+        req.session['diff_style'] = style
+    data['style'] = style
+
+    pref = int(req.session.get('diff_contextlines', 2))
+    try:
+        arg = int(req.args.get('contextlines', pref))
+    except ValueError:
+        arg = -1
+    if req.args.has_key('update') and arg != pref:
+        req.session['diff_contextlines'] = arg
+    options = ['-U%d' % arg]
+    options_data['contextlines'] = arg
+
+    arg = get_bool_option('ignoreblanklines')
+    if arg:
+        options.append('-B')
+    options_data['ignoreblanklines'] = arg
+
+    arg = get_bool_option('ignorecase')
+    if arg:
+        options.append('-i')
+    options_data['ignorecase'] = arg
+
+    arg = get_bool_option('ignorewhitespace')
+    if arg:
+        options.append('-b')
+    options_data['ignorewhitespace'] = arg
+
+    return (style, options, data)
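A hedged sketch of how the returned tuple could feed the two diff helpers above; compare_revisions, old_lines and new_lines are hypothetical names used only for illustration:

    from friendpaste.utils.diff import diff_blocks, get_diff_options, unified_diff

    def compare_revisions(request, old_lines, new_lines):
        style, options, data = get_diff_options(request)
        context = data['options']['contextlines']
        if context < 0:
            context = None                      # full-context diff
        if style == 'inline':
            data['changes'] = diff_blocks(
                old_lines, new_lines, context=context,
                ignore_blank_lines=data['options']['ignoreblanklines'],
                ignore_case=data['options']['ignorecase'],
                ignore_space_changes=data['options']['ignorewhitespace'])
        else:
            data['diff'] = list(unified_diff(old_lines, new_lines,
                                             context=context))
        return data
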

friendpaste/views.py

+# -*- coding: utf-8 -
+# Copyright 2008 by Benoît Chesneau <benoitc@e-engura.com>
+# 
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os.path
+import time
+
+from wtforms import Form, TextField, TextAreaField, SelectField, ValidationError, \
+        validators
+from pygments.lexers import get_all_lexers, get_lexer_for_filename
+from pygments.styles import get_all_styles
+from pygments.formatters import HtmlFormatter
+from werkzeug import redirect
+from werkzeug.utils import url_quote
+from werkzeug.routing import NotFound
+
+import simplejson as json
+
+import friendpaste.settings as settings
+from friendpaste.utils import local, local_manager, datetimestr_topython, strptime
+from friendpaste.http import FPResponse
+from friendpaste.template import render_response, highlighter
+from friendpaste.models import Snippet
+from friendpaste.utils.diff import unified_diff, diff_blocks
+from friendpaste.feeds import RssFeed, RssFeedEntry 
+
+
+class FoundRev(Exception):
+    pass
+
+def _get_lexers():
+    # Build the (alias, label) choices for the language field, with the
+    # optional settings.TOP_LEXERS favourites pinned above a separator.
+    top_lexers = getattr(settings, 'TOP_LEXERS', None) or []
+    top_aliases = [l[0] for l in top_lexers]
+
+    ret = []
+    for l in get_all_lexers():
+        if l[1][0] not in top_aliases:
+            ret.append((l[1][0], l[0]))
+    ret.sort()
+    if top_lexers:
+        ret = top_lexers + [('text', '------------')] + ret
+    return ret
+
+LEXERS_CHOICE = [('text', 'Plain text')] + _get_lexers()
+ALL_LEXERS = get_all_lexers()
+
+ALL_COLORSHEME = list(get_all_styles())
+
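_get_lexers() looks for an optional TOP_LEXERS setting and pins those entries above a separator; the entries use the same (alias, label) shape as the SelectField choices. An illustrative (not prescribed) settings snippet:

    # friendpaste/settings.py -- optional; omit it to keep the plain
    # alphabetical lexer list.
    TOP_LEXERS = [
        ('python', 'Python'),
        ('js', 'JavaScript'),
        ('diff', 'Diff'),
    ]
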
+class PasteForm(Form):
+    title = TextField('Title')
+    snippet = TextAreaField('Paste', [validators.length(min=3, max=500000)])
+    language = SelectField(u'Programming Language', choices=LEXERS_CHOICE)
+    
+
+
+def create_snippet(request):
+    mimetypes = request.accept_mimetypes
+    if request.method == 'POST' and 'application/json' in mimetypes:
+        d = json.loads(request.data)
+        lang = request.values.get("lang", None)
+        language = d.get('language', 'text')
+        if lang == "ext":
+            try:
+                l = get_lexer_for_filename("test."+language)
+                language = l.aliases[0]
+            except:
+                language = 'text'
+        try:
+            s = Snippet(
+                    title=d.get('title', ""),
+                    snippet=d.get('snippet'),
+                    language=language
+            )
+        except:
+            return send_json({'ok': False, 'reason': 'you should provide snippet code'})
+        try:
+            s.store(local.db)
+        except:
+            return send_json({'ok': False, 'reason': 'something went wrong while saving data'})
+
+        return send_json({
+            'ok': True,
+            'id': s.id,
+            'revid': s.revid,
+            'url': '%s/%s' % (settings.SITE_URI, s.id)
+        })
+
+    form = PasteForm(request.form, prefix='paste')
+    if request.method == 'POST' and form.validate():
+        s = Snippet(title=form.data['title'], snippet=form.data['snippet'],
+                    language=form.data['language'])
+        s.store(local.db)
+        print "id created : %s" % s.id
+        return redirect('/%s' % s.id)
+
+    return render_response('paste/index.html', form=form)
+
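When the client sends Accept: application/json, create_snippet acts as a JSON API and answers with the new paste's id, revid and url. A minimal Python 2 client sketch; the host is a placeholder and the view is assumed to be routed at the site root:

    import urllib2
    import simplejson as json

    payload = json.dumps({'title': 'hello', 'snippet': 'print 42',
                          'language': 'python'})
    req = urllib2.Request('http://paste.example.com/', payload,
                          {'Accept': 'application/json',
                           'Content-Type': 'application/json'})
    result = json.loads(urllib2.urlopen(req).read())
    print result['ok'], result['url']
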
+def edit_snippet(request, id):
+    s = Snippet.load(local.db, id)
+    if not s:
+        raise NotFound
+    form = PasteForm(request.form, prefix='paste', **{
+        'title': s.title,
+        'snippet': s.snippet,
+        'language': str(s.language)
+    })
+
+    if request.method in ('POST', 'PUT') and form.validate():
+        old_revid = s.revid
+        s.title = form.data['title']
+        s.snippet = form.data['snippet']
+        s.language = form.data['language']
+        s.store(local.db)
+        if s.revid == old_revid:
+            return redirect('/%s?msg=%s' % (s.id, url_quote("No changes detected.")))
+        return redirect('/%s' % s.id)
+    return render_response('paste/edit.html', form=form, snippet=s) 
+
+def _get_theme(request):
+    theme = request.cookies.get('theme', 'default')
+    highlight_css = "%s/css/%s.css" % (settings.SHARED_DATA, theme)
+    if not os.path.exists(highlight_css):
+        css = HtmlFormatter(style=theme).get_style_defs('.highlight')
+        f = open(highlight_css, 'w')
+        f.write(css)
+        f.close()
+    return theme
+
+def send_json(body):
+    resp = FPResponse(json.dumps(body))
+    resp.headers['content-type'] = 'application/json'
+    return resp
+
+def _get_snippet_revisions(id):
+
+    def _fetch():
+        r = local.db.view('_view/paste/by_id', startkey=[str(id)],
+                          endkey=[str(id), 10])
+        return r, len(r)
+
+    # len() forces the view query to run, so failures surface here;
+    # retry once before giving up.
+    try:
+        res, l = _fetch()
+    except:
+        try:
+            res, l = _fetch()
+        except:
+            raise NotFound
+
+    return list(res)
+
+def get_snippet(id, rev=None):
+    revisions = {}
+    res = _get_snippet_revisions(id)
+    if not res:
+        raise NotFound 
+    # set revisions
+    if len(res) > 1:
+        revisions = res[1:]
+        # reorder revisions
+        revisions.sort(lambda a,b: cmp(a.key[1], b.key[1]))
+        revisions.reverse()
+
+
+    # set snippet (maybe better to use a new view?)
+    data = {}
+    if rev and revisions and rev != res[0].value['revid']:
+        try:
+            for r in revisions:
+                if r.value['revid'] == rev:
+                    data = r.value
+                    data['_id'] = r.value['parent']
+                    raise FoundRev
+        except FoundRev:
+            pass
+        if not data:
+            data = res[0].value
+            data['_id'] = res[0].id
+    else:
+        data = res[0].value
+        data['_id'] = res[0].id
+
+    snippet = Snippet.wrap(data)
+    return snippet, revisions
+
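A minimal sketch of consuming get_snippet from view code (it needs a bound local.db, and the paste id is a placeholder): the first element is the current Snippet, the second the older view rows, most recent first:

    snippet, revisions = get_snippet('SOME_PASTE_ID')
    print snippet.revid                          # current revision
    for row in revisions:                        # older revisions, newest first
        print row.value['revid'], row.value['created']
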
+def get_revision(snippetid, revid):
+    rows = list(local.db.view('_view/paste/by_id',
+                              key=[str(snippetid), str(revid)]))
+    if not rows:
+        raise NotFound
+    data = rows[0].value
+    data['_id'] = data['parent']
+    return Snippet.wrap(data)
+
+
+def view_snippet(request, id):
+    mimetypes = request.accept_mimetypes
+    
+    if 'rss+xml' in mimetypes:
+        return view_rss(request, id)
+    elif 'application/json' in mimetypes and request.method == 'PUT':
+        try:
+            s = Snippet.load(local.db, id)
+        except:
+            raise NotFound
+        if not s:
+            raise NotFound
+        d = json.loads(request.data)
+        lang = request.values.get("lang", None)
+        language = d.get('language', 'text')
+        if lang == "ext":
+            try:
+                l = get_lexer_for_filename("test."+language)
+                language = l.aliases[0]
+            except:
+                language = 'text'
+        old_revid = s.revid
+        try:
+            s.title = d.get('title')
+            s.snippet = d.get('snippet')
+            s.language = language
+            s.store(local.db)
+        except:
+            return send_json({'ok': False, 'reason': 'you should provide snippet code'})
+        if s.revid == old_revid:
+            return send_json({'ok': False, 'reason': 'no changes detected'})
+        return send_json({
+            'ok': True,
+            'id': s.id,
+            'revid': s.revid,
+            'url': '%s/%s' % (settings.SITE_URI, s.id)
+        })
+
+    rev = request.values.get('rev', None)
+    if rev is None:
+        try:
+            s = Snippet.load(local.db, id)
+        except:
+            raise NotFound
+    else:
+        s, revisions = get_snippet(id, rev)
+
+    if not s:
+        raise NotFound
+
+    if 'application/json' in mimetypes:
+        if request.method == 'GET':
+            if s is None:
+                return send_json({'ok': False})
+            snippet_hash = {
+                'id': s.id,
+                'revid': s.revid,
+                'title': s.title,
+                'snippet': s.snippet,
+                'language': str(s.language)
+            }
+            return send_json(snippet_hash)
+    
+    form = PasteForm(request.form, prefix='paste', **{
+        'title': s.title,
+        'snippet': s.snippet,
+        'language': str(s.language)
+    })
+
+    # get theme
+    theme = _get_theme(request)
+    return render_response('paste/view.html', snippet=s, theme=theme, form=form)
+
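The same URL also answers JSON GET requests with the fields built in snippet_hash above. A minimal Python 2 client sketch (host and paste id are placeholders):

    import urllib2
    import simplejson as json

    req = urllib2.Request('http://paste.example.com/SOME_PASTE_ID',
                          headers={'Accept': 'application/json'})
    paste = json.loads(urllib2.urlopen(req).read())
    print paste['language'], paste['revid']
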
+
+
+def view_rss(request, id):
+    rev = request.values.get('rev', None)
+    s, revisions = get_snippet(id, rev)
+
+    if not s:
+        raise NotFound
+    feed = RssFeed(
+        title="Revisions to %s on Friendpaste" % (s.title or "snippet #%s" % s.id),
+        description='',
+        link="%s/%s" % (settings.SITE_URI, s.id)
+    )
+    feed.add_entry(RssFeedEntry(
+        title="%s revision %s" % (s.title, s.revid),
+        link="%s/%s?rev=%s" % (settings.SITE_URI, s.id, s.revid),
+        description="",
+        published=s.created,
+        updated=s.updated
+    ))
+ 
+    for revision in revisions:
+        feed.add_entry(RssFeedEntry(
+            title="%s revision %s" % (revision.value['title'], revision.value['revid']),
+            link= "%s/%s?rev=%s" % (settings.SITE_URI, s.id, revision.value['revid']),
+            description="",
+            **{ 
+                'published': datetimestr_topython(revision.value['created']),
+                'updated': datetimestr_topython(revision.value['updated'])
+            }