Source

Mango / main.py

Full commit
  1
  2
  3
  4
  5
  6
  7
  8
  9
 10
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
#!/usr/bin/python
# -*- coding: utf-8 -*-

from __future__ import with_statement
import datetime
import os
import re
import urlparse
try:
    from urlparse import parse_qsl # >= Python 2.6
except ImportError:
    from cgi import parse_qsl # Python 2.5

import disqus
import markdown
import pytz

from django.conf import settings
from django.core.cache import cache
from django.template import Context, loader
from django.utils.safestring import mark_safe

import mango
from mango import utf8
from mango.settings import *
from mango.templatetags.mango_extras import _convert
from mango.utils import canonicalize, logger

# Regular-expression templates.  Each `%s` is later substituted with either a
# marker ("|" for excerpts, "||" for updates) or a metadata key name.
block = r'(?m)^(%s(?=[ \n])[^\n]*(\n|$))+'
match = r'(?m)^(%s(?=[ \n]) ?)'
_meta = r'(?m)^(( {,3})%s:(\s*).*\n)'
_update_meta = r'(?m)^(((?:\| )?\|\| {1,4})%s:(\s*).*\n)'

# All patterns are compiled once at import time and shared module-wide.
RE = {
    # normalize CR / CRLF line endings to LF
    '\r\n?': re.compile(r'\r\n?'),
    # file/directory names may be "alias=canon" (or the deprecated "alias=>canon")
    'alias=canon': re.compile(r'^(0*(?P<alias>.*?)(?P<separator>=>?))?(?P<canon>.+)$'),
    'control_characters': re.compile(r'[\a\b\s]'),
    # a run of lines each starting with a single pipe marks an excerpt
    'excerpt': re.compile(block % r'\|'),
    'excerpt_pipes': re.compile(match % r'\|'),
    # inline {{ filesize:'path' }} placeholders replaced by human-readable sizes
    'filesize': re.compile(r'''{{\s*filesize:(['"])(?P<filepath>\S+)\1\s*}}'''),
    # spans whose contents must be protected from typographic replacements
    'fragment': re.compile(r'(?s)(<code>.*?</code>|<pre>.*?</pre>|<skip>.*?</skip>)'),
    # an excerpt hand-placed directly after the metadata header
    'hand-crafted': re.compile(r'( {,3}\S+:.*(\n[ \t]*\S+:.*)*\n{2,})?(?P<excerpt>(\|(?=[ \n])[^\n]*\n)+)'),
    # a leading <h1>-<h6> heading used as the document title when meta has none
    'heading': re.compile(r'(?m)\s*<(h[1-6])[^>]*>(?P<title>.+?)</\1>$(?P<html>[\s\S]*)'),
    # typographic substitutions applied outside code/pre/skip fragments
    'replacements': (
        # ... -> ellipsis
        (re.compile(r'(?<![.])[.]{3}(?![.])'), u'\u2026'),
        # [space][hyphen][hyphen][space] -> [thin space][em dash][thin space]
        (re.compile(r' -- '), u'\u2009\u2014\u2009'),
    ),
    'skip': re.compile(r'</?skip>'),
    'snippet': re.compile(r'(?s)^<(code|pre|skip)>.*?</\1>$'),
    'time': re.compile(_meta % 'time'),
    # a run of lines each starting with a double pipe marks a post update
    'update': re.compile(r'(?m)^((?:\|\|(?=[ \n])[^\n]*(?:\n|$))+)'),
    'update_pipes': re.compile(match % r'\|\|'),
    'update_time': re.compile(_update_meta % 'time'),
    'update_zone': re.compile(_update_meta % 'zone'),
    # a paragraph/definition consisting solely of a link -- possibly a video
    'video_link?': re.compile(r'(?m)^<(dt|p)><a href="(?P<href>[^"]+)"(?: title="(?P<title>[^"]*)")?>(?P<text>[^<]+)</a></\1>$'),
    'zone': re.compile(_meta % 'zone'),
}

# Single shared Markdown converter (callers must md.reset() before each use)
# and pre-loaded Django templates for rendering updates and YouTube embeds.
md = markdown.Markdown(extensions=('meta',) + MARKDOWN_EXTENSIONS)
update_template = loader.get_template('update.dhtml')
youtube_template = loader.get_template('video/youtube.dhtml')

class Resource(object):
    """Maps a filesystem path under the documents root to its URLs.

    Each path component may be named "alias=canon"; the canonical part
    forms the permalink and the alias (when present) forms the short URL.
    Every combination of canonical/alias fragments is collected into
    `_aliases` so a request for any variant of the path can be matched.
    """

    def __init__(self, path):
        super(Resource, self).__init__()
        canon_fragments, short_fragments = [], []
        head, tail = os.path.split(path)
        # Walk the path leaf-to-root, splitting each component into its
        # canonical name and optional alias.
        while tail:
            match = re.match(RE['alias=canon'], tail)
            if match:
                if match.group('separator') == '=>':
                    logger.warning('"=>" separator is deprecated -- rename document, replacing "=>" with "="')
                canon = match.group('canon')
                # Only the leaf of a file path carries an extension.
                if not canon_fragments and not os.path.isdir(path):
                    canon = os.path.splitext(canon)[0] # strip extension
                canon_fragments.insert(0, canon)
                short_fragments.insert(0, match.group('alias') or canon)
            head, tail = os.path.split(head)
        # Strip the documents-root prefix and lower-case both URL paths.
        replacement = (mango.settings.UNIX_DOCUMENTS_PATH, u'', 1)
        canon_path = (u'/%s/' % '/'.join(canon_fragments)).replace(*replacement).lower()
        short_path = (u'/%s/' % '/'.join(short_fragments)).replace(*replacement).lower()
        self.urls = {
            'canon': {'rel': canon_path, 'abs': BASE_URL + canon_path},
            'short': {'rel': short_path, 'abs': SHORT_URL_BASE + short_path},
        }

        # Build every alias path: at each level the alias set doubles when
        # the short fragment differs from the canonical one.
        aliases = [u'']
        fragments = zip(canon_path.split(u'/')[1:], short_path.split(u'/')[1:])
        for canon_fragment, short_fragment in fragments:
            # append the canonical fragment to each alias
            updated = [u'/'.join([alias, canon_fragment]) for alias in aliases]
            if short_fragment != canon_fragment:
                # branch each alias and append the short fragment to the new branches
                updated += [u'/'.join([alias, short_fragment]) for alias in aliases]
            aliases = updated
        self._aliases = aliases

class Document(Resource):
    """A single Markdown document (page, post, or a post's update).

    A master document is constructed from a file path; an update is
    constructed with `parent` set to its master and converted separately
    so it can be rendered through the update template.
    """

    def __init__(self, filepath=None, parent=None):
        # Only master documents (no parent) get URLs and file contents;
        # updates receive their contents later via convert().
        if parent is None and filepath:
            super(Document, self).__init__(path=filepath)
            with open(filepath) as f:
                self._contents = utf8(f.read())
        else:
            self._contents = None

        self._filepath = filepath
        self._parent = parent
        self._thread = None # Disqus thread, attached during convert()

        # Neighbouring posts in chronological order; set by Category.
        self.previous = None
        self.next = None

        if parent is None:
            self.convert()

    def apt(self, if_update, if_master=''):
        """Return `if_update` when this document is an update, else `if_master`."""
        return if_update if self._parent else if_master

    def convert(self, contents=None):
        """Convert Markdown `contents` (or the stored contents) to HTML.

        Populates excerpt, meta, html, author, datetime, title and type,
        and may rewrite the source file on disk to pin down the time zone.
        Returns self so updates can be built inline in a template context.
        """
        if contents is None:
            contents = self._contents
            if not contents:
                return self
        else:
            self._contents = contents

        # Normalize line endings and guarantee a trailing newline.
        contents = u'%s\n' % re.sub(RE['\r\n?'], '\n', contents)
        if self._parent:
            # Updates arrive with "||" prefixes on every line; strip them.
            contents = re.sub(RE['update_pipes'], u'', contents)

        self.body = contents

        # excerpts
        snippets = []
        # A hand-crafted excerpt sits right after the metadata header and is
        # removed from the document; inline excerpts are kept, minus pipes.
        match = re.match(RE['hand-crafted'], contents)
        if match:
            capture = match.group('excerpt')
            snippets.append(re.sub(RE['excerpt_pipes'], u'', capture))
            contents = contents.replace(capture, u'')
        for match in re.finditer(RE['excerpt'], contents):
            capture = match.group(0)
            snippet = re.sub(RE['excerpt_pipes'], u'', capture)
            snippets.append(snippet)
            contents = contents.replace(capture, snippet)
        md.reset()
        self.excerpt = md.convert('\n\n'.join(snippets))

        # updates
        # Split into alternating master/update chunks; convert the master
        # chunks in one pass (joined by a <mango/> sentinel so they can be
        # re-separated) and render each update via a child Document.
        split = re.split(RE['update'], contents)
        md.reset()
        chunks = md.convert(u'\n<mango/>\n'.join(split[0::2])).split('<mango/>')
        self.meta = getattr(md, 'Meta', {})
        updates = [update_template.render(Context({'update': Document(parent=self).convert(item)})) for item in split[1::2]]
        # Re-interleave converted chunks and rendered updates in order.
        combined = [None] * (len(chunks) + len(updates))
        combined[0::2] = chunks
        combined[1::2] = updates
        self.html = u''.join(combined)

        # Flatten single-valued metadata; split comma-separated list keys.
        for key, value in self.meta.items():
            if len(value) != 1: # note: `value` is always a list
                self.meta[key] = value
            elif key in mango.settings.META_LISTS:
                self.meta[key] = [item.strip() for item in value[0].split(',')]
            else:
                self.meta[key] = value[0].strip()

        self.author = None
        author_name = self.meta.get('author')
        if author_name:
            self.author = {
                'name': author_name,
                'url': AUTHORS[author_name] if author_name in AUTHORS else None,
            }

        self.datetime = None
        if 'date' in self.meta and 'time' in self.meta:
            def update_post(pattern, repl):
                # Rewrite this document's metadata inside the master file on
                # disk (used to record a canonical time zone).
                master = self.master()
                if master._filepath:
                    with open(master._filepath, 'w') as f:
                        contents = self._contents
                        if master._contents.find(contents) == -1: # update within an excerpt
                            contents = re.sub(RE['update_pipes'], r'| \1', contents)
                        contents = master._contents.replace(contents, re.sub(pattern, repl, contents), 1)
                        f.write(contents.encode('utf-8'))
                        master._contents = contents

            if 'zone' in self.meta:
                tz = pytz.timezone(canonicalize(self.meta['zone'], TIME_ZONES))
                # Canonicalize non-standard zone names in the source file.
                if self.meta['zone'] != tz.zone:
                    pattern = RE[self.apt('update_zone', 'zone')]
                    update_post(pattern, r'\2zone:\3%s\n' % tz.zone)
            else:
                # No zone given: pin the site's default zone into the file so
                # the timestamp stays stable if the setting later changes.
                tz = pytz.timezone(settings.TIME_ZONE)
                pattern = RE[self.apt('update_time', 'time')]
                update_post(pattern, r'\1\2zone:\3%s\n' % tz.zone)

            dt_format = u'%s %s' % (MARKDOWN_DATE_FORMAT, MARKDOWN_TIME_FORMAT)
            try:
                # Stored as UTC; localized from the document's zone.
                self.datetime = tz.localize(datetime.datetime.strptime('%s %s' % (
                        self.meta['date'], self.meta['time']), dt_format)).astimezone(pytz.utc)
            except ValueError:
                logger.warning('Date and/or time incorrectly formatted')

        self.title = self.meta.get('title', u'')
        if not self.title:
            # Fall back to a leading heading, removing it from the body HTML.
            match = re.match(RE['heading'], self.html)
            if match:
                self.title = match.group('title')
                self.html = match.group('html')

        def video_markup(text):
            # Replace paragraphs/definitions that are bare YouTube links with
            # an embedded player rendered from the YouTube template.
            def sub(match):
                o = urlparse.urlparse(match.group('href'))
                if re.match(r'https?$', o.scheme) and re.match(r'(www[.])?youtube[.]com$', o.netloc):
                    query = dict(parse_qsl(o.query))
                    if 'v' in query:
                        video_id = query.pop('v')
                        return youtube_template.render(Context({
                            'url': u'%s://%s/embed/%s' % (o.scheme, o.netloc, video_id),
                            'query': query,
                            'title': match.group('title'),
                            'tag': match.group(1),
                        }))
                return match.group(0)
            return re.sub(RE['video_link?'], sub, text)

        self.html = video_markup(self.html)

        def filesize(filepath):
            # Human-readable size of a local file, e.g. u'(\u22481.5\u2009MB)';
            # empty string if the file cannot be stat'd.
            if not os.path.isabs(filepath):
                filepath = os.path.join(PROJECT_PATH, filepath)
            try:
                filesize = os.path.getsize(filepath)
            except OSError:
                return u'' # fail silently

            bytes = (
                ('bytes', 1),
                ('kB', KILOBYTE_SIZE**1),
                ('MB', KILOBYTE_SIZE**2),
                ('GB', KILOBYTE_SIZE**3),
                ('TB', KILOBYTE_SIZE**4),
            )
            for unit, value in bytes:
                if filesize <= value * KILOBYTE_SIZE or unit == 'TB':
                    if unit == 'bytes':
                        return u'(%s\u2009bytes)' % filesize
                    else:
                        return u'(≈%.1f\u2009%s)' % (float(filesize)/value, unit)

        # Apply filesize placeholders and typographic replacements, but only
        # outside <code>/<pre>/<skip> fragments.
        fragments = re.split(RE['fragment'], self.html.lstrip('\n'))
        self.html = u''
        for fragment in fragments:
            if not re.match(RE['snippet'], fragment):
                fragment = re.sub(RE['filesize'],
                        lambda match: u'<span class="filesize">%s</span>' % (
                        filesize(match.group('filepath'))), fragment)
                if REPLACEMENTS:
                    for pattern, repl in RE['replacements']:
                        fragment = re.sub(pattern, repl, fragment)
            self.html += fragment
        # <skip> tags have served their purpose; remove them everywhere.
        self.body = re.sub(RE['skip'], '', self.body)
        self.html = mark_safe(re.sub(RE['skip'], '', self.html))
        self.excerpt = mark_safe(self.excerpt) or self.html
        self.type = self.meta.get('type', 'post' if self.datetime else 'page')

        # attach comments thread
        if FORUM and hasattr(self, 'urls'):
            cache_key = 'mango:disqus:%s' % self.urls['canon']['abs']
            cached = cache.get(cache_key)
            if cached is not None:
                self._thread = cached
                logger.debug('Disqus thread for "%s" retrieved from cache' % self._thread.title)
            else:
                try:
                    # accommodate old threads (Mango-made threads must be accessed by identifier)
                    self._thread = (DISQUS.get_thread_by_url(FORUM, self.urls['canon']['abs'])
                                 or DISQUS.thread_by_identifier(FORUM, self.title, self.urls['canon']['rel'])['thread'])
                except disqus.APIError, error:
                    logger.warning('Disqus API error: %s' % error)
                else:
                    cache.set(cache_key, self._thread, 24*60*60)
                    logger.debug('Disqus thread for "%s" cached' % self._thread.title)

        return self

    def comments(self):
        """Return this document's approved Disqus comments, oldest first.

        Results are cached for 24 hours; an empty list is returned when no
        thread is attached or the Disqus API call fails.
        """
        if not self._thread:
            return []

        cache_key = 'mango:disqus:%s' % self._thread.id
        cached = cache.get(cache_key)
        if cached is not None:
            logger.debug('Disqus comments for "%s" retrieved from cache' % self._thread.title)
            return cached

        comments = []

        try:
            thread_posts = DISQUS.get_thread_posts(FORUM, self._thread, limit=9999, exclude='killed')
        except disqus.APIError, error:
            logger.warning('Disqus API error: %s' % error)
        else:
            for comment in thread_posts:
                if comment.has_been_moderated or not COMMENTS_REQUIRE_APPROVAL:
                    comment.html = _convert(comment.message)
                    comments.append(comment)
            comments.sort(key=lambda comment: comment.created_at)
            cache.set(cache_key, comments, 24*60*60)
            logger.debug('Disqus comments for "%s" cached' % self._thread.title)

        return comments

    def has_tag(self, tag):
        """True if `tag` appears in this document's `tags` metadata."""
        return tag in self.meta.get('tags', [])

    def master(self):
        """Return the master document: the parent for updates, else self."""
        return self._parent or self

    def __str__(self):
        return self.__unicode__().encode('utf-8')

    def __unicode__(self):
        # Updates are labelled with their master's title plus " (update)".
        return u''.join((getattr(self.master(), 'title', 'Untitled Document'), self.apt(' (update)')))

class Category(Resource):
    """A directory of documents: pages, chronologically ordered posts,
    and nested subcategories mirroring the on-disk document tree."""

    @classmethod
    def toplevel(cls):
        """Return the root Category, from cache when INDEX_CACHE_SECONDS is set.

        The whole tree is built eagerly by __init__, so caching the root
        object caches the entire index.
        """
        if INDEX_CACHE_SECONDS:
            cache_key = 'mango:toplevel:%s' % BASE_URL
            toplevel = cache.get(cache_key)
            if toplevel is not None:
                logger.debug('Document tree retrieved from cache')
            else:
                toplevel = Category(DOCUMENTS_PATH)
                cache.set(cache_key, toplevel, INDEX_CACHE_SECONDS)
                logger.debug('Document tree created and cached')
        else:
            toplevel = Category(DOCUMENTS_PATH)

        return toplevel

    def __init__(self, dirpath):
        """Recursively build the category rooted at `dirpath`.

        Hidden entries and names containing control characters are skipped;
        subdirectories become subcategories, other entries become documents.
        """
        super(Category, self).__init__(path=dirpath)
        self._dirpath = dirpath
        # Directory names may carry an "alias=canon" prefix; the canonical
        # part is the category's display name.
        self.name = re.match(RE['alias=canon'],
                os.path.basename(dirpath)).group('canon')
        self.pages = []
        self.posts = []
        self.subcategories = []

        # Iterate the listing directly -- no need for an intermediate list.
        for name in os.listdir(dirpath):
            if name.startswith('.') or re.search(RE['control_characters'], name):
                continue # skip hidden or suspiciously-named entries
            path = os.path.join(dirpath, name)
            if os.path.isdir(path):
                self.subcategories.append(Category(path))
            else:
                self.add_document(path)

        self.subcategories.sort(key=lambda category: category.name)
        self.pages.sort(key=lambda page: page.title)

    def add_document(self, filepath):
        """Load (or fetch from cache) the document at `filepath` and file it.

        Pages are appended to `pages`; posts are inserted into `posts` in
        ascending datetime order with previous/next links maintained.
        Returns self for chaining.
        """
        if POST_CACHE_SECONDS:
            cache_key = 'mango:%s' % filepath
            # Cache entries pair the Document with the file's mtime so that
            # on-disk edits invalidate the cached object.
            document, mod_time = cache.get(cache_key, (None, None)) # retrieve Document
            if document is not None and mod_time == os.path.getmtime(filepath):
                logger.debug('Document object retrieved from cache: %s' % filepath)
            else: # modified or not in cache, so create and cache a new Document object
                document = Document(filepath)
                cache.set(cache_key, (document, os.path.getmtime(filepath)), POST_CACHE_SECONDS)
                logger.debug('Document object created and cached: %s' % filepath)
        else:
            document = Document(filepath)

        if document.type == 'page':
            self.pages.append(document)
        else:
            # Find the insertion point that keeps posts sorted by datetime.
            length = len(self.posts)
            for index, post in enumerate(self.posts):
                if document.datetime < post.datetime:
                    break
            else:
                index = length

            # Wire the doubly-linked previous/next chain around the
            # insertion point.
            prev_index = index - 1
            next_index = index

            if 0 <= prev_index < length:
                self.posts[prev_index].next = document
                document.previous = self.posts[prev_index]

            if 0 <= next_index < length:
                self.posts[next_index].previous = document
                document.next = self.posts[next_index]

            self.posts.insert(index, document)

        return self

    def descendants(self, pages=False):
        """Return all posts (newest first) in this category and below,
        optionally including pages."""
        documents = self.posts[::-1]
        if pages:
            documents += self.pages[:]
        for subcategory in self.subcategories:
            documents += subcategory.descendants(pages)
        return documents

    def archives(self):
        """Group all descendant posts into (year, month, posts) triples,
        newest month first."""
        archives = []
        posts = self.descendants()
        if posts:
            posts.sort(key=lambda post: post.datetime, reverse=True)
            year = posts[0].datetime.year
            month = posts[0].datetime.month
            these_posts = []
            for post in posts:
                if post.datetime.year == year and post.datetime.month == month:
                    these_posts.append(post)
                else:
                    # Month changed: flush the batch and start a new one.
                    archives.append((year, month, these_posts))
                    year, month = post.datetime.year, post.datetime.month
                    these_posts = [post]
            archives.append((year, month, these_posts))
        return archives

    def find_match(self, urlpath):
        """Return the category or document matching `urlpath` (checking all
        alias variants), searching depth-first; None if nothing matches."""
        if urlpath in self._aliases:
            return self
        for document in self.pages + self.posts:
            if urlpath in document._aliases:
                return document
        for subcategory in self.subcategories:
            match = subcategory.find_match(urlpath)
            if match:
                return match
        return None

    def _tags(self):
        """Collect every tag in this category and its descendants, with
        duplicates preserved for counting."""
        tags = []
        for document in self.pages + self.posts:
            # extend() avoids the redundant copy of the original comprehension
            tags.extend(document.meta.get('tags', []))
        for subcategory in self.subcategories:
            tags.extend(subcategory._tags())
        return tags

    def tags(self):
        """Return (tag, count) pairs sorted case-insensitively by tag."""
        counts = {}
        for tag in self._tags():
            counts[tag] = counts.get(tag, 0) + 1
        # sorted() already yields the list of pairs; no re-wrapping needed.
        return sorted(counts.items(), key=lambda pair: pair[0].lower())

    def __str__(self):
        return self.name

    def __unicode__(self):
        return self.name