Commits

Ian Lewis committed 9e23e35

Used require_http_methods and removed the extra decorators module

  • Parent commit c34057a
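
Note: Django's require_http_methods decorator, adopted in this commit, returns a 405 HttpResponseNotAllowed (with an Allow header listing the permitted methods) for any method not in the given list. Unlike the removed allow_methods helper, it does not add HEAD automatically when GET is allowed, which is why the views below now list HEAD explicitly. A minimal sketch (the view name is hypothetical):

    from django.http import HttpResponse
    from django.views.decorators.http import require_http_methods

    @require_http_methods(["GET", "HEAD"])
    def status_view(request):
        # POST, PUT, etc. receive a 405 response whose Allow header
        # lists the permitted methods.
        return HttpResponse("ok")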


Files changed (4)

File lifestream/util.py

+#!/usr/bin/env python
+#:coding=utf-8:
+#:tabSize=2:indentSize=2:noTabs=true:
+#:folding=explicit:collapseFolds=1:
+
+import re
+
+from django.conf import settings
+from django.core.cache import cache
+
+from BeautifulSoup import BeautifulSoup, Comment
+
+class CacheStorage(object):
+    """
+    A class implementing python's dictionary API
+    for use as a storage backend for feedcache.
+    
+    Uses django's cache framework for the backend
+    of the cache.
+
+    TODO: Use a cache timeout setting for the cache
+    time and use the same value for feedcache.
+    """
+    def get(self, key, default=None):
+        return cache.get(self._get_key(key), default)
+
+    def __setitem__(self, key, value):
+        cache.set(self._get_key(key), value)
+
+    def __getitem__(self, key):
+        return cache.get(self._get_key(key))
+
+    def __delitem__(self, key):
+        return cache.delete(self._get_key(key))
+
+    def _get_key(self, key):
+        return "lifestream-cache-%s" % key
+
+def get_url_domain(url):
+    """
+    Get a domain from the feed url. This attempts to
+    get a clean url by ignoring known subdomains used for
+    serving feeds such as www, feeds, api etc.
+    """
+    protocol_index = url.find('://')+3 if url.find('://')!=-1 else 0
+    slash_index = url.find('/', protocol_index) if url.find('/', protocol_index)!=-1 else len(url)
+  
+    sub_url = url[protocol_index:slash_index]
+    parts = sub_url.split('.')
+  
+    if len(parts) > 2 and parts[0] in ('feeds','www','feedproxy','rss','gdata','api'):
+        return '.'.join(parts[1:])
+    else:
+        return sub_url
+
+# Default settings.
+# VALID_TAGS is a dictionary mapping a tag name to the tuple of
+# attributes allowed on that tag. An empty tuple means no
+# attributes are allowed.
+VALID_TAGS = {
+    'b': (),
+    'blockquote': (),
+    'em': (),
+    'strong': (),
+    'strike': (),
+    'a': ('href', 'title', 'rel'),
+    'i': (),
+    'br': (),
+    'ul': (),
+    'ol': (),
+    'li': (),
+    'u': (),
+    'p': (),
+    'h1': (),
+    'h2': (),
+    'h3': (),
+    'h4': (),
+    'table': (),
+    'thead': (),
+    'tbody': (),
+    'tfoot': (),
+    'th': (),
+    'td': ('colspan', 'rowspan'),
+    'tr': (),
+    'img': ('src', 'alt', 'title', 'width', 'height'),
+    'span': (),
+}
+
+# VALID_STYLES is a list of css style names that are
+# valid in style attributes.
+VALID_STYLES = (
+    "color",
+    "font-weight",
+)
+
+def sanitize_html(htmlSource, encoding=None, valid_tags=None, valid_styles=None):
+    """
+    Clean bad html content. Currently this simply strips tags that
+    are not in the VALID_TAGS setting.
+    
+    This function is used as a replacement for feedparser's _sanitizeHTML.
+    It fixes problems like unclosed tags and gives finer-grained control
+    over which attributes can appear in which tags.
+
+    Returns the sanitized html content.
+    """
+    if valid_tags is None:
+        valid_tags = getattr(settings, "LIFESTREAM_VALID_TAGS", VALID_TAGS)
+    if valid_styles is None:
+        valid_styles = getattr(settings, "LIFESTREAM_VALID_STYLES", VALID_STYLES)
+
+    # Matches "javascript" even when split up by whitespace or hex
+    # character entities, so it can be stripped from attribute values.
+    js_regex = re.compile(r'[\s]*(&#x.{1,7})?'.join(list('javascript')))
+    css_regex = re.compile(r' *(%s): *([^;]*);?' % '|'.join(valid_styles), re.IGNORECASE)
+
+    # Sanitize html with BeautifulSoup
+    if encoding:
+        soup = BeautifulSoup(htmlSource, fromEncoding=encoding)
+    else:
+        soup = BeautifulSoup(htmlSource)
+
+    def entities(text):
+        return text.replace('<','&lt;')\
+                   .replace('>', '&gt;')\
+                   .replace('"', '&quot;')\
+                   .replace("'", '&apos;')
+
+    # Remove html comments
+    for comment in soup.findAll(text=lambda text: isinstance(text, Comment)):
+        comment.extract()
+ 
+    for tag in soup.findAll(True):
+        if tag.name not in valid_tags:
+            tag.hidden = True
+        else:
+            tag.attrs = [(attr, js_regex.sub('', val)) for attr, val in tag.attrs
+                         if attr in valid_tags[tag.name]]
+    
+    # Strip disallowed css properties from style attributes.
+    for tag in soup.findAll(attrs={"style":re.compile(".*")}):
+        style = ""
+        for key,val in css_regex.findall(tag["style"]):
+            style += "%s:%s;" % (key,val.strip())
+        tag["style"] = style
+
+    # Sanitize html text by changing bad text to entities.
+    # BeautifulSoup will do this for href and src attributes
+    # on anchors and image tags but not for text.
+    for text in soup.findAll(text=True):
+        text.replaceWith(entities(text))
+
+    # Render the sanitized tree; tags marked hidden above are omitted.
+    return soup.renderContents().decode('utf8')
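
The CacheStorage class above is meant to be handed to feedcache as its storage dict. A minimal sketch of that wiring, assuming feedcache's Cache(storage) constructor and fetch(url) method (the feed URL is hypothetical):

    from feedcache.cache import Cache
    from lifestream.util import CacheStorage

    # feedcache treats the storage object like a dict, so parsed feeds
    # land in Django's cache under "lifestream-cache-" prefixed keys.
    feed_cache = Cache(CacheStorage())
    parsed = feed_cache.fetch("http://example.com/feed/")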
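
For get_url_domain, the intended behavior on a few representative (hypothetical) URLs:

    from lifestream.util import get_url_domain

    # Known feed-serving subdomains are dropped; other subdomains are kept.
    assert get_url_domain("http://feeds.example.com/blog/rss") == "example.com"
    assert get_url_domain("http://www.example.com/") == "example.com"
    assert get_url_domain("http://blog.example.com/feed") == "blog.example.com"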
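
And a sketch of what sanitize_html does to hostile markup; the output shown is inferred from the code above, not from a test run:

    from lifestream.util import sanitize_html

    dirty = '<p onclick="evil()">Hi <script>bad()</script></p>'
    print sanitize_html(dirty)
    # onclick is dropped (no attributes are whitelisted for <p>) and the
    # <script> tag is hidden, though its escaped text content survives,
    # giving roughly: <p>Hi bad()</p>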

File lifestream/util/__init__.py

(moved to lifestream/util.py; see the new file above)

File lifestream/util/decorators.py

-#!/usr/bin/env python
-#:coding=utf-8:
-#:tabSize=2:indentSize=2:noTabs=true:
-#:folding=explicit:collapseFolds=1:
-
-from django.http import HttpResponseNotAllowed
-
-def allow_methods(*method_list):
-    """
-    Checks that the request method is one of the given methods.
-    """
-    def _func(func):
-        def __func(request, *argv, **kwargv):
-            methods = method_list
-            if "GET" in methods:
-                methods += ("HEAD",)
-            if request.method in methods:
-                return func(request, *argv, **kwargv)
-            return HttpResponseNotAllowed(methods)
-        return __func
-    return _func

File lifestream/views.py

 from django.views.generic.list_detail import object_list,object_detail
 from django.conf import settings
 from django.shortcuts import get_object_or_404
-
-from lifestream.util.decorators import allow_methods
+from django.views.decorators.http import require_http_methods
 
 from lifestream.models import *
 
 DEFAULT_PAGINATION = getattr(settings, 'LIFESTREAM_DEFAULT_PAGINATION', 20)
 
-@allow_methods('GET')
+@require_http_methods(['GET', 'HEAD'])
 def main_page(request, lifestream_slug):
     lifestream = get_object_or_404(Lifestream, slug=lifestream_slug)
     return object_list(request, 
         },
     )
 
-@allow_methods('GET', 'POST')
+@require_http_methods(['GET', 'HEAD', 'POST'])
 def item_page(request, lifestream_slug, item_id):
     lifestream = get_object_or_404(Lifestream, slug=lifestream_slug)
     return object_detail(
         },
     )
 
-@allow_methods('GET')
+@require_http_methods(['GET', 'HEAD'])
 def domain_page(request, lifestream_slug, domain):
     lifestream = get_object_or_404(Lifestream, slug=lifestream_slug)
     return object_list(