Commits

Alexander Solovyov committed 278f751

remove trailing whitespaces from code
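
A sweep like this is typically scripted rather than edited by hand. A minimal sketch of the idea in Python (illustrative only, not the tool actually used for this commit):

    import os

    # Strip trailing spaces/tabs from every .py file under the current tree.
    for root, dirs, files in os.walk('.'):
        for name in files:
            if not name.endswith('.py'):
                continue
            path = os.path.join(root, name)
            with open(path) as f:
                lines = f.readlines()
            # rstrip() also drops the newline, so it is re-added here; a final
            # line without a newline gains one as a side effect.
            cleaned = [line.rstrip() + '\n' for line in lines]
            if cleaned != lines:
                with open(path, 'w') as f:
                    f.writelines(cleaned)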

Files changed (27)

apps/accounts/admin.py

 def get_form(self, request, obj=None, *args, **kwargs):
 
     form_cls = super(UserAdmin, self).get_form(request, obj, *args, **kwargs)
-    
-    if obj is not None: # additional interface feature for users and commentnodes 
+
+    if obj is not None: # additional interface feature for users and commentnodes
         comment_list_url = urlreverse('admin', args=['discussion/commentnode'])
         comment_list_url += '/?q=%s' % obj.username
         form_cls.base_fields['email_new'].help_text += u'<a href="%s">%s</a>' % (comment_list_url, _('Users comments'))

apps/accounts/backends.py

 
     def activate_action(self, activation_key):
         user = ActionRecord.registrations.activate_user(activation_key.lower())
-        if user: 
+        if user:
             return user
         else:
             return None

apps/accounts/views.py

     else:
         form = MergeForm(request)
         return {'form':form}
-    
+

apps/blogroll/management/commands/migrate_blogroll.py

     style.ALREADY = termcolors.make_style(fg='yellow')
     style.OK = termcolors.make_style(fg='green', opts=('bold'),)
     style.REGULAR = termcolors.make_style()
-    
+
     return style
 
 def out(style, msg, newline=False):
 
 class Command(base.NoArgsCommand):
     help = "Copy blogroll data to friends"
-    
+
     def handle_noargs(self, **options):
         from blogroll.models import Link
         from friends.models import FriendBlog
             'link': 'url',
             'rel_friendship': 'friendship_rel',
             'rel_professional': 'professional_rel',
-            'rel_geographical': 'geographical_rel',  
+            'rel_geographical': 'geographical_rel',
             'rel_family': 'family_rel',
             'rel_romantic': 'romantic_rel',
             'rel_identity': 'identity_rel',

apps/captcha/__init__.py

     bgcolor = 0x000000
     font = ImageFont.truetype(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/Vera.ttf'), 25)
     dim = font.getsize(solution)
-    im = Image.new('RGB', (dim[0] + 20, dim[1] + 10), bgcolor) 
+    im = Image.new('RGB', (dim[0] + 20, dim[1] + 10), bgcolor)
     d = ImageDraw.Draw(im)
     x, y = im.size
     #r = random.randint

apps/captcha/fields.py

                    )
         #self._test_id = None
         super( CaptchaWidget, self ).__init__( widgets, attrs )
-    
+
     def format_output(self, widgets):
         return u'<span class="captcha">%s%s%s</span>' % (widgets[0], widgets[1], widgets[2])
-    
+
     def decompress(self, value):
         # id - 0
         # image - 1
             captcha_id,
             reverse('captcha_image', args=[captcha_id]),
             '')
-    
+
 class CaptchaField(forms.Field):
     widget = CaptchaWidget
 

apps/discussion/admin.py

     def get_form(self, request, obj=None, **kwargs):
         form_cls = super(CommentNodeAdmin, self).get_form(request, obj, **kwargs)
 
-        if obj is not None: # additional interface features in admin 
+        if obj is not None: # additional interface features in admin
             if obj.object_id and obj.content_type: # link to parent object
                 object_url = urlreverse('admin', args=['%s/%s/%d' % (obj.content_type.app_label, obj.content_type.model, obj.object_id)])
                 form_cls.base_fields['object_id'].help_text += u'| <a href="%s">%s</a> |' % (object_url, _('Parent object'))
-            
+
             if obj.reply_to_id: # link to reply_to comment node
                 comment_url = urlreverse('admin', args=['discussion/CommentNode/%d/' % obj.reply_to_id])
                 form_cls.base_fields['object_id'].help_text += u'| <a href="%s">%s</a> |' % (comment_url, _('Parent comment'))

apps/friends/admin.py

     exclude = ('etag', 'bad_dates', 'bad_tags', 'target', 'owner')
     fieldsets = (
         (None, {'fields': ('link', 'name', 'slug', 'feed')}),
-        (_('Extra'), {'classes': ('collapse',), 
+        (_('Extra'), {'classes': ('collapse',),
                       'fields': ('active', 'weight',)}),
         (_('Multiuser/multisite support'), {'classes': ('collapse',),
                                             'fields': ('author', 'site',)}),

apps/friends/managers.py

         # just cut after symbols_limit till las dot
         postdata['spoiler'] = '.'.join(postdata['body'][:symbols_limit].split('.')[:-1])
         postdata['is_full_entry'] = False
-    
+
     # feed already with spoliers ;)
     if postdata['is_full_entry'] and \
        ( postdata['spoiler'].endswith(u'[...]') or

apps/friends/models.py

     rel_family = models.CharField(_('Family relation'), max_length=20, choices=rels.FAMILY_REL, blank=True)
     rel_romantic = models.CharField(_('Romantic relation'), max_length=20, choices=rels.ROMANTIC_REL, blank=True)
     rel_identity = models.CharField(_('Identity relation'), max_length=20, choices=rels.IDENTITY_REL, blank=True)
-    
+
     objects = FriendBlogManager()
-    
+
     def __unicode__(self):
         return u"Friend: %s" % self.name
-    
+
     def save(self, **kwargs):
         self.target = self.__class__._meta.app_label
         super(FriendBlog, self).save(**kwargs)
-    
+
     @property
     def relations(self):
         rels = ['rel_friendship', 'rel_physical', 'rel_professional',
                 'rel_geographical', 'rel_family', 'rel_romantic', 'rel_identity']
         return ' '.join(getattr(self, x) for x in rels if getattr(self, x, None))
-    
+
     @property
     def _post_class(self):
         """
     spoiler = models.TextField()
     is_full_entry = models.BooleanField()
     objects = FriendPostManager()
-    
+
     def __unicode__(self):
         return u"Post %s from %s" % (self.title, self.friend)

apps/friends/urls.py

 urlpatterns = patterns('',
     url(r'^opml/$', views.friends_opml, name='friends_opml'),
     url(r'^fetch/$', views.friends_fetch_feeds, name='friends_fetch_feeds'),
-    url(r'^$', views.friends_index, name='friends_index'),   
+    url(r'^$', views.friends_index, name='friends_index'),
     url('^(?P<year>\d{4})/(?P<month>\d{2})/(?P<day>\d{2})/$', views.friends_archive_day, name='friends_day_archive'),
     url('^(?P<year>\d{4})/(?P<month>\d{2})/$', views.friends_archive_month, name='friends_month_archive'),
     url('^(?P<year>\d{4})/$', views.friends_archive_year, name='friends_year_archive'),

apps/friends/views.py

     required body key with list of dict-like outlines.
     For outline required keys are: type, text, xmlUrl.
     Optional one are: description, htmlUrl, language.
-    
+
     Example:
         opmldata = {
             ownerId = 'http://pyobject.ru/about/',
                      description='204 No Content Blog'),
             ]
         }
-    
+
     Set arg flattened to False if you want to get ETree
     istead of xml string.
     """
         year=year,
         date_field='posted',
         queryset=FriendPost.objects.active(),
-        template_name='friends/archive_year.html'        
+        template_name='friends/archive_year.html'
     )
 
 def friends_archive_month(request, year, month):
     month:
       (date) this month
     next_month:
-      (date) the first day of the next month, 
+      (date) the first day of the next month,
       or None if the next month is in the future
     previous_month:
       (date) the first day of the previous month
         month_format='%m',
         date_field='posted',
         queryset=FriendPost.objects.active(),
-        template_name='friends/index.html'        
+        template_name='friends/index.html'
     )
 
 def friends_archive_day(request, year, month, day):
         day=day,
         date_field='posted',
         queryset=FriendPost.objects.active(),
-        template_name='friends/index.html'        
+        template_name='friends/index.html'
     )
 
 def friends_fetch_feeds(request):
         return HttpResponseForbidden("Only superuser can fetch friends feeds")
     FriendBlog.objects.fetch_feeds()
     return HttpResponseRedirect(reverse('friends_index'))
-        
+

apps/life/adapters/__init__.py

 def identify_flow(flow_link):
     """
     Identify given flow by it's link
-    
+
     Always return one of ADAPTERS
     """
     found = None

apps/life/adapters/bitbucket.py

 def _get_user_feed_for(feed_url):
     """
     Return user this activity feed for
-    
+
     >>> _get_user_feed_for('http://bitbucket.org/j2a/atom/feed/')
     'j2a'
     """

apps/life/adapters/juick.py

 def _get_micropost_number(micropost_url):
     """
     Return micropost number by it's url
-    
+
     >>> _get_micropost_number('http://juick.com/j2a/110618')
     '#110618'
-    
+
     >>> _get_micropost_number('http://juick.com/110618')
     '#110618'
     """

apps/life/adapters/slideshare.py

     """
     Remove ambigous info (from <user> and tags)
     from SlideShare's item text
-    
+
     >>> _clean_body('<div class="snap_preview"><img src="http://cdn.slidesharecdn.com/sqlalchemy-seminar-090427104510-phpapp01-thumbnail-2?1240847701" alt ="" style="border:1px solid #C3E6D8;float:right;" /> <p>from: <a href="http://www.slideshare.net/j2a">j2a</a> 2 weeks ago</p><p>Seminar topic about SQLAlchemy -- Pythonic ORM and SQL toolkit.</p><p>Tags: <a style="text-decoration:underline;" href="http://slideshare.net/tag/python">python</a> <a style="text-decoration:underline;" href="http://slideshare.net/tag/sqlalchemy">sqlalchemy</a> </p></div>')
     '<p>Seminar topic about SQLAlchemy -- Pythonic ORM and SQL toolkit.</p>'
     """

apps/life/adapters/twitter.py

 def _make_links(tweet):
     """
     Got tweet text and make links to @users and #keywords
-    
+
     >>> _make_links('@yurevich looks good')
     '<a href="http://twitter.com/yurevich">@yurevich</a> looks good'
-    
+
     >>> _make_links('example for @yurevich')
     'example for <a href="http://twitter.com/yurevich">@yurevich</a>'
-    
+
     >>> _make_links('examples for @yurevich and @ingspree.')
     'examples for <a href="http://twitter.com/yurevich">@yurevich</a> and <a href="http://twitter.com/ingspree">@ingspree</a>.'
-    
+
     >>> _make_links('let\\'s got to #rupyru')
     'let\\'s got to <a href="http://twitter.com/#search?q=%23rupyru">#rupyru</a>'
 
     >>> _make_links('let\\'s got to #rupyru')
     'let\\'s got to <a href="http://twitter.com/#search?q=%23rupyru">#rupyru</a>'
-    
+
     >>> _make_links('@ingspree, let\\'s got to #rupyru')
     '<a href="http://twitter.com/ingspree">@ingspree</a>, let\\'s got to <a href="http://twitter.com/#search?q=%23rupyru">#rupyru</a>'
-    
+
     """
     for pattern, repl in (USER_SUB, KEYWORD_SUB):
         tweet = re.sub(pattern, repl, tweet)
 def _clean_name(tweet):
     """
     Remove amibgous user name from feed
-    
+
     >>> _clean_name('yurevich: jibjib works!')
     u'jibjib works!'
     """
     return tweet.split(u': ', 1)[1]
-    
+
 def _get_tweet_number(tweet_url):
     """
     Return tweet number by it's url
-    
+
     >>> _get_tweet_number('http://twitter.com/yurevich/statuses/1808292157')
     '#1808292157'
-    
+
     >>> _get_tweet_number('http://twitter.com/ingspree/status/1808006316')
     '#1808006316'
     """

apps/middleware/profile.py

             sys.stdout = old_stdout
             response.content = '<pre>%s</pre>' % out.getvalue()
         return response
-    
+

apps/nebula/models.py

                 feed_location = feedfinder.feed(self.link)
             else:
                 logging.error('Neither link nor feed location is not available')
-            
+
             if not self.feed and feed_location:
                 logging.info('Updating blog feed url from feed -- %s' % feed_location)
                 self.feed = feed_location

apps/nebula/time_utilities.py

     def _isdst(self, dt):
         # We can't use mktime here. It is unstable when deciding if
         # the hour near to a change is DST or not.
-        # 
+        #
         # timestamp = time.mktime((dt.year, dt.month, dt.day, dt.hour,
         #                         dt.minute, dt.second, dt.weekday(), 0, -1))
         # return time.localtime(timestamp).tm_isdst
     else:
         next_month = the_date.month + 1
         next_year = the_date.year
-        
+
     if the_date.month <= 1:
         prev_month = 12
         prev_year = the_date.year - 1
     else:
         prev_month = the_date.month - 1
         prev_year = the_date.year
-    
+
     if the_date.month == now.month and the_date.year == now.year:
         next_month_dt = now
     else:
     prev_month_dt = date(prev_year, prev_month, 1)
 
     return next_month_dt, prev_month_dt
-    
+
 def days_in_month(year,month):
     year = str(year)
     if isinstance(month, unicode):
     else:
         month += 1
     return (date(year, month+1, 1) - timedelta(days=1)).day
-    
+
 def get_now():
     now = date.today()
     current_month = now.month
     current_year = now.year
     current_day = now.day
     return now, current_year, current_month, current_day
-    
+
 def convert_short_month_to_int(month):
     if isinstance(month, unicode):
        return int(date(*time.strptime(month, '%b')[:3]).month)
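
As a side note (not part of this changeset): the value that days_in_month computes above is also available directly from the standard library, assuming the goal is simply the number of days in a given month:

    import calendar

    # monthrange() returns (weekday of the 1st, number of days in the month)
    calendar.monthrange(2009, 2)[1]   # -> 28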

apps/nebula/utilities.py

     body = headings_start.sub('<p class="heading">', body)
     body = headings_end.sub('</p>', body)
     body = comments.sub('', body)
-    
-    # Remove junky feedburner links:  
+
+    # Remove junky feedburner links:
     # Note, we don't remove all links that reference feedburner,
     #  only those which contain image elements that reference
     #  feedburner.
-    
+
     # You cannot simply remove all links that point to feedburner
     #  because some publishers use a feature that rewrites all links
     #  in the content to proxy through FB for tracking purposes.
         for i in images:
             # Remove the parent link (and by association, the image)
             i.parent.extract()
-        body = unicode(soup) # Using unicode to be nice, I guess. str() 
+        body = unicode(soup) # Using unicode to be nice, I guess. str()
                              #  might work just as well.
     return body.strip()
-    
+
 def clean_title(title):
     if title:
         bracketed_text = re.compile(r'\[(.*?)\]')
     for entry in d.entries:
         created = False
         active = True
-        
+
         guid = entry.get('guid', entry.get('link'))
-        
+
         if not guid:
             logging.warning('Entry %r from feed have %s no guid' % (entry.title, blog.feed))
             continue
-        
+
         try:
             existing_post = FeedPostClass.objects.get(guid__iexact=guid)
             continue
         except FeedPostClass.DoesNotExist:
             logging.debug('Post %r from feed %s does not already exist in DB' % (guid, blog.feed))
             pass
-        
+
         date_posted = entry.get('modified_parsed', None)
         if date_posted:
             date_posted = time_to_datetime(date_posted)
         if body != '':
             body = clean_body(body)
         if title == body:
-            body = '' 
+            body = ''
         if title != '':
             title = clean_title(title)
         link = entry.get('feedburner_origlink', entry.get('link', None))
         #title = title.encode('ascii', 'xmlcharrefreplace')
         #if body:
         #    body = body.encode('ascii', 'xmlcharrefreplace')
-        #author = None 
+        #author = None
         author = entry.get('author_detail')
         if not author:
             author = entry.get('author', '')
         #    author = author.encode('ascii', 'xmlcharrefreplace')
         #else:
         #    author = ''
-        
+
         # Process tags if they exist
         tags = entry.get('tags', '')
         if tags != '':
             num_with_tags += 1
             tags = ' '.join([tag.term.lower() for tag in tags])
             logging.debug('Found tags for entry %r from feed %s: %s' % (guid, blog.feed, tags,))
-        
+
         # shorten url if length bigger than 255
         if len(link) >= 255:
             link = shorten_url(link)
-        
+
         # calls callback filter for entry
         defaults = {
             'blog'  : blog,
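
The comments in clean_body above describe the feedburner scrubbing rule: drop only those links whose child image references feedburner, and leave links that merely proxy through feedburner for tracking alone. A minimal standalone sketch of that pattern, assuming BeautifulSoup 3 (the function name here is illustrative, not the module's own):

    import re
    from BeautifulSoup import BeautifulSoup

    def strip_feedburner_links(html):
        soup = BeautifulSoup(html)
        # Remove only links whose <img> points at feedburner; plain links that
        # simply proxy through feedburner for tracking are left untouched.
        for img in soup.findAll('img', src=re.compile(r'feedburner')):
            if img.parent is not None and img.parent.name == 'a':
                img.parent.extract()   # drops the <a> and its <img> together
        return unicode(soup).strip()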

apps/nebula/views.py

         paginate_by = 20,
         page = page,
     )
-  
+
 def blog_list(request):
     """
     Blog list
         queryset = AggregatedBlog.objects.filter(active=True),
         template_name = 'nebula/aggregatedblog_list.html',
     )
-  
+
 def blog_detail(request, slug):
     """
     Blog detail

apps/openidconsumer/middleware.py

 class OpenIDMiddleware(object):
     """
-    Populate request.openid and request.openids with their openid. This comes 
-    eithen from their cookie or from their session, depending on the presence 
+    Populate request.openid and request.openids with their openid. This comes
+    eithen from their cookie or from their session, depending on the presence
     of OPENID_USE_SESSIONS.
     """
     def process_request(self, request):

apps/robots/models.py

 
 class Url(models.Model):
     """
-    Defines a URL pattern for use with a robot exclusion rule. It's 
+    Defines a URL pattern for use with a robot exclusion rule. It's
     case-sensitive and exact, e.g., "/admin" and "/admin/" are different URLs.
     """
     pattern = models.CharField(_('pattern'), max_length=255, help_text=_(
                                "Case-sensitive. A missing trailing slash does al"
                                "so match to files which start with the name of "
                                "the pattern, e.g., '/admin' matches /admin.html "
-                               "too. Some major search engines allow an asterisk"  
+                               "too. Some major search engines allow an asterisk"
                                " (*) as a wildcard and a dollar sign ($) to "
                                "match the end of the URL, e.g., '/*.jpg$'."))
     class Meta:
 class Rule(models.Model):
     """
     Defines an abstract rule which is used to respond to crawling web robots,
-    using the robot exclusion standard, a.k.a. robots.txt. It allows or 
+    using the robot exclusion standard, a.k.a. robots.txt. It allows or
     disallows the robot identified by its user agent to access the given URLs.
     The Site contrib app is used to enable multiple robots.txt per instance.
     """

apps/robots/views.py

 USE_SITEMAP = getattr(settings, 'ROBOTS_USE_SITEMAP', True)
 SITEMAP_URL = getattr(settings,'ROBOTS_SITEMAP_URL', None)
 
-def rules_list(request, template_name='robots/rule_list.html', 
+def rules_list(request, template_name='robots/rule_list.html',
         mimetype='text/plain', status_code=200):
     """
     Returns a generated robots.txt file with correct mimetype (text/plain),
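
For illustration only (not part of this changeset): with the pattern semantics described in the Url help text above, a rule disallowing '/admin' and '/*.jpg$' for every user agent would render to a robots.txt along these lines (example.com and the sitemap path are placeholders):

    User-agent: *
    Disallow: /admin
    Disallow: /*.jpg$
    Sitemap: http://example.com/sitemap.xml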

apps/sape/util.py

 
         random.shuffle(self.server_list)
 
-    
+
     def fetch_remote_file(self, host, path):
         old_timeout = socket.getdefaulttimeout()
         socket.setdefaulttimeout(self.socket_timeout)
 
 
 class SapeClient(SapeBase):
-    
+
     def __init__(self, *args, **kwargs):
         self.links_delimiter = ''
         self.links = []
         self.links_page = []
-        
+
         super(SapeClient, self).__init__(self, *args, **kwargs)
         self.set_data(self.load_data())
 
             self.links_page = self.links_page[number:]
 
             links = map(self.decode, links)
-            if join: 
+            if join:
                 html = self.links_delimiter.join(links)
                 if self.is_our_bot:
                     html = '<sape_noindex>%s</sape_noindex>' % html
-                return html 
+                return html
             else:
                 return links
         else:

apps/tagging_autocomplete/widgets.py

 
 class TagAutocomplete(Input):
     input_type = 'text'
-    
+
     def render(self, name, value, attrs=None):
         json_view = reverse('tagging_autocomplete-list')
         html = super(TagAutocomplete, self).render(name, value, attrs)
         js = u'<script type="text/javascript">jQuery().ready(function() { jQuery("#%s").autocomplete("%s", { multiple: true }); });</script>' % (attrs['id'], json_view)
         return mark_safe("\n".join([html, js]))
-    
+
     class Media:
         js_base_url = getattr(settings, 'TAGGING_AUTOCOMPLETE_JS_BASE_URL','%s/jquery-autocomplete' % settings.MEDIA_URL)
         css = {