Commits

Olemis Lang committed b016ee5 Merge

TracGViz : Merge 67f90e04d769620a50e1a36f incorporating XSV typecasts and dutest-trac dependency

  • Participants
  • Parent commits 67f90e0, 620a50e

Comments (0)

Files changed (27)

 .\#*
 # Temporary files used by the vim editor.
 .*.swp
+.*.swo
 # A hidden file created by the Mac OS X Finder.
 .DS_Store
 # Compiled Python scripts

File trac-dev/gviz/TODO

 Outstanding tasks
 -----------------
 
+X Migrate tracgviz.testing to a new package dutest-trac
+
+X Remove relative import statements
+
+X Error message for GViz content providers when guessed mimetype is still `None`
+
+X Move `dutest` to `tests_require` rather than `install_requires`.
+
+X Naive typecasts in CSV to GViz MIME converter.
+
 ~ Write tests for data sources included in TracGViz 1.3.4 that are 
   not about version control.
 
 ~ Upgrade GViz providers for Trac=1.0 and Apache(TM) Bloodhound.
 
+- Test error message for GViz content providers when guessed mimetype is 
+  still `None`
+
+- Typecasts in CSV to GViz MIME converter powered by PyICU.
+
+- Test typecasts in CSV to GViz MIME converter.
+
 - Use FSL to select files in GViz providers ?
 
 - Fix error using Python 2.6 `hmac` hashing in Ubuntu 10.04.

File trac-dev/gviz/setup.py

                """
 
 setup(
-  name=DIST_NM,
-  version=latest,
-  description=DESC.split('\n', 1)[0],
-  author='Olemis Lang',
-  author_email='olemis@gmail.com',
-  maintainer='Olemis Lang',
-  maintainer_email='olemis@gmail.com',
-  url='https://dataviz.blood-hound.net/wiki/Packages/%s' % (DIST_NM,),
-  download_url='http://pypi.python.org/packages/2.5/%s/%s/%s-%s-py2.5.egg' % \
-                                (DIST_NM[0], DIST_NM, DIST_NM, latest,),
-  requires = ['trac', 'tracrpc', 'gviz_api', 'pygments', 'dutest'],
-  tests_require = ['dutest>=0.2.4'],
-  install_requires = [
-      'setuptools>=0.6b1',
-      'Trac>=0.11',
-      'TracXMLRPC',
-      'Pygments>=1.0',
-      'dutest>=0.2.4',
-      'PyICU',
-  ],
-  package_dir = dict([p, i[0]] for p, i in PKG_INFO.iteritems()),
-  packages = PKG_INFO.keys(),
-  package_data = dict([p, i[1]] for p, i in PKG_INFO.iteritems()),
-  include_package_data=True,
-  provides = ['%s (%s)' % (p, latest) for p in PKG_INFO.keys()],
-  obsoletes = ['%s (>=%s.0.0, <%s)' % (p, versions[-1][0], latest) \
-                for p in PKG_INFO.keys()],
-  entry_points = ENTRY_POINTS,
-  classifiers = cats,
-  long_description= DESC,
-  cmd_class = dist_cmds,
-  )
+    name=DIST_NM,
+    version=latest,
+    description=DESC.split('\n', 1)[0],
+    author='Olemis Lang',
+    author_email='olemis@gmail.com',
+    maintainer='Olemis Lang',
+    maintainer_email='olemis+trac@gmail.com',
+    url='https://dataviz.blood-hound.net/wiki/Packages/%s' % (DIST_NM,),
+    download_url='http://pypi.python.org/packages/2.6/%s/%s/%s-%s-py2.6.egg' % \
+                                  (DIST_NM[0], DIST_NM, DIST_NM, latest,),
+    requires = ['trac', 'tracrpc', 'gviz_api', 'pygments', 'dutest'],
+    install_requires = ['setuptools>=0.6b1',
+                        'Trac>=0.11',
+                        'Pygments>=1.0',
+                        'TracXMLRPC',
+                        'PyICU',],
+    tests_require = ['dutest>=0.2.4',
+                     'dutest-trac',],
+    package_dir = dict([p, i[0]] for p, i in PKG_INFO.iteritems()),
+    packages = PKG_INFO.keys(),
+    package_data = dict([p, i[1]] for p, i in PKG_INFO.iteritems()),
+    include_package_data=True,
+    provides = ['%s (%s)' % (p, latest) for p in PKG_INFO.keys()],
+    obsoletes = ['%s (>=%s.0.0, <%s)' % (p, versions[-1][0], latest) \
+                  for p in PKG_INFO.keys()],
+    entry_points = ENTRY_POINTS,
+    classifiers = cats,
+    long_description= DESC,
+    cmdclass = dist_cmds,
+    )
+

File trac-dev/gviz/tracgviz/__init__.py

             'GVizCSVEncoder', 'GVizHtmlEncoder'
 
 try:
-    from api import TracGVizSystem
-    from rpc import *
-    from stdhash import *
-    from proto import GViz_0_5
-    from stdfmt import GVizJsonEncoder, GVizHtmlEncoder, GVizCSVEncoder
-    from extfmt import *
-    from ticket import *
-    from wiki import *
-    from search import *
-    from timeline import *
-    from vcs import *
-    from xsv import GvizXSVConverter
-    from attachment import *
+    from tracgviz.api import TracGVizSystem
+    from tracgviz.rpc import *
+    from tracgviz.stdhash import *
+    from tracgviz.proto import GViz_0_5
+    from tracgviz.stdfmt import GVizJsonEncoder, GVizHtmlEncoder, GVizCSVEncoder
+    from tracgviz.extfmt import *
+    from tracgviz.ticket import *
+    from tracgviz.wiki import *
+    from tracgviz.search import *
+    from tracgviz.timeline import *
+    from tracgviz.vcs import *
+    from tracgviz.xsv import GvizXSVConverter
+    from tracgviz.attachment import *
     from ig import *
     msg = 'Ok'
 except Exception, exc:

File trac-dev/gviz/tracgviz/aggregate.py

File contents unchanged.

File trac-dev/gviz/tracgviz/api.py

 try:
     import gviz_api
 except ImportError:
-    import _gviz_api as gviz_api
+    import tracgviz._gviz_api as gviz_api
 import sys
 from xmlrpclib import DateTime
 from datetime import datetime
 
 class GVizException(Exception):
-  r"""Base class for all exception types defined in this package"""
+    r"""Base class for all exception types defined in this package"""
 
 class GVizProviderNotFoundError(GVizException):
     r"""Exception raised to denote that there is no GViz provider able
     
     # Public API
     def get_formatted_data(self, req, path, **params):
-      r"""Simulate a request to the data source in the same context 
-      defined by another request.
-      
-      @param req          the "real" request.
-      @param path         a string containing all the information 
-                          included in the URI after `/gviz` prefix
-                          (e.g. `/ticket/milestone?filter_by=goal[12]`).
-      @param params       further custom arguments. They supersede 
-                          those present in `path` and should be 
-                          supported by the target data provider.
-      @return             the same data that'd be returned on 
-                          accessing the data provider at the requested 
-                          `path`.
-      @throws TracError   if an error is raised or a problem is detacted.
-      """
-      from util import RedirectIntercept, IoIntercept, StringIO
+        r"""Simulate a request to the data source in the same context 
+        defined by another request.
+        
+        @param req          the "real" request.
+        @param path         a string containing all the information 
+                            included in the URI after `/gviz` prefix
+                            (e.g. `/ticket/milestone?filter_by=goal[12]`).
+        @param params       further custom arguments. They supersede 
+                            those present in `path` and should be 
+                            supported by the target data provider.
+        @return             the same data that'd be returned on 
+                            accessing the data provider at the requested 
+                            `path`.
+        @throws TracError   if an error is raised or a problem is detected.
+        """
+        from tracgviz.util import RedirectIntercept, IoIntercept, StringIO
 
-      
-      path = '/gviz' + path
-      self.log.debug("IG: Accessing data source `%s` directly", path)
-      strm = StringIO()
-      try:
-        reqi = IoIntercept(req, strm)
-        reqi = RedirectIntercept(reqi, self.env, **params)
+        path = '/gviz' + path
+        self.log.debug("IG: Accessing data source `%s` directly", path)
+        strm = StringIO()
         try:
-          reqi.redirect(path)
-        except RequestDone:
-          pass
-        val = strm.getvalue()
-        self.log.debug("IG: Returning data: %s", val)
-        return val
-      except TracError:
-        self.log.exception("IG: Error accessing %s directly", path)
-        raise
-      except Exception, exc:
-        self.log.exception("IG: Error accessing %s directly", path)
-        raise TracError("Error %s : %s" % (exc.__class__.__name__, \
-                            exc.message))
-      finally:
-        strm.close()
+            reqi = IoIntercept(req, strm)
+            reqi = RedirectIntercept(reqi, self.env, **params)
+            try:
+                reqi.redirect(path)
+            except RequestDone:
+                pass
+            val = strm.getvalue()
+            self.log.debug("IG: Returning data: %s", val)
+            return val
+        except TracError:
+            self.log.exception("IG: Error accessing %s directly", path)
+            raise
+        except Exception, exc:
+            self.log.exception("IG: Error accessing %s directly", path)
+            raise TracError("Error %s : %s" % (exc.__class__.__name__, \
+                                exc.message))
+        finally:
+            strm.close()
 
 class IGVizTableEncoder(Interface):
     r"""Implementing this interface is mandatory for every component

File trac-dev/gviz/tracgviz/attachment.py

 """
 __author__ = 'Olemis Lang'
 
+import codecs
+from itertools import chain, count
+
 from trac.core import Component, implements
 from trac.attachment import Attachment
 from trac.mimeview.api import Mimeview, content_to_unicode
 
-from api import gviz_col, gviz_param, IGVizDataProvider, GVizBadRequestError
-from util import GVizContentProvider
-
-from itertools import chain, count
+from tracgviz.api import gviz_col, gviz_param, IGVizDataProvider, \
+                         GVizBadRequestError
+from tracgviz.util import GVizContentProvider
 
 __all__ = 'GVizAttachmentProvider',
 
         r"""Retrieve a file object used to read attachment contents.
         """
         try:
-          realm, parent, fnm = (req.args[x] for x in ('realm', 'id', 'name'))
+            realm, parent, fnm = (req.args[x] for x in ('realm', 'id', 'name'))
         except KeyError:
-          raise GVizBadRequestError("Missing realm, resource ID or file name.")
+            raise GVizBadRequestError("Missing realm, resource ID or file name.")
         a = Attachment(self.env, realm, parent, fnm)
         req.perm(a.resource).require('ATTACHMENT_VIEW')
         fd = a.open()
         content, mimetype, rewinded = self.guess_mimetype(fd, fnm)
         if rewinded:
-          mimesys = Mimeview(self.env)
-          charset = mimesys.get_charset(content, mimetype)
-          import codecs
-          fd.close()
-          fd = codecs.open(a.path, 'rb', encoding=charset)
-          return fd, mimetype
+            mimesys = Mimeview(self.env)
+            charset = mimesys.get_charset(content, mimetype)
+            fd.close()
+            fd = codecs.open(a.path, 'rb', encoding=charset)
+            return fd, mimetype
         else :
-          return content_to_unicode(self.env, content, mimetype)
+            return content_to_unicode(self.env, content, mimetype)
     
     # IGVizDataProvider methods
     @gviz_param('realm', "the realm of the resource owning the " \
     @gviz_param('id', "an `id`, which uniquely identifies a resource " \
                       "within its realm (e.g. wiki page name or ticket ID).")
     @gviz_param('name', "the attachment file name.")
+    @gviz_param('colsdef', "Customize column types using 'type:format' "
+                           "expressions.")
     def get_data(self, req, tq, realm, id, name, **tqx):
         r"""Retrieve attachment contents if possible (i.e. if there 
         is a MIME converter available and able to transform the input 

File trac-dev/gviz/tracgviz/extfmt.py

 __all__ = 'GVizMoinEncoder',
 
 from trac.core import Component, implements
-from api import IGVizTableEncoder
-from util import render_gviz_value
+
+from tracgviz.api import IGVizTableEncoder
+from tracgviz.util import render_gviz_value
 
 class GVizMoinEncoder(Component):
     r"""MoinMoin wiki encoder for Google Visualization API.
                             for col in table._DataTable__columns])
         
         def rowvalues(row):
-          for col in columns_order:
-            # Do not display None values
-            value = row.get(col, None)
-            if value is None:
-              yield ""
-            else:
-#              value = table.SingleValueToJS(value, col_dict[col]["type"])
-              value = render_gviz_value(value, col_dict[col]["type"], \
-                                        table, self.env)
-              for changes in [('||', '| |'), ('\n', ' '), ('\r', ' '),]:
-                value = value.replace(*changes)
-              if isinstance(value, tuple):
-                # We have a formatted value as well ... show it
-                value = value[1]
-              yield value
+            for col in columns_order:
+                # Do not display None values
+                value = row.get(col, None)
+                if value is None:
+                    yield ""
+                else:
+#                    value = table.SingleValueToJS(value, col_dict[col]["type"])
+                    value = render_gviz_value(value, col_dict[col]["type"], \
+                                              table, self.env)
+                    for changes in [('||', '| |'), ('\n', ' '), ('\r', ' '),]:
+                        value = value.replace(*changes)
+                    if isinstance(value, tuple):
+                        # We have a formatted value as well ... show it
+                        value = value[1]
+                    yield value
         
         for row in table._PreparedData(()):
             out+= '||'

File trac-dev/gviz/tracgviz/grammar.py

File contents unchanged.

File trac-dev/gviz/tracgviz/gvizql.py

File contents unchanged.

File trac-dev/gviz/tracgviz/proto.py

 from trac.resource import ResourceNotFound
 from trac.ticket.query import QuerySyntaxError
 
-from api import IGVizProtocolHandler
-from util import BaseGVizHandler, send_response
+from tracgviz.api import IGVizProtocolHandler
+from tracgviz.util import BaseGVizHandler, send_response
 
 # GViz-specific exceptions
-from api import GVizDataNotModifiedError, GVizNotSupportedError, \
+from tracgviz.api import GVizDataNotModifiedError, GVizNotSupportedError, \
                 GVizUnknownProviderError, GVizDataNotModifiedError, \
                 GVizBadRequestError, GVizNotAuthenticatedError, \
                 GVizInvalidConfigError
             contents = e.stream_contents(_table)
             if out == 'json':
                 if self.hash_obj is None and self._hlib is not None :
-                  self.hash_obj = self._hlib.new_hash_obj(self.hash_name)
+                    self.hash_obj = self._hlib.new_hash_obj(self.hash_name)
                 if self.hash_obj is not None:
-                  hash_obj = self.hash_obj.copy()
-                  hash_obj.update(contents)
-                  hash_str = hash_obj.hexdigest()
-                  if sig is not None and hash_str == sig:
-                    exc = GVizDataNotModifiedError('')
-                    self._error(_req, exc, reqId, version, responseHandler)
-                  hash_str = "'sig':'%s'," % (hash_str,)
+                    hash_obj = self.hash_obj.copy()
+                    hash_obj.update(contents)
+                    hash_str = hash_obj.hexdigest()
+                    if sig is not None and hash_str == sig:
+                        exc = GVizDataNotModifiedError('')
+                        self._error(_req, exc, reqId, version, responseHandler)
+                    hash_str = "'sig':'%s'," % (hash_str,)
                 else:
                     hash_str = ""
                 contents = "%(h)s({'version':'%(v)s', " \
         else:
             exc = GVizNotSupportedError('Invalid format id %s' % (out,))
             self._error(_req, exc, reqId, version, responseHandler)
+

File trac-dev/gviz/tracgviz/rpc.py

 from urlparse import urlunparse, urlparse
 import xmlrpclib
 
-from util import get_column_desc, rpc_to_datetime, rpc_opt_sigs, \
-                  compile_pattern
+from tracgviz.util import get_column_desc, rpc_to_datetime, rpc_opt_sigs, \
+                          compile_pattern
 
 __metaclass__ = type
 
               timeline.ticket_show_details option in trac.ini to true.
         """
         if filters is not None :
-          if not filters:
-            # No filter selected
-            return []
-          # Filter unwanted filters ... 8$
-          filters = [f for f, _ in self.getEventFilters(req) \
-                      if f in filters]
-          if not filters:
-            # No filter selected
-            return []
+            if not filters:
+                # No filter selected
+                return []
+            # Filter unwanted filters ... 8$
+            filters = [f for f, _ in self.getEventFilters(req) \
+                       if f in filters]
+            if not filters:
+                # No filter selected
+                return []
         else :
-          filters = list(f for f, _ in self.getEventFilters(req))
+            filters = list(f for f, _ in self.getEventFilters(req))
         
         if start is None:
             start = _epoc
     implements(IXMLRPCHandler)
     
     def __init__(self):
-      self.repmdl = ReportModule(self.env)
+        self.repmdl = ReportModule(self.env)
     
     # IXMLRPCHandler methods
     def xmlrpc_namespace(self):
         sql = ("SELECT id FROM report ORDER BY id")
         cursor = db.cursor()
         try:
-          cursor.execute(sql)
-          result = cursor.fetchall() or []
-          return (x[0] for x in result)
+            cursor.execute(sql)
+            result = cursor.fetchall() or []
+            return (x[0] for x in result)
         finally:
-          cursor.close()
+            cursor.close()
     
     def get(self, req, id):
         r"""Return information about an specific report as a dict 
                           request. Otherwise it is empty.
         """
         if 'REPORT_SQL_VIEW' in req.perm:
-          sql = "SELECT id,title,query,description from report " \
+            sql = "SELECT id,title,query,description from report " \
                    "WHERE id=%s" % (id,)
         else :
-          sql = "SELECT id,title,NULL,description from report " \
+            sql = "SELECT id,title,NULL,description from report " \
                    "WHERE id=%s" % (id,)
         db = self.env.get_db_cnx()
         cursor = db.cursor()
         try:
-          cursor.execute(sql)
-          for report_info in cursor:
-              return dict(zip(['id','title','query','description'], report_info))
-          else:
-              return None
+            cursor.execute(sql)
+            for report_info in cursor:
+                return dict(zip(['id','title','query','description'],
+                            report_info))
+            else:
+                return None
         finally:
-          cursor.close()
+            cursor.close()
     
     def _execute_sql(self, req, id, sql, limit=0):
         r"""Execute a SQL report and return no more than `limit` rows 
         repmdl = self.repmdl
         db = self.env.get_db_cnx()
         try:
-          args = repmdl.get_var_args(req)
+            args = repmdl.get_var_args(req)
         except ValueError,e:
-          raise ValueError(_('Report failed: %(error)s', error=e))
+            raise ValueError(_('Report failed: %(error)s', error=e))
         try:
             try:
-              # Paginated exec (>=0.11)
-              exec_proc = repmdl.execute_paginated_report
-              kwargs = dict(limit=limit)
+                # Paginated exec (>=0.11)
+                exec_proc = repmdl.execute_paginated_report
+                kwargs = dict(limit=limit)
             except AttributeError:
-              # Legacy exec (<=0.10)
-              exec_proc = repmdl.execute_report
-              kwargs = {}
+                # Legacy exec (<=0.10)
+                exec_proc = repmdl.execute_report
+                kwargs = {}
             return exec_proc(req, db, id, sql, args, **kwargs)[:2]
         except Exception, e:
             db.rollback()
         sql = self.get(req, id)['query']
         query = ''.join([line.strip() for line in sql.splitlines()])
         if query and (query[0] == '?' or query.startswith('query:?')):
-          raise NotImplementedError('Saved custom queries specified ' \
-                                  'using URLs are not supported.')
+            raise NotImplementedError('Saved custom queries specified ' \
+                                      'using URLs are not supported.')
         elif query.startswith('query:'):
-          query = Query.from_string(self.env, query[6:], report=id)
-          server_url = urlparse(req.base_url)
-          server_href = Href(urlunparse((server_url.scheme, \
-                                        server_url.netloc, \
-                                        '', '', '', '')))
-          def rel2abs(row):
-            """Turn relative value in 'href' into absolute URLs."""
-            self.log.debug('IG: Query Row %s', row)
-            url = row['href']
-            urlobj = urlparse(url)
-            if not urlobj.netloc:
-              row['href'] = server_href(url)
-            return row
+            query = Query.from_string(self.env, query[6:], report=id)
+            server_url = urlparse(req.base_url)
+            server_href = Href(urlunparse((server_url.scheme, \
+                                           server_url.netloc, \
+                                           '', '', '', '')))
+            def rel2abs(row):
+                """Turn relative value in 'href' into absolute URLs."""
+                self.log.debug('IG: Query Row %s', row)
+                url = row['href']
+                urlobj = urlparse(url)
+                if not urlobj.netloc:
+                    row['href'] = server_href(url)
+                return row
             
-          return imap(rel2abs, query.execute(req))
+            return imap(rel2abs, query.execute(req))
         else:
-          cols, results = self._execute_sql(req, id, sql)
-          return (dict(zip(cols, list(row))) for row in results)
+            cols, results = self._execute_sql(req, id, sql)
+            return (dict(zip(cols, list(row))) for row in results)
     
     def _sql_cursor(self, req, id, sql, args, limit=0, offset=0):
       r"""Retrieve a cursor to access the data returned by a SQL 
       repmdl = self.repmdl
       sql, args, missing_args = repmdl.sql_sub_vars(sql, args)
       if missing_args:
-        self.log.warning('The following arguments are missing: %s',
-            ", ".join(missing_args))
+          self.log.warning('The following arguments are missing: %s',
+                           ", ".join(missing_args))
 
       if not sql:
           raise ValueError(_('Report %(num)s has no SQL query.', num=id))
       self.log.debug('IG: Request args: %r' % req.args)
 
       with self.env.db_query as db:
-        cursor = db.cursor()
+          cursor = db.cursor()
         
-        # The column name is obtained.
-        get_col_name_sql = 'SELECT * FROM ( ' + sql + ' ) AS tab LIMIT 1'
-        cursor.execute(get_col_name_sql, args)
-        self.env.log.debug("IG: Query SQL(Get col names): " + get_col_name_sql)
-        return cursor
+          # The column name is obtained.
+          get_col_name_sql = 'SELECT * FROM ( ' + sql + ' ) AS tab LIMIT 1'
+          cursor.execute(get_col_name_sql, args)
+          self.env.log.debug("IG: Query SQL(Get col names): " + get_col_name_sql)
+          return cursor
     
     def _sql_columns(self, req, id, sql):
       r"""Retrieve the description of columns returned by a SQL 
       """
       repmdl = self.repmdl
       try:
-        args = repmdl.get_var_args(req)
+          args = repmdl.get_var_args(req)
       except ValueError,e:
-        raise ValueError(_('Report failed: %(error)s', error=e))
+          raise ValueError(_('Report failed: %(error)s', error=e))
       cursor = self._sql_cursor(req, id, sql, args)
       self.log.debug('IG: Cursor desc %s', cursor.description)
       cols = list(get_column_desc(cursor, True))
         sql = self.get(req, id)['query']
         query = ''.join([line.strip() for line in sql.splitlines()])
         if query and (query[0] == '?' or query.startswith('query:?')):
-          raise NotImplementedError('Saved custom queries specified ' \
-                                  'using URLs are not supported.')
+            raise NotImplementedError('Saved custom queries specified ' \
+                                      'using URLs are not supported.')
         elif query.startswith('query:'):
-          query = Query.from_string(self.env, query[6:], report=id)
-          fields = query.fields
-          return [(f['name'], 'string', _(f['label'])) for f in fields] + \
-                  [
-                      ('href', 'string', _('URL')), \
-                      ('id', 'number', _('Ticket')), \
-                  ]
+            query = Query.from_string(self.env, query[6:], report=id)
+            fields = query.fields
+            return [(f['name'], 'string', _(f['label'])) for f in fields] + \
+                    [
+                        ('href', 'string', _('URL')), \
+                        ('id', 'number', _('Ticket')), \
+                    ]
         else:
-          return self._sql_columns(req, id, sql)
+            return self._sql_columns(req, id, sql)
     
     def create(self, req, summary, description, query):
         r"""Create a custom report.
         db = self.env.get_db_cnx()
         cursor = db.cursor()
         try :
-          cursor.execute("INSERT INTO report (title,query,description) " \
-                         "VALUES (%s,%s,%s)", \
+            cursor.execute("INSERT INTO report (title,query,description) " \
+                           "VALUES (%s,%s,%s)", \
                                       (summary, query, description))
-          rid = db.get_last_id(cursor, 'report')
-          db.commit()
-          return rid
+            rid = db.get_last_id(cursor, 'report')
+            db.commit()
+            return rid
         finally :
-          cursor.close()
+            cursor.close()
 
 #--------------------------------------------------
 #   Version Control RPC
 #--------------------------------------------------
 
 def _normalize_timestamp(repos, req, timestamp, default=None):
-  r"""Normalize datetime and revision numbers. Return only 
-  datetime values.
-  """
-  if isinstance(timestamp, datetime):
-    return timestamp
-  elif isinstance(timestamp, xmlrpclib.DateTime):
-    return rpc_to_datetime(timestamp, req)
-  elif isinstance(default, (datetime, date, time)): # Return default
-    return default
-  else:
-    return datetime.now(req.tz)
+    r"""Normalize datetime and revision numbers. Return only 
+    datetime values.
+    """
+    if isinstance(timestamp, datetime):
+        return timestamp
+    elif isinstance(timestamp, xmlrpclib.DateTime):
+        return rpc_to_datetime(timestamp, req)
+    elif isinstance(default, (datetime, date, time)): # Return default
+        return default
+    else:
+        return datetime.now(req.tz)
 
 def _filter_revs(seq, repos, req, start, stop, full, \
-                  accessor=None, log=None):
-  r"""Filter values in seq so that only references to revisions 
-  commited during a time interval be enumerated. 
-  Items are binary tuples of the form `(revision id, changeset object)`.
-  
-  @param seq        original sequence to be filtered.
-  @param repos      the repository managed by VCS.
-  @param req        object containing information about the user 
-                    requesting information in repository and more.
-  @param start      boundary value. Revisions older than 
-                    this value will not be retrieved. Dates 
-                    and revision numbers are both accepted.
-  @param stop       boundary value. Younger revisions 
-                    will not be retrieved. Dates 
-                    and revision numbers are both accepted.
-  @param full       retrieve also the changeset object for revision.
-  @param accesor    a function used to access the revision value 
-                    stored in each item of the input sequence.
-  @return           a sequence of tuples. The firts item is the 
-                    element of the original sequence for which 
-                    revision is in input time interval. The second is 
-                    the changeset object or None (depending upon the 
-                    value of `full` parameter)
-  """
-  if seq is not None:
-    seq = iter(seq)
-  if log is None:
-    class VoidLogger:
-      def __getattr__(self, attrnm):
-        return lambda *args, **kwds: None
-    log = VoidLogger()
-  DO_NOTHING, DO_RETURN, DO_YIELD = xrange(3)
-  load_chgset = True
-  if isinstance(start, types.StringTypes) and \
-      isinstance(stop, types.StringTypes):
-    load_chgset = False
-    if not repos.rev_older_than(stop, start):
-      def cond(rev, chgset):
-        if repos.rev_older_than(rev, start):
-          return DO_RETURN
-        elif repos.rev_older_than(stop, rev):
-          return DO_NOTHING
+                 accessor=None, log=None):
+    r"""Filter values in seq so that only references to revisions 
+    committed during a time interval are enumerated. 
+    Items are binary tuples of the form `(revision id, changeset object)`.
+    
+    @param seq        original sequence to be filtered.
+    @param repos      the repository managed by VCS.
+    @param req        object containing information about the user 
+                      requesting information in repository and more.
+    @param start      boundary value. Revisions older than 
+                      this value will not be retrieved. Dates 
+                      and revision numbers are both accepted.
+    @param stop       boundary value. Younger revisions 
+                      will not be retrieved. Dates 
+                      and revision numbers are both accepted.
+    @param full       retrieve also the changeset object for revision.
+    @param accessor   a function used to access the revision value 
+                      stored in each item of the input sequence.
+    @return           a sequence of tuples. The first item is the 
+                      element of the original sequence for which 
+                      revision is in input time interval. The second is 
+                      the changeset object or None (depending upon the 
+                      value of `full` parameter)
+    """
+    if seq is not None:
+        seq = iter(seq)
+    if log is None:
+        class VoidLogger:
+            def __getattr__(self, attrnm):
+                return lambda *args, **kwds: None
+        log = VoidLogger()
+    DO_NOTHING, DO_RETURN, DO_YIELD = xrange(3)
+    load_chgset = True
+    if isinstance(start, types.StringTypes) and \
+            isinstance(stop, types.StringTypes):
+        load_chgset = False
+        if not repos.rev_older_than(stop, start):
+            def cond(rev, chgset):
+                if repos.rev_older_than(rev, start):
+                    return DO_RETURN
+                elif repos.rev_older_than(stop, rev):
+                    return DO_NOTHING
+                else:
+                    return DO_YIELD
         else:
-          return DO_YIELD
+            return                      # `start` committed after `stop`
+    elif isinstance(start, types.StringTypes):
+        if stop is None:
+            load_chgset = False
+            def cond(rev, chgset):
+                # Process starts in youngest so there is no need to skip revisions
+                return repos.rev_older_than(rev, start) and DO_RETURN or DO_YIELD
+        else:
+            ts = _normalize_timestamp(repos, req, stop)
+            stop = repos.youngest_rev
+            def cond(rev, chgset):
+                if repos.rev_older_than(rev, start):
+                    return DO_RETURN
+                else:
+                    if chgset.date < ts:
+                        return DO_YIELD
+                    else:
+                        return DO_NOTHING  
+    elif isinstance(stop, types.StringTypes):
+        if start is None:
+            start = 0
+            load_chgset = False
+            def cond(rev, chgset):
+                # Start in `stop` and stop in oldest so no need for DO_RETURN 
+                # No need for DO_NOTHING since iterators will start from stop 
+                # (assuming that revision numbers are valid and that's a 
+                # precondition ;)
+                return DO_YIELD
+        else:
+            ts = _normalize_timestamp(repos, req, start, _epoc)
+            def cond(rev, chgset):
+                # We start from `stop` so no need for DO_NOTHING ;)
+                if chgset.date < ts:
+                        return DO_RETURN
+                else:
+                    return DO_YIELD
     else:
-      return                      # `start` committed after `stop`
-  elif isinstance(start, types.StringTypes):
-    if stop is None:
-      load_chgset = False
-      def cond(rev, chgset):
-        # Process starts in youngest so there is no need to skip revisions
-        return repos.rev_older_than(rev, start) and DO_RETURN or DO_YIELD
-    else:
-      ts = _normalize_timestamp(repos, req, stop)
-      stop = repos.youngest_rev
-      def cond(rev, chgset):
-        if repos.rev_older_than(rev, start):
-          return DO_RETURN
-        else:
-          if chgset.date < ts:
-            return DO_YIELD
-          else:
-            return DO_NOTHING  
-  elif isinstance(stop, types.StringTypes):
-    if start is None:
-      start = 0
-      load_chgset = False
-      def cond(rev, chgset):
-        # Start in `stop` and stop in oldest so no need for DO_RETURN 
-        # No need for DO_NOTHING since iterators will start from stop 
-        # (assuming that revision numbers are valid and that's a 
-        # precondition ;)
-        return DO_YIELD
-    else:
-      ts = _normalize_timestamp(repos, req, start, _epoc)
-      def cond(rev, chgset):
-        # We start from `stop` so no need for DO_NOTHING ;)
-        if chgset.date < ts:
-            return DO_RETURN
-        else:
-          return DO_YIELD
-  else:
-    start_ts = _normalize_timestamp(repos, req, start, _epoc)
-    stop_ts = _normalize_timestamp(repos, req, stop)
-    start, stop = 0, repos.youngest_rev
-    def cond(rev, chgset):
-      ts = chgset.date
-      if ts < start_ts:
-          return DO_RETURN
-      elif ts < stop_ts:
-          return DO_YIELD
-      else:
-          return DO_NOTHING
-  # Search backwards
-  load_chgset = load_chgset or full
-  try:
-    while True:         # Stops when StopIteration is raised by seq
-      item = seq.next()
-      log.debug("IG: Processing item %s", item)
-      if accessor:
-        rev = accessor(item)
-      else:
-        rev = item
-      log.debug("IG: Processing revision %s", rev)
-      if repos.authz.has_permission_for_changeset(rev):
-        try:
-          chgset = load_chgset and repos.get_changeset(rev) or None
-          action = cond(rev, chgset)
-        except NoSuchChangeset:
-          continue
-        if action == DO_RETURN:
-            return
-        elif action == DO_YIELD:    # Implicit DO_NOTHING
-          if full:
-            yield item, chgset
-          else:
-            yield item, None
-  except NoSuchChangeset:
-    return
+        start_ts = _normalize_timestamp(repos, req, start, _epoc)
+        stop_ts = _normalize_timestamp(repos, req, stop)
+        start, stop = 0, repos.youngest_rev
+        def cond(rev, chgset):
+            ts = chgset.date
+            if ts < start_ts:
+                    return DO_RETURN
+            elif ts < stop_ts:
+                    return DO_YIELD
+            else:
+                    return DO_NOTHING
+    # Search backwards
+    load_chgset = load_chgset or full
+    try:
+        while True:         # Stops when StopIteration is raised by seq
+            item = seq.next()
+            log.debug("IG: Processing item %s", item)
+            if accessor:
+                rev = accessor(item)
+            else:
+                rev = item
+            log.debug("IG: Processing revision %s", rev)
+            if repos.authz.has_permission_for_changeset(rev):
+                try:
+                    chgset = load_chgset and repos.get_changeset(rev) or None
+                    action = cond(rev, chgset)
+                except NoSuchChangeset:
+                    continue
+                if action == DO_RETURN:
+                        return
+                elif action == DO_YIELD:    # Implicit DO_NOTHING
+                    if full:
+                        yield item, chgset
+                    else:
+                        yield item, None
+    except NoSuchChangeset:
+        return
 
 class VersionControlRPC(Component):
     r""" An interface to Trac's `Repository` and `RepositoryManager`.
         no_depth = depth is None
         already = set()
         if filter_by is not None:
-          filter_by = compile_pattern(filter_by)
+            filter_by = compile_pattern(filter_by)
         
         for item in files:
-          if isinstance(item, types.StringTypes):
-            try:
-              d, node = 0, repos.get_node(item, rev)
-              path = item
-            except NoSuchNode:
-              continue
-            except NoSuchChangeset:
-              return
-          else:
-            d, node = item
-            path = node.path
-          if path not in already: # Dont process filename twice
-            if node.isfile:
-              if (not filter_by) or filter_by.match(path) is not None:
-                yield path
-            elif node.isdir:
-              for child in node.get_entries():
-                if (not filter_by) or \
-                              filter_by.match(child.path) is not None:
-                  yield child.path
-                if child.isdir and rec and (no_depth or d < depth):
-                  files.append([d + 1, child])
-                elif child.isfile:
-                  already.add(child.path)     # Mark filename
+            if isinstance(item, types.StringTypes):
+                try:
+                    d, node = 0, repos.get_node(item, rev)
+                    path = item
+                except NoSuchNode:
+                    continue
+                except NoSuchChangeset:
+                    return
+            else:
+                d, node = item
+                path = node.path
+            if path not in already: # Don't process a filename twice
+                if node.isfile:
+                    if (not filter_by) or filter_by.match(path) is not None:
+                        yield path
+                elif node.isdir:
+                    for child in node.get_entries():
+                        if (not filter_by) or \
+                                filter_by.match(child.path) is not None:
+                            yield child.path
+                        if child.isdir and rec and (no_depth or d < depth):
+                            files.append([d + 1, child])
+                        elif child.isfile:
+                            already.add(child.path)     # Mark filename
+                        else:
+                            self.log.error("Unknown node type %s at %s", \
+                                           node.kind, node.path)
                 else:
-                  self.log.error("Unknown node type %s at %s", \
-                                                node.kind, node.path)
-            else:
-              self.log.error("Unknown node type %s at %s", \
-                                                node.kind, node.path)
-            already.add(path)                 # Mark filename
+                    self.log.error("Unknown node type %s at %s", \
+                                   node.kind, node.path)
+                already.add(path)                 # Mark filename
     
     def getFileAttributes(self, req, files, rev=None):
         r"""Retrieve the attributes of a group of files. The root 
         """
         repos = RepositoryManager(self.env).get_repository(req.authname)
         if rev is None:
-          rev = repos.youngest_rev
+            rev = repos.youngest_rev
         mimeview = Mimeview(self.env)
         changesets = {}
         for path in files:
-          try:
-            node = repos.get_node(path, rev)
-          except NoSuchNode:
-            yield {}
-            continue
-          _rev = node.rev
-          attrs = dict(path=node.path, kind=node.kind, lastrev=_rev)
-          try:
-            chgset = changesets[_rev]
-            attrs.update(dict(changed=chgset.date, log=chgset.message))
-          except KeyError:
             try:
-              changesets[_rev] = chgset = repos.get_changeset(_rev)
-              attrs.update(dict(changed=chgset.date, log=chgset.message))
-            except NoSuchChangeset:
-              changesets[_rev] = attrs['changed'] = attrs['log'] = None
-          if node.isdir:
-            attrs.update(dict(size=0, ext='', mime=''))
-          elif node.isfile:
-            # Copycat ! from trac.versioncontrol.web_ui.browser
-            # MIME type detection 
-            content = node.get_content()
-            _chunk = content.read(CHUNK_SIZE)
-            mime_type = node.content_type
-            if not mime_type or mime_type == 'application/octet-stream':
-                mime_type = mimeview.get_mimetype(node.name, _chunk) or \
-                            mime_type or 'text/plain'
-            
-            attrs.update(size=node.get_content_length(), \
-                          ext=splitext(node.path)[-1][1:], \
-                          mime=mime_type)
-          else:
-            self.log.error("Unknown node type %s at %s", \
-                                              node.kind, node.path)
-          yield attrs
+                node = repos.get_node(path, rev)
+            except NoSuchNode:
+                yield {}
+                continue
+            _rev = node.rev
+            attrs = dict(path=node.path, kind=node.kind, lastrev=_rev)
+            try:
+                chgset = changesets[_rev]
+                attrs.update(dict(changed=chgset.date, log=chgset.message))
+            except KeyError:
+                try:
+                    changesets[_rev] = chgset = repos.get_changeset(_rev)
+                    attrs.update(dict(changed=chgset.date, log=chgset.message))
+                except NoSuchChangeset:
+                    changesets[_rev] = attrs['changed'] = attrs['log'] = None
+            if node.isdir:
+                attrs.update(dict(size=0, ext='', mime=''))
+            elif node.isfile:
+                # Copycat ! from trac.versioncontrol.web_ui.browser
+                # MIME type detection 
+                content = node.get_content()
+                _chunk = content.read(CHUNK_SIZE)
+                mime_type = node.content_type
+                if not mime_type or mime_type == 'application/octet-stream':
+                    mime_type = mimeview.get_mimetype(node.name, _chunk) or \
+                                mime_type or 'text/plain'
+                
+                attrs.update(size=node.get_content_length(), \
+                              ext=splitext(node.path)[-1][1:], \
+                              mime=mime_type)
+            else:
+                self.log.error("Unknown node type %s at %s", \
+                                                  node.kind, node.path)
+            yield attrs
     
     REV_ATTRS = ('rev', 'message', 'author', 'date')
     
         repos = RepositoryManager(self.env).get_repository(req.authname)
         last_cset = None
         try:
-          if isinstance(until, types.StringTypes):
-            last_cset = repos.get_changeset(until)
-            until = last_cset.date
-          elif isinstance(until, type(None)):
-            until = datetime.now(req.tz)
-          elif not isinstance(until, datetime):
+            if isinstance(until, types.StringTypes):
+                last_cset = repos.get_changeset(until)
+                until = last_cset.date
+            elif isinstance(until, type(None)):
+                until = datetime.now(req.tz)
+            elif not isinstance(until, datetime):
+                return []
+            
+            if isinstance(since, types.StringTypes):
+                since = repos.get_changeset(since).date
+            elif isinstance(since, type(None)):
+                since = _epoc
+            elif not isinstance(since, datetime):
+                return []
+        except NoSuchChangeset:
             return []
-          
-          if isinstance(since, types.StringTypes):
-            since = repos.get_changeset(since).date
-          elif isinstance(since, type(None)):
-            since = _epoc
-          elif not isinstance(since, datetime):
-            return []
-        except NoSuchChangeset:
-          return []
         else:
-          def vcs_order(rev1, rev2):
-            if rev1 == rev2:
-              return 0
-            rev1 = repos.normalize_rev(rev1)
-            rev2 = repos.normalize_rev(rev2)
-            if rev1 == rev2:
-              return 0
+            def vcs_order(rev1, rev2):
+                if rev1 == rev2:
+                    return 0
+                rev1 = repos.normalize_rev(rev1)
+                rev2 = repos.normalize_rev(rev2)
+                if rev1 == rev2:
+                    return 0
+                else:
+                    return repos.rev_older_than(rev1, rev2) and 1 or -1
+            seq = list(repos.get_changesets(since, until))
+            if last_cset:
+                seq.append(last_cset)
+            seq.sort(vcs_order, lambda cset: cset.rev)
+            if full:
+                return (tuple(getattr(chg, a) for a in self.REV_ATTRS) \
+                        for chg in seq)
             else:
-              return repos.rev_older_than(rev1, rev2) and 1 or -1
-          seq = list(repos.get_changesets(since, until))
-          if last_cset:
-            seq.append(last_cset)
-          seq.sort(vcs_order, lambda cset: cset.rev)
-          if full:
-            return (tuple(getattr(chg, a) for a in self.REV_ATTRS) \
-                      for chg in seq)
-          else:
-            return (chg.rev for chg in seq)
+                return (chg.rev for chg in seq)
     
     def getFileHistory(self, req, path, rev=None, since=None):
         r"""Retrieve information about all the changes performed on a 
                           particular changeset.
         """
         if not isinstance(rev, (type(None), types.StringTypes)):
-          raise ValueError("Revision ID must be a string")
+            raise ValueError("Revision ID must be a string")
         repos = RepositoryManager(self.env).get_repository(req.authname)
         try:
-          node = repos.get_node(path, rev)
+            node = repos.get_node(path, rev)
         except NoSuchNode:
-          return []
+            return []
         seq = node.get_history()
         seq = _filter_revs(seq, repos, req, since, rev, False, \
-                            accessor=lambda x: x[1], log=self.log)
+                           accessor=lambda x: x[1], log=self.log)
         return (x[0] for x in seq)
     
     def enumChanges(self, req, rev=None):
         """
         repos = RepositoryManager(self.env).get_repository(req.authname)
         if rev is None:
-          rev = repos.youngest_rev
+            rev = repos.youngest_rev
         try:
-          chgset = repos.get_changeset(rev)
+            chgset = repos.get_changeset(rev)
         except NoSuchChangeset:
-          return []
+            return []
         return ((p, k, chg, bp or '', brev or '') \
                       for p, k, chg, bp, brev in chgset.get_changes())
     

File trac-dev/gviz/tracgviz/scalar.py

File contents unchanged.

File trac-dev/gviz/tracgviz/search.py

 
 __all__ = 'GVizSearchFiltersProvider', 'GVizSearchProvider'
 
-from util import GVizXMLRPCAdapter
-from api import gviz_col, gviz_param, GVizBadRequestError
 import types
 
+from tracgviz.api import gviz_col, gviz_param, GVizBadRequestError
+from tracgviz.util import GVizXMLRPCAdapter
+
 class GVizSearchFiltersProvider(GVizXMLRPCAdapter):
     r"""Returns all the search filters installed in the environment.
     

File trac-dev/gviz/tracgviz/stdfmt.py

 """
 __author__ = 'Olemis Lang'
 
+from itertools import izip
+
 from trac.core import Component, implements
 
-from itertools import izip
-
-from api import IGVizTableEncoder
-from util import render_gviz_value, iter_table_data
-from xsv import GVizTSVEncoder, GVizCSVEncoder
+from tracgviz.api import IGVizTableEncoder
+from tracgviz.util import render_gviz_value, iter_table_data
+from tracgviz.xsv import GVizTSVEncoder, GVizCSVEncoder
 
 class GVizJsonEncoder(Component):
     r"""JSON encoder for Google Visualization API.
         
         coltypes = [col["type"] for col in columns]
         for i, row in enumerate(iter_table_data(table)):
-          out+= "<tr bgcolor='%s'>" % ((i & 1) and '#f0f0f0' or '#ffffff',)
-          for value, coltype in izip(row, coltypes):
-            # Do not display None values
-            if value is None:
-              value = ""
-            else:
-              value = render_gviz_value(value, coltype, table, self.env)
-              # TODO: Sanitize in case of HTML string
-              if isinstance(value, tuple):
-                # We have a formatted value as well ... show it
-                value = value[1]
-            out+= "<td>%s</td>" % (value,)
-          out+= "</tr>"
+            out+= "<tr bgcolor='%s'>" % ((i & 1) and '#f0f0f0' or '#ffffff',)
+            for value, coltype in izip(row, coltypes):
+                # Do not display None values
+                if value is None:
+                    value = ""
+                else:
+                    value = render_gviz_value(value, coltype, table, self.env)
+                    # TODO: Sanitize in case of HTML string
+                    if isinstance(value, tuple):
+                        # We have a formatted value as well ... show it
+                        value = value[1]
+                out+= "<td>%s</td>" % (value,)
+            out+= "</tr>"
         out+= "</table>"
         return out
     

File trac-dev/gviz/tracgviz/stdhash.py

 """
 __author__ = 'Olemis Lang'
 
+import hashlib
+import hmac
+from zlib import adler32, crc32
+
 from trac.core import Component, implements
 from trac.config import Option
 
-from api import IHashLibrary, GVizInvalidConfigError
-
-import hashlib
-import hmac
-from zlib import adler32, crc32
+from tracgviz.api import IHashLibrary, GVizInvalidConfigError
 
 __all__ = 'HashLib', 'ZLibChecksum', 'HmacHash'
 
 __metaclass__ = type
 
 class HashLib(Component):
-  r"""Secure Hash Algorithms supported by `hashlib` standard module.
-  
-  Supports the following methods:
-    - sha1, sha224, sha256, sha384, sha512 : as defined in FIPS 180-2 
-    - md5 : RSA's MD5 algorithm (defined in Internet RFC 1321)
-    - Additional algorithms may also be available depending upon the 
-      OpenSSL library that Python uses on your platform.
-  """
-  implements(IHashLibrary)
-  
-  # IHashLibrary methods
-  def get_hash_properties(self, method_name):
-    r"""Determine whether the requested method is a standard hash 
-    algorithm (i.e. md5, sha1, sha224, sha256, sha384, and sha512), or 
-    is implemented by the OpenSSL library that Python uses on your 
-    platform, or is not supported by `hashlib` module.
+    r"""Secure Hash Algorithms supported by `hashlib` standard module.
     
-    @param method_name  the name identifying the hash method
-    @return             `None` if the method is not supported by 
-                        `hashlib` module, a tuple of the form 
-                        (priority, source) otherwise.
-                        
-                        priority: 0   - OpenSSL, 
-                                  199 - standard hash algorithms.
-                        source:   0   - OpenSSL. 
-                                  100 - standard hash algorithms
+    Supports the following methods:
+
+      - sha1, sha224, sha256, sha384, sha512 : as defined in FIPS 180-2 
+      - md5 : RSA's MD5 algorithm (defined in Internet RFC 1321)
+      - Additional algorithms may also be available depending upon the 
+        OpenSSL library that Python uses on your platform.
     """
-    if (not method_name.startswith('_')) and \
-        method_name != 'new' and \
-        hasattr(hashlib, method_name):
-      return (199, 100)
-    else:
-      try:
-        hashlib.new(method_name)
-        return (0, 0)
-      except ValueError:
-        return None
+    implements(IHashLibrary)
     
-  def new_hash_obj(self, method_name, data=None):
-    r"""Create a new hash object.
-    """
-    try:
-      meth = getattr(hashlib, method_name)
-      if data is None:
-        return meth()
-      else:
-        return meth(data)
-    except AttributeError:
-      try:
-        return hashlib.new(method_name)
-      except ValueError:
-        raise GVizInvalidConfigError("Unsupported hash algorithm '%s'" \
-                                  % (method_name,))
-    else:
-      raise
+    # IHashLibrary methods
+    def get_hash_properties(self, method_name):
+        r"""Determine whether the requested method is a standard hash 
+        algorithm (i.e. md5, sha1, sha224, sha256, sha384, and sha512), or 
+        is implemented by the OpenSSL library that Python uses on your 
+        platform, or is not supported by `hashlib` module.
+        
+        @param method_name  the name identifying the hash method
+        @return             `None` if the method is not supported by 
+                            `hashlib` module, a tuple of the form 
+                            (priority, source) otherwise.
+
+                            priority: 0   - OpenSSL, 
+                                      199 - standard hash algorithms.
+                            source:   0   - OpenSSL. 
+                                      100 - standard hash algorithms
+        """
+        if (not method_name.startswith('_')) and \
+                method_name != 'new' and \
+                hasattr(hashlib, method_name):
+            return (199, 100)
+        else:
+            try:
+                hashlib.new(method_name)
+                return (0, 0)
+            except ValueError:
+                return None
+        
+    def new_hash_obj(self, method_name, data=None):
+        r"""Create a new hash object.
+        """
+        try:
+            meth = getattr(hashlib, method_name)
+            if data is None:
+                return meth()
+            else:
+                return meth(data)
+        except AttributeError:
+            try:
+                return hashlib.new(method_name)
+            except ValueError:
+                raise GVizInvalidConfigError("Unsupported hash algorithm '%s'" \
+                                            % (method_name,))
+        else:
+            raise
 
 class ZLibChecksum(Component):
-  r"""Checksum Algorithms supported by `zlib` standard module.
-  
-  Supports the following methods:
-    - adler32 : Adler-32 checksum of string.
-    - crc32   : Compute a CRC (Cyclic Redundancy Check) checksum of 
-                string. 
-  """
-  implements(IHashLibrary)
-  
-  # IHashLibrary methods
-  def get_hash_properties(self, method_name):
-    r"""Determine whether the requested method is defined by `zlib` 
-    standard module.
+    r"""Checksum Algorithms supported by `zlib` standard module.
     
-    @param method_name  the name identifying the hash method
-    @return             (199, 100) if `method_name` is either 
-                        `adler32` or `crc32`, or `None` otherwise.
+    Supports the following methods:
+      - adler32 : Adler-32 checksum of string.
+      - crc32   : Compute a CRC (Cyclic Redundancy Check) checksum of 
+                  string. 
     """
-    if method_name in ['adler32', 'crc32']:
-      return (199, 100)
-    else:
-      return None
+    implements(IHashLibrary)
     
-  class ZLibChecksumObject:
-    r"""Hash objects for zlib checksum methods.
-    """
-    digest_size = 4
-    block_size = 4 # FIX : Dont remember now
-    def __init__(self, method_name):
-      self.args = ()
-      self.chksum_method = {'adler32': adler32,
-                            'crc32': crc32}.get(method_name)
-      if self.chksum_method is None:
-        raise ValueError("Unsupported checkum method '%s'" % \
-                          (method_name,))
-    def update(self, data):
-      r"""Update the hash object with the string arg. Repeated calls 
-      are equivalent to a single call with the concatenation of all 
-      the arguments: m.update(a); m.update(b) is equivalent to 
-      m.update(a+b). .
-      """
-      self.args = (self.chksum_method(data, *self.args),)
-    def digest(self):
-      r"""Return the digest of the strings passed to the update() 
-      method so far. This is a string of 4 bytes which may contain 
-      non-ASCII characters, including null bytes.
-      """
-      try:
-        chksum, digest = self.args[0], []
-      except ValueError:
-        return None
-      else:
-        for x in xrange(4):
-          digest.append(chr(chksum & 0xFF))
-          chksum >>= 8
-        return ''.join(reversed(digest))
-    def hexdigest(self):
-      r"""Like digest() except the digest is returned as a string of 
-      double length, containing only hexadecimal digits. This may be 
-      used to exchange the value safely in email or other non-binary 
-      environments. 
-      """
-      try:
-        return hex(self.args[0])[2:]
-      except ValueError:
-        return None
-    def copy(self):
-      new_obj = ZLibChecksumObject(self.chksum_method.func_name)
-      new_obj.args = tuple(self.args)
-      return new_obj
-  
-  def new_hash_obj(self, method_name, data=None):
-    r"""Create a new hash object.
-    """
-    try:
-      ho = self.ZLibChecksumObject(method_name)
-    except ValueError:
-      raise GVizInvalidConfigError("Unsupported hash algorithm '%s'" \
-                                  % (method_name,))
-    else:
-      if data is not None:
-        ho.update(data)
-      return ho
+    # IHashLibrary methods
+    def get_hash_properties(self, method_name):
+        r"""Determine whether the requested method is defined by `zlib` 
+        standard module.
+        
+        @param method_name  the name identifying the hash method
+        @return             (199, 100) if `method_name` is either 
+                            `adler32` or `crc32`, or `None` otherwise.
+        """
+        if method_name in ['adler32', 'crc32']:
+            return (199, 100)
+        else:
+            return None
+        
+    class ZLibChecksumObject:
+        r"""Hash objects for zlib checksum methods.
+        """
+        digest_size = 4
+        block_size = 4 # FIX : Dont remember now
+        def __init__(self, method_name):
+            self.args = ()
+            self.chksum_method = {'adler32': adler32,
+                                      'crc32': crc32}.get(method_name)
+            if self.chksum_method is None:
+                raise ValueError("Unsupported checkum method '%s'" % \
+                                    (method_name,))
+        def update(self, data):
+            r"""Update the hash object with the string arg. Repeated calls 
+            are equivalent to a single call with the concatenation of all 
+            the arguments: m.update(a); m.update(b) is equivalent to 
+            m.update(a+b).
+            """
+            self.args = (self.chksum_method(data, *self.args),)
+        def digest(self):
+            r"""Return the digest of the strings passed to the update() 
+            method so far. This is a string of 4 bytes which may contain 
+            non-ASCII characters, including null bytes.
+            """
+            try:
+                chksum, digest = self.args[0], []
+            except ValueError:
+                return None
+            else:
+                for x in xrange(4):
+                    digest.append(chr(chksum & 0xFF))
+                    chksum >>= 8
+                return ''.join(reversed(digest))
+        def hexdigest(self):
+            r"""Like digest() except the digest is returned as a string of 
+            double length, containing only hexadecimal digits. This may be 
+            used to exchange the value safely in email or other non-binary 
+            environments. 
+            """
+            try:
+                return hex(self.args[0])[2:]
+            except ValueError:
+                return None
+        def copy(self):
+            new_obj = ZLibChecksumObject(self.chksum_method.func_name)
+            new_obj.args = tuple(self.args)
+            return new_obj
+    
+    def new_hash_obj(self, method_name, data=None):
+        r"""Create a new hash object.
+        """
+        try:
+            ho = self.ZLibChecksumObject(method_name)
+        except ValueError:
+            raise GVizInvalidConfigError("Unsupported hash algorithm '%s'" \
+                                            % (method_name,))
+        else:
+            if data is not None:
+                ho.update(data)
+            return ho
 
 class HmacHash(Component):
-  r"""Secure HMAC algorithm as described by RFC 2104.
-  """
-  implements(IHashLibrary)
-  
-  key = Option('gviz', 'hmac_key', "12345678",
-        """HMAC key.""")
-  digestmod = Option('gviz', 'hmac_digestmod', "md5",
-        """The name of the digest method for the HMAC algorithm """ \
-        """to use. It defaults to 'md5'.""")
-  
-  @property
-  def _generic_digmod(self):
-    try:
-      return getattr(hashlib, self.digestmod)
-    except AttributeError:
-      def digmod(*args, **kwds):
+    r"""Secure HMAC algorithm as described by RFC 2104.
+    """
+    implements(IHashLibrary)
+    
+    key = Option('gviz', 'hmac_key', "12345678",
+                """HMAC key.""")
+    digestmod = Option('gviz', 'hmac_digestmod', "md5",
+                """The name of the digest method for the HMAC algorithm """ \
+                """to use. It defaults to 'md5'.""")
+    
+    @property
+    def _generic_digmod(self):
         try:
-          return hashlib.new(self.digestmod)
-        except ValueError:
-          raise GVizInvalidConfigError("Unsupported hash algorithm " \
-                                          "for HMAC '%s'" % (self.digestmod,))
-      return digmod
-  
-  # IHashLibrary methods
-  def get_hash_properties(self, method_name):
-    r"""Determine the priority of this hash implementation.
+            return getattr(hashlib, self.digestmod)
+        except AttributeError:
+            def digmod(*args, **kwds):
+                try:
+                    return hashlib.new(self.digestmod)
+                except ValueError:
+                    raise GVizInvalidConfigError("Unsupported hash algorithm " \
+                                                 "for HMAC '%s'" % (self.digestmod,))
+            return digmod
     
-    @param method_name  has to be `hmac`, ignored otherwise
-    @return             priority: 0 , source: 100 if method name is `hmac`
-                        `None` otherwise
-    """
-    if method_name == 'hmac':
-      return (199, 100)
-    else:
-      return None
-    
-  def new_hash_obj(self, method_name, data=None):
-    r"""Create a new hash object.
-    """
-    if method_name == 'hmac':
+    # IHashLibrary methods
+    def get_hash_properties(self, method_name):
+        r"""Determine the priority of this hash implementation.
+        
+        @param method_name  has to be `hmac`, ignored otherwise
+        @return             priority: 199, source: 100 if method name is `hmac`
+                                  `None` otherwise
+        """
+        if method_name == 'hmac':
+            return (199, 100)
+        else:
+            return None
+        
+    def new_hash_obj(self, method_name, data=None):
+        r"""Create a new hash object.
+        """
+        if method_name == 'hmac':
-      self.log.debug("IG: Creating HMAC key=%s, mod=%s", self.key, 
-                      self.digestmod)
+            self.log.debug("IG: Creating HMAC key=%s, mod=%s", self.key,
+                           self.digestmod)
-      return hmac.new(self.key, digestmod=self._generic_digmod)
-    else:
-      raise GVizInvalidConfigError("Invalid algorithm name '%s'" \
-                                  % (method_name,))
+            return hmac.new(self.key, digestmod=self._generic_digmod)
+        else:
+            raise GVizInvalidConfigError("Invalid algorithm name '%s'" \
+                                         % (method_name,))
 

File trac-dev/gviz/tracgviz/testing/__init__.py

 
 __metaclass__ = type
 
-from trac.core import ComponentMeta
-from trac.db.api import _parse_db_str, DatabaseManager
-from trac.test import EnvironmentStub
-
 import os.path
 import sys
 import tempfile
 
-#------------------------------------------------------
-#    Trac environments used for testing purposes
-#------------------------------------------------------
-
-class EnvironmentStub(EnvironmentStub):
-  r"""Enhanced stub of the trac.env.Environment object for testing.
-  """
-
-  # Dont break lazy evaluation. Otherwise RPC calls misteriously fail.
-  @property
-  def _abs_href(self):
-    return self.abs_href
-
-  def enable_component(self, clsdef):
-    r"""Enable a plugin temporarily at testing time.
-    """
-    self.config.set('components', clsdef, 'enabled')
-    self.enabled.clear()
-    try:
-      del self._rules
-    except AttributeError:
-      pass
-
-  def disable_component(self, clsdef):
-    r"""Disable a plugin temporarily at testing time.
-    """
-    self.config.set('components', clsdef, 'disabled')
-    self.enabled.clear()
-    try:
-      del self._rules
-    except AttributeError:
-      pass
-
-  def rip_component(self, cls):
-    r"""Disable a plugin forever and RIP it using the super-laser beam.
-    """
-    self.disable_component(cls)
-    for reg in ComponentMeta._registry.itervalues():
-      try:
-        reg.remove(cls)
-      except ValueError :
-        pass
-
-  if not hasattr(EnvironmentStub, 'reset_db'):
-
-    # Copycat trac.test.EnvironmentStub.reset_db (Trac=0.11.5)
-    def reset_db(self, default_data=None):
-        r"""Remove all data from Trac tables, keeping the tables themselves.
-        :param default_data: after clean-up, initialize with default data
-        :return: True upon success
-        """
-        from trac import db_default
-
-        db = self.get_db_cnx()
-        db.rollback() # make sure there's no transaction in progress
-        cursor = db.cursor()
-
-        defdata = list(db_default.get_data(db))
-
-        for table, cols, vals in defdata:
-            cursor.execute("DELETE FROM %s" % (table,) )
-
-        # Delete tickets as well
-        cursor.execute("DELETE FROM ticket")
-        db.commit()
-
-        if default_data:
-            for table, cols, vals in defdata:
-                cursor.executemany("INSERT INTO %s (%s) VALUES (%s)"
-                                   % (table, ','.join(cols),
-                                      ','.join(['%s' for c in cols])),
-                                   vals)
-        else:
-            cursor.execute("INSERT INTO system (name, value) "
-                           "VALUES (%s, %s)",
-                           ('database_version', str(db_default.db_version)))
-        db.commit()
-
-#------------------------------------------------------
-#    Minimalistic testing framework for Trac
-#------------------------------------------------------
-
-from dutest import DocTestLoader, DocTestSuiteFixture
-from os.path import dirname
-from types import MethodType
-
-from util import dummy_request
-
-# Hide this module from tracebacks written into test results.
-__unittest = True
-
-class DocTestTracLoader(DocTestLoader):
-  r"""A generic XUnit loader that allows to load doctests written 
-  to check that Trac plugins behave as expected.
-  """
-  def set_env(self, env):
-    if self.extraglobs is None :
-      self.extraglobs = dict(env=env)
-    else :
-      self.extraglobs['env'] = env
-
-  env = property(lambda self : self.extraglobs.get('env'), set_env, \
-                  doc="""The Trac environment used in doctests.""")
-  del set_env
-
-  def __init__(self, dt_finder=None, globs=None, extraglobs=None, \
-                          load=None, default_data=False, enable=None, \
-                          **opts):
-    r"""Initialization. It basically works like `DocTestLoader`'s 
-    initializer but creates also the Trac environment used for 
-    testing purposes. The default behavior is to create an instance 
-    of `EnvironmentStub` class. Subclasses can add more specific 
-    keyword parameters in order to use them to create the 
-    environment. Next it loads (and | or) enables the components 
-    needed by the test suite.
-
-    The following variables are magically available at testing time. 
-    They can be used directly in doctests :
-
-    - req         A dummy request object setup for anonymous access.
-    - auth_req    A dummy request object setup like if user `murphy` was  
-                  accessing the site.
-    - env         the Trac environment used as a stub for testing 
-                  purposes (i.e. `self.env`).
-
-    @param dt_finder        see docs for `DocTestLoader.__init__` 
-                            method.
-    @param globs            see docs for `DocTestLoader.__init__` 
-                            method.
-    @param extraglobs       see docs for `DocTestLoader.__init__` 
-                            method.
-    @param load             a list of packages containing components 
-                            that will be loaded to ensure they are 
-                            available at testing time. It should be 
-                            the top level module in that package 
-                            (e.g. 'trac').
-    @param default_data     If true, populate the database with some 
-                            defaults. This parameter has to be 
-                            handled by `createTracEnv` method.
-    @param enable           a list of UNIX patterns specifying which 
-                            components need to be enabled by default 
-                            at testing time. This parameter should be 
-                            handled by `createTracEnv` method.
-    """
-    super(DocTestTracLoader, self).__init__(dt_finder, globs, \
-                                              extraglobs, **opts)
-    self.env = self.createTracEnv(default_data, enable, **opts)
-    self.load_components(load is None and self.default_packages or load)
-
-  # Load trac built-in components by default
-  default_packages = ['trac']
-
-  def createTracEnv(self, default_data=False, enable=None, **params):
-    r"""Create the Trac environment used for testing purposes. The 
-    default behavior is to create an instance of `EnvironmentStub` 
-    class. Subclasses can override this decision and add more specific 
-    keyword parameters in order to control environment creation in 
-    more detail. 
-
-    All parameters supplied at initialization time. By default they 
-    are ignored.
-    @param default_data     If True, populate the database with some 
-                            defaults.
-    @param enable           a list of UNIX patterns specifying which 
-                            components need to be enabled by default 
-                            at testing time.
-    @return                 the environment used for testing purpose.
-    """
-    return EnvironmentStub(default_data, enable)
-
-  def load_components(self, pkgs):
-    r"""Load some packages to ensure that the components they 
-    implement are available at testing time.
-    """
-    from trac.loader import load_components
-    for pkg in pkgs :
-      try :
-        __import__(pkg)
-      except ImportError :
-        pass                        # Skip pkg. What a shame !
-      else :
-        mdl = sys.modules[pkg]
-        load_components(self.env, dirname(dirname(mdl.__file__)))
-
-  class doctestSuiteClass(DocTestSuiteFixture):
-    r"""Prepare the global namespace before running all doctests 
-    in the suite. Reset the Trac environment.
-    """
-    username = 'murphy'
-
-    @property
-    def env(self):
-      r"""The Trac environment involved in this test. It is 
-      retrieved using the global namespace ;o).
-      """
-      return self.globalns['env']
-
-    def new_request(self, uname=None, args=None):
-      r"""Create and initialize a new request object.
-      """
-      req = dummy_request(self.env, uname)
-      if args is not None :
-        req.args = args
-      return req
-
-    def setUp(self):
-      r"""Include two (i.e. `req` anonymous and `auth_req` 
-      authenticated) request objects in the global namespace, before 
-      running the doctests. Besides, clean up environment data and 
-      include only default data.
-      """
-      globs = self.globalns
-      req = self.new_request(args=dict())
-      auth_req = self.new_request(uname=self.username, args=dict())
-      globs['req'] = req
-      globs['auth_req'] = auth_req
-      # TODO: If the source docstrings belong to a Trac component, 
-      #       then instantiate it and include in the global 
-      #       namespace.
-      self.env.reset_db(default_data=True)
-      self.setup_logging()
-
-    def setup_logging(self):
-      r"""Log events to temp file
-      """
-      logdir = tempfile.gettempdir()
-      logpath = os.path.join(logdir, 'trac-testing.log')
-      config = self.env.config
-      config.set('logging', 'log_file', logpath)
-      config.set('logging', 'log_type', 'file')
-      config.set('logging', 'log_level', 'DEBUG')
-      config.save()
-      self.env.setup_log()
-      self.env.log.info('%s test case: %s %s', '-' * 9, self._dt.name, '-' * 9)
-
-class RpcDirectProxy :
-  r"""A proxy used in tests to interact with RPC handlers just like 
-  if it were an ordinary object (i.e. without the need for supplying 
-  a request object explicitly in each method call).
-
-  It also asserts that the permissions necessary to execute a given 
-  method are satisfied by the request object
-  """
-  def __init__(self, rpcobj, req):
-    r"""Initialization.
-
-    @param rpcobj         the target RPC handler object.
-    @param req            a request object that will be supplied 
-                          in each call to the methods defined by 
-                          the target RPC handler (i.e. `rpcobj`).
-    """
-    self._rpcobj = rpcobj
-    self._req = req
-    self._perms = dict([m.im_func.__name__, p] \
-                              for p, _, m in rpcobj.xmlrpc_methods())
-  def __getattr__(self, attrnm):
-    r"""Control access to target RPC handler object, and assert that 
-    the right permissions have been granted to the user specified in 
-    the request.
-    """
-    try :
-      val = getattr(self._rpcobj, attrnm)
-    except AttributeError :
-      raise
-    else :
-      if isinstance(val, MethodType):
-        def rpcmethod(*args, **kwds):
-          try :
-            p = self._perms[attrnm]
-          except KeyError :
-            pass
-          else :
-            self._req.perm.require(p)
-          return val(self._req, *args, **kwds)
-        return rpcmethod
-      else :
-        return val
-
-class DocTestRpcLoader(DocTestTracLoader):
-  r"""Load doctests used to test Trac RPC handlers.
-  """
-  # Load trac built-in components and standard RPC handlers by default
-  default_packages = ['trac', 'tracrpc']
-
-  class doctestSuiteClass(DocTestTracLoader.doctestSuiteClass):
-    r"""Include the appropriate RPC handler in global namespace 
-    before running all test cases in the suite.
-    """
-
-    def ns_from_name(self):
-      r"""Extract the target namespace under test using the name
-      of the DocTest instance manipulated by the suite.
-      """
-      try :