Commits

Andrew Davison committed c251179

More unit tests, plus minor reformatting for PEP 8 compliance.

Comments (0)

Files changed (38)

doc/releases/0.5.3.txt

+===========================
+Sumatra 0.5.3 release notes
+===========================
+
+*April 9th 2014*
+
+Sumatra 0.5.3 is a minor release that adds support for the ISO datetime format and fixes a problem when trying to
+move a project to another machine.

sumatra/__init__.py

            'dependency_finder', 'web', 'decorators', 'publishing',
            'users', 'core']
 
-__version__ = "0.6.0dev"    
+__version__ = "0.6.0dev"

sumatra/commands.py

 import sys
 from argparse import ArgumentParser
 from textwrap import dedent
-from copy import deepcopy
 import warnings
 import re
 import logging
-import cProfile
 import sumatra
 
 from sumatra.programs import get_executable
 
 store_arg_help = "The argument can take the following forms: (1) `/path/to/sqlitedb` - DjangoRecordStore is used with the specified Sqlite database, (2) `http[s]://location` - remote HTTPRecordStore is used with a remote Sumatra server, (3) `postgres://username:password@hostname/databasename` - DjangoRecordStore is used with specified Postgres database."
 
+
 def parse_executable_str(exec_str):
     """
     Split the string describing the executable into a path part and an
     options part.
     """
     first_space = exec_str.find(" ")
-    if first_space == -1: first_space = len(exec_str)
+    if first_space == -1:
+        first_space = len(exec_str)
     return exec_str[:first_space], exec_str[first_space:]
 
 
     if pos == -1:
         raise Exception("Not a valid command line parameter. String must be of form 'name=value'")
     name = p[:pos]
-    value = p[pos+1:]
+    value = p[pos + 1:]
     if list_pattern.match(value) or tuple_pattern.match(value):
         value = eval(value)
     else:
     input_data = []
     for arg in args:
         have_parameters = False
-        if os.path.isfile(arg): # could be a parameter file or a data file
+        if os.path.isfile(arg):  # could be a parameter file or a data file
             try:
-               parameter_sets.append(build_parameters(arg))
-               script_args.append("<parameters>")
-               have_parameters = True
+                parameter_sets.append(build_parameters(arg))
+                script_args.append("<parameters>")
+                have_parameters = True
             except SyntaxError:
-               pass
+                pass
         if not have_parameters:
             if input_datastore.contains_path(arg):
                 data_key = input_datastore.generate_keys(arg)
                 input_data.extend(data_key)
                 script_args.append(arg)
-            elif allow_command_line_parameters and "=" in arg: # cmdline parameter
+            elif allow_command_line_parameters and "=" in arg:  # cmdline parameter
                 cmdline_parameters.update(parse_command_line_parameter(arg))
-            else: # a flag or something, passed on unchanged
+            else:  # a flag or something, passed on unchanged
                 script_args.append(arg)
     if stdin:
         script_args.append("< %s" % stdin)
             raise IOError("File does not exist: %s" % stdin)
     if stdout:
         script_args.append("> %s" % stdout)
-    assert len(parameter_sets) < 2, "No more than one parameter file may be supplied." # temporary restriction
+    assert len(parameter_sets) < 2, "No more than one parameter file may be supplied."  # temporary restriction
     if cmdline_parameters:
         if parameter_sets:
             parameter_sets[0].update(cmdline_parameters)
     parser.add_argument('-e', '--executable', metavar='PATH', help="set the path to the executable. If this is not set, smt will try to infer the executable from the value of the --main option, if supplied, and will try to find the executable from the PATH environment variable, then by searching various likely locations on the filesystem.")
     parser.add_argument('-r', '--repository', help="the URL of a Subversion or Mercurial repository containing the code. This will be checked out/cloned into the current directory.")
     parser.add_argument('-m', '--main', help="the name of the script that would be supplied on the command line if running the simulation or analysis normally, e.g. init.hoc.")
-    parser.add_argument('-c', '--on-changed', default='error', help="the action to take if the code in the repository or any of the depdendencies has changed. Defaults to %(default)s") # need to add list of allowed values
+    parser.add_argument('-c', '--on-changed', default='error', help="the action to take if the code in the repository or any of the depdendencies has changed. Defaults to %(default)s")  # need to add list of allowed values
     parser.add_argument('-s', '--store', help="Specify the path, URL or URI to the record store (must be specified). This can either be an existing record store or one to be created. {0} Not using the `--store` argument defaults to a DjangoRecordStore with Sqlite in `.smt/records`".format(store_arg_help))
     parser.add_argument('-A', '--archive', metavar='PATH', help="specify a directory in which to archive output datafiles. If not specified, datafiles are not archived.")
     parser.add_argument('-g', '--labelgenerator', choices=['timestamp', 'uuid'], default='timestamp', metavar='OPTION', help="specify which method Sumatra should use to generate labels (options: timestamp, uuid)")
         repository = get_repository(args.repository)
         repository.checkout()
     else:
-        repository = get_working_copy().repository # if no repository is specified, we assume there is a working copy in the current directory.
+        repository = get_working_copy().repository  # if no repository is specified, we assume there is a working copy in the current directory.
 
     if args.executable:
         executable_path, executable_options = parse_executable_str(args.executable)
     elif args.main:
         try:
             executable = get_executable(script_file=args.main)
-        except Exception: # assume unrecognized extension - really need more specific exception type
+        except Exception:  # assume unrecognized extension - really need more specific exception type
             # should warn that extension unrecognized
             executable = None
     else:
     else:
         output_datastore = FileSystemDataStore(args.datapath)
     input_datastore = FileSystemDataStore(args.input)
-    
+
     if args.launch_mode_options:
         args.launch_mode_options = args.launch_mode_options.strip()
     launch_mode = get_launch_mode(args.launch_mode)(options=args.launch_mode_options)
     project = Project(name=args.project_name,
                       default_executable=executable,
                       default_repository=repository,
-                      default_main_file=args.main, # what if incompatible with executable?
+                      default_main_file=args.main,  # what if incompatible with executable?
                       default_launch_mode=launch_mode,
                       data_store=output_datastore,
                       record_store=record_store,
 
     args = parser.parse_args(argv)
 
+    project = load_project()
     if args.store:
         new_store = get_record_store(args.store)
-        project = load_project()
-        project.backup()
-        old_store = project.record_store
-        new_store.sync(old_store, project.name)
-        project.record_store = new_store
-    else:
-        project = load_project()
+        project.change_record_store(new_store)
 
     if args.archive:
         if args.archive.lower() == "true":
             args.archive = ".smt/archive"
-        if hasattr(project.data_store, 'archive_store'): # current data store is archiving
+        if hasattr(project.data_store, 'archive_store'):  # current data store is archiving
             if args.archive.lower() == 'false':
                 project.data_store = FileSystemDataStore(project.data_store.root)
             else:
                 project.data_store.archive_store = args.archive
-        else: # current data store is not archiving
+        else:  # current data store is not archiving
             if args.archive.lower() != 'false':
                 project.data_store = ArchivingFileSystemDataStore(args.datapath, args.archive)
     if args.datapath:
     elif len(parameters) == 1:
         parameters = parameters[0]
     else:
-        parser.error("Only a single parameter file allowed.") # for now
+        parser.error("Only a single parameter file allowed.")  # for now
 
     if args.executable:
         executable_path, executable_options = parse_executable_str(args.executable)
         executable = get_executable(path=executable_path)
         executable.options = executable_options
     elif args.main:
-        executable = get_executable(script_file=args.main) # should we take the options from project.default_executable, if they match?
+        executable = get_executable(script_file=args.main)  # should we take the options from project.default_executable, if they match?
     else:
         executable = 'default'
     if args.num_processes:
                 label = project.most_recent().label
             try:
                 project.delete_record(label, delete_data=args.data)
-            except Exception: # could be KeyError or DoesNotExist: should create standard NoSuchRecord or RecordDoesNotExist exception
+            except Exception:  # could be KeyError or DoesNotExist: should create standard NoSuchRecord or RecordDoesNotExist exception
                 warnings.warn("Could not delete record '%s' because it does not exist" % label)
 
 
     parser.add_argument('label', nargs='?', metavar='LABEL', help="the record to which the comment will be added")
     parser.add_argument('comment', help="a string of text, or the name of a file containing the comment.")
     parser.add_argument('-r', '--replace', action='store_true',
-                        help="if this flag is set, any existing comment will be overwritten, otherwise, the new comment will be appended to the end, starting on a new line")
+                        help="if this flag is set, any existing comment will be overwritten, otherwise, the new comment will be appended to the end, starting on a new line")  # THIS IS NOT IMPLEMENTED
     parser.add_argument('-f', '--file', action='store_true',
                         help="interpret COMMENT as the path to a file containing the comment")
     args = parser.parse_args(argv)
               "version of Sumatra) before upgrading.")
         sys.exit(1)
 
-
     # backup and remove .smt
     import shutil
     backup_dir = project.backup()
     print("Project successfully upgraded to Sumatra version {}.".format(project.sumatra_version))
 
 
-
 def export(argv):
     usage = "%(prog)s export"
     description = dedent("""\
         given, and the command is run in a directory containing a Sumatra
         project, only that project's records be synchronized with the store at
         PATH1. Note that PATH1 and PATH2 may be either filesystem paths or URLs.
-        """) # need to say what happens if the sync is incomplete due to label collisions
+        """)  # need to say what happens if the sync is incomplete due to label collisions
     parser = ArgumentParser(usage=usage,
                             description=description)
     parser.add_argument('path1')
     }
     for option_name, field in field_map.items():
         value = getattr(args, option_name)
-        if value:   
+        if value:
             project.record_store.update(project.name, field, value)

sumatra/compatibility.py

     from cStringIO import StringIO
 except ImportError:
     from io import StringIO
-    
+
 try:
     from urllib2 import urlopen, URLError
     from urllib import urlretrieve
 except ImportError:
     from urllib.request import urlopen, urlretrieve
     from urllib.error import URLError
-    from urllib.parse import urlparse
+    from urllib.parse import urlparse
 
 TIMESTAMP_FORMAT = "%Y%m%d-%H%M%S"
 
+
 def have_internet_connection():
     """
     Not foolproof, but allows checking for an external connection with a short
     """
     test_address = 'http://74.125.113.99'  # google.com
     try:
-        response = urlopen(test_address,timeout=1)
+        response = urlopen(test_address, timeout=1)
         return True
     except (URLError, socket.timeout) as err:
         pass
     """
     Run a command with a timeout after which it will be forcibly
     killed.
-    
+
     Based on http://stackoverflow.com/a/3326559
     """
     class Alarm(Exception):
         pass
+
     def alarm_handler(signum, frame):
         raise Alarm
     p = subprocess.Popen(args, shell=shell, cwd=cwd, stdout=subprocess.PIPE,
         for pid in pids:
             # process might have died before getting to this line
             # so wrap to avoid OSError: no such process
-            try: 
+            try:
                 os.kill(pid, signal.SIGKILL)
             except OSError:
                 pass

sumatra/dependency_finder/__init__.py

 import warnings
 
 from sumatra.dependency_finder import core, neuron, python, genesis, matlab
-    
+
 
 def find_dependencies(filename, executable):
     """
     Return a list of dependencies for a given script and programming language.
-    
+
     *filename*:
         the path to the script whose dependencies should be found.
     *executable*:

sumatra/dependency_finder/core.py

             try:
                 wc = versioncontrol.get_working_copy(dependency.path)
             except versioncontrol.VersionControlError:
-                pass # dependency.version remains "unknown"
+                pass  # dependency.version remains "unknown"
             else:
                 if wc.has_changed():
                     dependency.diff = wc.diff()
 def find_versions(dependencies, heuristics):
     """
     Try to find version information by calling a series of functions in turn.
-    
+
     *dependencies*:
         a list of Dependency objects.
     *heuristics*:
         a list of functions that accept a component as the single
         argument and return a version number or 'unknown'.
-                   
+
     Returns a possibly modified list of dependencies
     """
     for heuristic in heuristics:
     raise IOError("File %s does not exist" % path)
 
 
+class BaseDependency(object):
 
-class BaseDependency(object):
     """
     Contains information about a program component, and tries to determine version information.
-    
+
     *name*:
         an identifying name, e.g. the module name in Python
     *path*:
     *source*:
         an identifier for where the dependency came from, if known, e.g. the
         url of a version control repository or the name of a Linux package.
-    
+
     """
-    
+
     def __init__(self, name, path=None, version='unknown', diff='', source=None):
         self.name = name
         self.path = path
         self.diff = diff
         self.version = version
         self.source = source  # e.g. url of (upstream?) repository
- 
+
     def __repr__(self):
         return "%s (%s) version=%s%s" % (self.name, self.path, self.version, self.diff and "*" or '')
-        
+
     def __eq__(self, other):
         return self.name == other.name and self.path == other.path and \
-               self.version == other.version and self.diff == other.diff
-        
+            self.version == other.version and self.diff == other.diff
+
     def __ne__(self, other):
         return not self.__eq__(other)
 

sumatra/dependency_finder/genesis.py

     Contains information about a .g file, and tries to determine version information.
     """
     module = 'genesis'
-    
+
     def __init__(self, name, path=None, version='unknown', diff='', source=None):
         # name maybe should be path relative to main file?
         super(Dependency, self).__init__(os.path.basename(name),
         for i in range(1, len(lines)):
             lines[i] = lines[i].replace("{getenv SIMPATH}", lines[0])
     return lines[-1].split()
-    
+
 
 def find_included_files(file_path):
     """
     Find all files that are included, whether directly or indirectly, by a given
     .g file. 
     """
-    comment_pattern = re.compile('/\*([^*]|[\r\n]|(\*+([^*/]|[\r\n])))*\*+/') # see http://ostermiller.org/findcomment.html
+    comment_pattern = re.compile('/\*([^*]|[\r\n]|(\*+([^*/]|[\r\n])))*\*+/')  # see http://ostermiller.org/findcomment.html
     include_pattern = re.compile(r'include (?P<path>[\w\./]+)')
     search_dirs = get_sim_path()
-    all_paths = []    
+    all_paths = []
+
     def find(start_path, paths):
         """
         Recursively look for files loaded by start_path, add them to paths.
         with open(start_path) as f:
             without_comments = comment_pattern.sub("", f.read())
         new_paths = include_pattern.findall(without_comments)
+
         def add_ext(path):
             if path[-2:] != ".g":
                 path += ".g"
     Return a list of Dependency objects representing all files included,
     whether directly or indirectly, by a given .g file. 
     """
-    heuristics = [core.find_versions_from_versioncontrol,]
+    heuristics = [core.find_versions_from_versioncontrol, ]
     paths = find_included_files(filename)
     # also need to find .p files
     dependencies = [Dependency(name) for name in paths]

sumatra/dependency_finder/neuron.py

     Contains information about a Hoc file, and tries to determine version information.
     """
     module = 'neuron'
-    
+
     def __init__(self, name, path=None, version='unknown', diff='', source=None):
         super(Dependency, self).__init__(os.path.basename(name),
                                          path or os.path.abspath(name),
     """
     xopen_pattern = re.compile(r'xopen\("(?P<path>\w+\.*\w*)"\)')
     all_paths = []
+
     def find(path, paths):
         current_dir = os.path.dirname(path)
         with open(path) as f:
             new_paths = xopen_pattern.findall(f.read())
-        #print "-", path, new_paths
+        # print "-", path, new_paths
         new_paths = [os.path.join(current_dir, path) for path in new_paths]
         paths.extend(new_paths)
         for path in new_paths:
     op = os.path
     search_dirs = []
     if "HOC_LIBRARY_PATH" in os.environ:
-        search_dirs.extend(os.environ["HOC_LIBRARY_PATH".split(":")]) # could also be space-separated
+        search_dirs.extend(os.environ["HOC_LIBRARY_PATH".split(":")])  # could also be space-separated
     if "NEURONHOME" in os.environ:
         search_dirs.append(os.environ["NEURONHOME"])
     else:
         search_dirs.append(op.join(prefix, "share/nrn/lib/hoc"))
     load_file_pattern = re.compile(r'load_file\("(?P<path>[\w\.\/]+)"\)')
     all_paths = []
+
     def find(start_path, paths):
         """
         Recursively look for files loaded by start_path, add them to paths.
             new_paths = load_file_pattern.findall(f.read())
         curdir = op.dirname(start_path)
         new_paths = [core.find_file(p, curdir, search_dirs) for p in new_paths]
-        #if new_paths:
+        # if new_paths:
         #    print start_path, "loads the following:\n ", "\n  ".join(new_paths)
-        #else:
+        # else:
         #    print start_path, "loads no files"
         paths.extend(new_paths)
         for path in new_paths:
     """Return a list of Dependency objects representing all Hoc files imported
     (directly or indirectly) by a given Hoc file."""
     executable_path = os.path.realpath(executable.path)
-    heuristics = [core.find_versions_from_versioncontrol,]
+    heuristics = [core.find_versions_from_versioncontrol, ]
     paths = find_xopened_files(filename).union(find_loaded_files(filename, executable.path))
     dependencies = [Dependency(name) for name in paths]
     dependencies = [d for d in dependencies if not d.in_stdlib(executable.path)]

sumatra/dependency_finder/python.py

     # process-creation overhead.
     import textwrap
     import subprocess
-    script = str(script) # get problems if script is is unicode
+    script = str(script)  # get problems if script is is unicode
     p = subprocess.Popen(executable_path, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
     encoding = get_encoding()
-    output, err = p.communicate(textwrap.dedent(script).encode(encoding)) # should handle err
+    output, err = p.communicate(textwrap.dedent(script).encode(encoding))  # should handle err
     output = output.decode(encoding)
-    output = output[output.find(SENTINEL)+len(SENTINEL):]
+    output = output[output.find(SENTINEL) + len(SENTINEL):]
     try:
         return_value = eval(output)
     except SyntaxError as err:
 sys.stdout.write("%(sentinel)s" + str(versions))
 """
 
+
 def find_versions_by_attribute(dependencies, executable):
     """Try to find version information from the attributes of a Python module."""
     context = {
        obtain version information from this."""
     for dependency in dependencies:
         if dependency.version == 'unknown':
-            dir = os.path.dirname(dependency.path) # should check if dirname ends in ".egg" - may need parent directory
+            dir = os.path.dirname(dependency.path)  # should check if dirname ends in ".egg" - may need parent directory
             if os.path.isdir(dir):
                 if 'EGG-INFO' in os.listdir(dir):
                     with open(os.path.join(dir, 'EGG-INFO', 'PKG-INFO')) as f:
     @classmethod
     def from_module(cls, module, executable_path):
         """Create from modulefinder.Module instance."""
-        path = os.path.realpath(module.__path__[0]) # resolve any symbolic links
+        path = os.path.realpath(module.__path__[0])  # resolve any symbolic links
         if len(module.__path__) > 1:
-            raise Exception("This is not supposed to happen. Please tell the package developers about this.") # or I could figure out for myself when this could happen
+            raise Exception("This is not supposed to happen. Please tell the package developers about this.")  # or I could figure out for myself when this could happen
         return cls(module.__name__, module.__path__[0])
 
 
     all the dependency finding and version checking in a subprocess with the
     correct version of Python.
     """
-    #Actually, we could check whether executable_path matches sys.executable, and
-    #then do it in this process. On the other hand, the dependency finding
-    #could run in parallel with the simulation (good for multicore): we could
-    #move setting of dependencies to after the simulation, rather than having it
-    #in record.register()
+    # Actually, we could check whether executable_path matches sys.executable, and
+    # then do it in this process. On the other hand, the dependency finding
+    # could run in parallel with the simulation (good for multicore): we could
+    # move setting of dependencies to after the simulation, rather than having it
+    # in record.register()
     script = """
         from modulefinder import ModuleFinder
         import sys, os
     print("\n".join(str(d)
                     for d in find_dependencies(sys.argv[1],
                                                programs.PythonExecutable(None),
-                                               on_changed='store-diff')))
+                                               on_changed='store-diff')))

sumatra/formatting/__init__.py

 
 
 class Formatter(object):
-    
+
     def __init__(self, records, project=None, tags=None):
         self.records = records
         self.project = project
         self.tags = None
-        
+
     def format(self, mode='short'):
         """
         Format a record according to the given mode. ``mode`` may be 'short',
     """
     Format the information from a list of Sumatra records as text.
     """
-    
+
     def short(self):
         """Return a list of record labels, one per line."""
         return "\n".join(record.label for record in self.records)
-            
+
     def long(self, text_width=80, left_column_width=17):
         """
         Return detailed information about a list of records, as text with a
             left_column = []
             right_column = []
             for field in fields:
-                left_column.append("%s%ds" % ("%-",left_column_width) % field.title())
-                entry = getattr(record,field)
+                left_column.append("%s%ds" % ("%-", left_column_width) % field.title())
+                entry = getattr(record, field)
                 if hasattr(entry, "pretty"):
                     new_lines = entry.pretty().split("\n")
                 else:
                     new_lines = ['']
                 right_column.extend(new_lines)
                 if len(new_lines) > 1:
-                    left_column.extend([' '*left_column_width]*(len(new_lines)-1))
+                    left_column.extend([' ' * left_column_width] * (len(new_lines) - 1))
                 #import pdb; pdb.set_trace()
             for left, right in zip(left_column, right_column):
                 output += left + ": " + right + "\n"
         return output
-    
+
     def table(self):
         """
         Return information about a list of records as text, in a simple
         """
         tt = TextTable(fields, self.records)
         return str(tt)
-    
+
 
 class TextTable(object):
     """
     implementations around, e.g. http://pypi.python.org/pypi/texttable/0.6.0/
     but for now I'd like to avoid too many dependencies.
     """
-    
+
     def __init__(self, headers, rows, max_column_width=20):
         self.headers = headers
         self.rows = rows
         self.max_column_width = max_column_width
-        
+
     def calculate_column_widths(self):
         column_widths = []
         for header in self.headers:
             column_width = max([len(header)] + [len(str(getattr(row, header))) for row in self.rows])
             column_widths.append(min(self.max_column_width, column_width))
         return column_widths
-            
+
     def __str__(self):
         column_widths = self.calculate_column_widths()
         format = "| " + " | ".join("%%-%ds" % w for w in column_widths) + " |\n"
     """
     Create a shell script that can be used to repeat a series of computations.
     """
-    
+
     def short(self):
         import operator
         output = ("#!/bin/sh\n"
         if self.tags:
             output += "# tagged with %s\n" % ",".join(tags)
         if self.project.description:
-            output += textwrap.TextWrapper(initial_indent = "# ", subsequent_indent="# ").fill(self.project.description)
+            output += textwrap.TextWrapper(initial_indent="# ", subsequent_indent="# ").fill(self.project.description)
         cleanup = "\n\n# Clean-up temporary files\n"
 
         output += "\n# Original hardware environment:\n"
         platforms = list(set(reduce(operator.add, [record.platforms for record in self.records])))
         for i, platform in enumerate(platforms):
-            output += "#   Machine #%d: %s processor running %s. %s(%s)\n" % (i+1, platform.machine, platform.version, platform.network_name, platform.ip_addr, )
+            output += "#   Machine #%d: %s processor running %s. %s(%s)\n" % (i + 1, platform.machine, platform.version, platform.network_name, platform.ip_addr, )
 
         output += "\n# Original software environment:\n"
         repositories = set(record.repository for record in self.records)
             output += "#   %s repository at %s\n" % (record.repository.vcs_type, record.repository.upstream or record.repository.url)
         dependency_sets = list(set(tuple(sorted(record.dependencies)) for record in self.records))
         for i, dependency_set in enumerate(dependency_sets):
-            output += "#   Dependency set #%d: %s\n" % (i+1, dependency_set)
+            output += "#   Dependency set #%d: %s\n" % (i + 1, dependency_set)
 
         current_directory = ''
         current_version = ''
         for record in reversed(self.records):  # oldest first
-            output += "\n# " + "-"*77 + "\n"
+            output += "\n# " + "-" * 77 + "\n"
             output += "# %s\n" % record.label
             output += "# Originally run on %s by %s\n" % (record.timestamp.strftime("%Y-%m-%d at %H:%M:%S"), record.user)
             if len(platforms) > 1:
     Format information about a group of Sumatra records as HTML fragments, to
     be included in a larger document.
     """
-    
+
     def short(self):
         """
         Return a list of record labels as an HTML unordered list.
         """
         return "<ul>\n<li>" + "</li>\n<li>".join(record.label for record in self.records) + "</li>\n</ul>"
-    
+
     def long(self):
         """
         Return detailed information about a list of records as an HTML
         (re.compile(r'\.\.\.+'), r'\\ldots'),
         (re.compile(r'\<'), r'\\textless{}'),
         (re.compile(r'\>'), r'\\textgreater{}')
-        )
-    
+    )
+
     @staticmethod
     def _escape_tex(value):
         """Inspired by http://flask.pocoo.org/snippets/55/"""
             newval = pattern.sub(replacement, newval)
         newval = newval.replace('/', '/ ')
         return newval
-    
+
     def long(self):
         from os.path import dirname, join
         from jinja2 import Environment, FileSystemLoader
                                paper_size='a4paper')
 
 
+class TextDiffFormatter(Formatter):
 
-class TextDiffFormatter(Formatter):
     """
     Format information about the differences between two Sumatra records in
     text format.
     """
-    
+
     def __init__(self, diff):
         self.diff = diff
-        
+
     def short(self):
         """Return a summary of the differences between two records."""
         def yn(x):
             Script arguments differ : %s
             Parameters differ       : %s
             Data differ             : %s""" % (
-                D.recordA.label,
-                D.recordB.label,
-                yn(D.executable_differs),
-                yn(D.code_differs),
-                yn(D.repository_differs), yn(D.main_file_differs),
-                yn(D.version_differs), yn(D.diff_differs),
-                yn(D.dependencies_differ),
-                yn(D.launch_mode_differs),
-                yn(D.input_data_differ),
-                yn(D.script_arguments_differ),
-                yn(D.parameters_differ),
-                yn(D.output_data_differ))
-            )
+            D.recordA.label,
+            D.recordB.label,
+            yn(D.executable_differs),
+            yn(D.code_differs),
+            yn(D.repository_differs), yn(D.main_file_differs),
+            yn(D.version_differs), yn(D.diff_differs),
+            yn(D.dependencies_differ),
+            yn(D.launch_mode_differs),
+            yn(D.input_data_differ),
+            yn(D.script_arguments_differ),
+            yn(D.parameters_differ),
+            yn(D.output_data_differ))
+        )
         return output
-    
+
     def long(self):
         """
         Return a detailed description of the differences between two records.
                     output += "  %s is a dependency of %s but not of %s\n" % (name, self.diff.recordA.label, self.diff.recordB.label)
                 elif depA is None:
                     output += "  %s is a dependency of %s but not of %s\n" % (name, self.diff.recordB.label, self.diff.recordA.label)
-                    
+
         diffs = self.diff.launch_mode_differences
         if diffs:
             output += "Launch mode differences:\n"
             if AnotB:
                 output += "  Generated by %s:\n" % self.diff.recordA.label
                 for key in AnotB:
-                    output += "    %s\n" % key 
+                    output += "    %s\n" % key
             if BnotA:
                 output += "  Generated by %s:\n" % self.diff.recordB.label
                 for key in BnotA:
-                    output += "    %s\n" % key 
+                    output += "    %s\n" % key
         return output
 
 
     'textdiff': TextDiffFormatter,
 }
 
+
 def get_formatter(format):
     """
     Return a :class:`Formatter` object of the appropriate type. ``format``
     (h, rem) = _quotient_remainder(rem, 60 * 60)
     (m, rem) = _quotient_remainder(rem, 60)
     s = rem + fractional_part
-    
+
     return ' '.join(
         templ.format(val)
         for (val, templ) in [
             (h, '{0}h'),
             (m, '{0}m'),
             (s, '{0:.2f}s'),
-            ]
+        ]
         if val != 0
-        )
+    )

sumatra/parameters.py

 import os.path
 import shutil
 try:
-    from ConfigParser import SafeConfigParser, MissingSectionHeaderError  # Python 2
+    from ConfigParser import SafeConfigParser, MissingSectionHeaderError, NoOptionError  # Python 2
 except ImportError:
-    from configparser import SafeConfigParser, MissingSectionHeaderError  # Python 3
+    from configparser import SafeConfigParser, MissingSectionHeaderError, NoOptionError  # Python 3
 try:
     import json
 except ImportError:
 import parameters
 from .compatibility import string_type, StringIO
 
+POP_NONE = "eiutbocqnluiegnclqiuetyvbietcbdgsfzpq"
+
 
 class YAMLParameterSet(object):
     """
         return filename
 
     def update(self, E, **F):
-        __doc__ = dict.update.__doc__
         self.values.update(E, **F)
+    update.__doc__ = dict.update.__doc__
 
     def pop(self, key, d=None):
         if key in self.values:
     def __ne__(self, other):
         return not self.__eq__(other)
 
-    def pop(self, k, d=None):
+    def pop(self, k, d=POP_NONE):
         if k in self.values:
             v = self.values.pop(k)
             self.types.pop(k)
             self.comments.pop(k, None)
             return v
-        elif d:
+        elif d is not POP_NONE:
             return d
         else:
             raise KeyError("%s not found" % k)
         return filename
 
     def update(self, E, **F):
-        __doc__ = dict.update.__doc__
         def _update(name, value):
             if not isinstance(value, (int, float, string_type, list)):
                 raise TypeError("value must be a numeric value or a string")
             self.values[name] = value
             self.types[name] = type(value)
         if hasattr(E, "items"):
-            for name,value in E.items():
+            for name, value in E.items():
                 _update(name, value)
         else:
             for name, value in E:
                 _update(name, value)
-        for name,value in F.items():
+        for name, value in F.items():
             _update(name, value)
+    update.__doc__ = dict.update.__doc__
 
 
 class ConfigParserParameterSet(SafeConfigParser):
                 self.read(initialiser)
                 self.source_file = initialiser
             else:
-                input = StringIO(str(initialiser)) # configparser has some problems with unicode. Using str() is a crude, and probably partial fix.
+                input = StringIO(str(initialiser))  # configparser has some problems with unicode. Using str() is a crude, and probably partial fix.
                 input.seek(0)
                 self.readfp(input)
         except MissingSectionHeaderError:
         return filename
 
     def update(self, E, **F):
-        __doc__ = dict.update.__doc__
         def _update(name, value):
             if "." in name:
                 section, option = name.split(".")
             else:
-                section = "sumatra" # used for extra parameters added by sumatra
+                section = "sumatra"  # used for extra parameters added by sumatra
                 option = name
             if not self.has_section(section):
                 self.add_section(section)
                 value = str(value)
             self.set(section, option, value)
         if hasattr(E, "items"):
-            for name,value in E.items():
+            for name, value in E.items():
                 _update(name, value)
         else:
             for name, value in E:
                 _update(name, value)
-        for name,value in F.items():
+        for name, value in F.items():
             _update(name, value)
+    update.__doc__ = dict.update.__doc__
 
-    def pop(self, name, d=None):
+    def pop(self, name, d=POP_NONE):
         if "." in name:
             section, option = name.split(".")
-            value = self.get(section, option)
-            self.remove_option(section, option)
+            try:
+                value = self.get(section, option)
+                self.remove_option(section, option)
+            except NoOptionError:
+                if d is not POP_NONE:
+                    value = d
+                else:
+                    raise KeyError(name)
             return value
         elif self.has_option("sumatra", name):
             value = self.get("sumatra", name)
         return filename
 
     def update(self, E, **F):
-        __doc__ = dict.update.__doc__
         self.values.update(E, **F)
+    update.__doc__ = dict.update.__doc__
 
     def pop(self, key, d=None):
         if key in self.values:
         return JSONParameterSet(filename)
     elif yaml_loaded and ext == ".yaml":
         return YAMLParameterSet(filename)
-    
     try:
         parameters = JSONParameterSet(filename)
         return parameters

sumatra/programs.py

     # store compilation/configuration options? yes, if we can determine them
     requires_script = False  # does this executable require a script file
     name = None
-    
+
     def __init__(self, path, version=None, options="", name=None):
         if path and os.path.exists(path):
             self.path = path
         if self.name is None:
             self.name = name or os.path.basename(self.path)
         self.version = version or self._get_version()
-        self.options = options        
+        self.options = options
 
     def __repr__(self):
         s = "%s (version: %s) at %s" % (self.name, self.version, self.path)
             executable_name = executable_name + '.exe'
         for path in os.getenv('PATH').split(os.path.pathsep):
             if os.path.exists(os.path.join(path, executable_name)):
-                found += [path] 
+                found += [path]
         if not found:
             raise Warning('%s could not be found. Please supply the path to the %s executable.' % (self.name, executable_name))
         else:
-            executable = os.path.join(found[0], executable_name) 
+            executable = os.path.join(found[0], executable_name)
             if len(found) == 1:
                 print('Using %s' % executable)
             else:
 
     def __eq__(self, other):
         return type(self) == type(other) and self.path == other.path and self.name == other.name and self.version == other.version and self.options == other.options
-    
+
     def __ne__(self, other):
         return not self.__eq__(other)
-    
+
     def __getstate__(self):
         return {'path': self.path, 'version': self.version, 'options': self.options, 'name': self.name}
-    
+
     def __setstate__(self, d):
         self.__dict__ = d
-    
+
     @staticmethod
     def write_parameters(parameters, filebasename):
         filename = parameters.save(filebasename, add_extension=True)
     name = "NEST"
     default_executable_name = 'nest'
     requires_script = True
-    
+
 
 class GENESISSimulator(Executable):
     name = "GENESIS"
 registered_executables = {}
 registered_extensions = {}
 
-    
+
 def register_executable(cls, name, executables, extensions):
     """Register a new subclass of Executable that can be returned by get_executable()."""
     assert issubclass(cls, Executable)
     for ext in extensions:
         registered_extensions[ext] = cls
 
-    
+
 register_executable(NEURONSimulator, 'NEURON', ('nrniv', 'nrngui'), ('.hoc', '.oc'))
 register_executable(PythonExecutable, 'Python', ('python', 'python2', 'python3',
                                                  'python2.5', 'python2.6', 'python2.7',
         else:
             raise Exception("Extension not recognized.")
     else:
-        raise Exception('Either path or script_file must be specified')  
+        raise Exception('Either path or script_file must be specified')
     return program

sumatra/projects.py

 """
 
 import os
-import sys
 import re
 try:
     import cPickle as pickle
 import django
 import sqlite3
 import time
+import shutil
+from datetime import datetime
 from sumatra.records import Record
 from sumatra import programs, datastore
 from sumatra.formatting import get_formatter, get_diff_formatter
 DEFAULT_PROJECT_FILE = "project"
 
 LABEL_GENERATORS = {
-    'timestamp': lambda: None, # this is the default, implemented in the Record class
+    'timestamp': lambda: None,  # this is the default, implemented in the Record class
     'uuid': lambda: str(uuid.uuid4()).split('-')[-1]
 }
 
 
-def _remove_left_margin(s): # replace this by textwrap.dedent?
+def _remove_left_margin(s):  # replace this by textwrap.dedent?
     lines = s.strip().split('\n')
     return "\n".join(line.strip() for line in lines)
 
+
 def _get_project_file(path):
     return os.path.join(path, ".smt", DEFAULT_PROJECT_FILE)
 
         else:
             raise ValueError("Invalid project name. Names may only contain letters, numbers, spaces and hyphens")
         self.default_executable = default_executable
-        self.default_repository = default_repository # maybe we should be storing the working copy instead, as this has a ref to the repository anyway
+        self.default_repository = default_repository  # maybe we should be storing the working copy instead, as this has a ref to the repository anyway
         self.default_main_file = default_main_file
         self.default_launch_mode = default_launch_mode
         if data_store == 'default':
             data_store = datastore.FileSystemDataStore(None)
-        self.data_store = data_store # a data store object
+        self.data_store = data_store  # a data store object
         self.input_datastore = input_datastore or self.data_store
         if record_store == 'default':
             record_store = DefaultRecordStore(os.path.abspath(".smt/records"))
         self.description = description
         self.data_label = data_label
         self.label_generator = label_generator
-        self.timestamp_format = timestamp_format        
+        self.timestamp_format = timestamp_format
         self.sumatra_version = sumatra.__version__
         self.allow_command_line_parameters = allow_command_line_parameters
-        self._most_recent = None            
+        self._most_recent = None
         self.save()
         print("Sumatra project successfully set up")
-        
+
     def __set_data_label(self, value):
         assert value in (None, 'parameters', 'cmdline')
         self._data_label = value
-        
+
     def __get_data_label(self):
         return self._data_label
     data_label = property(fset=__set_data_label, fget=__get_data_label)
-    
+
     def save(self):
         """Save state to some form of persistent storage. (file, database)."""
         state = {}
                     state[name][key] = value
             else:
                 state[name] = attr
-        f = open(_get_project_file(self.path), 'w') # should check if file exists?
+        f = open(_get_project_file(self.path), 'w')  # should check if file exists?
         json.dump(state, f, indent=2)
         f.close()
-    
+
     def info(self):
         """Show some basic information about the project."""
         template = """
         Sumatra version     : %(sumatra_version)s
         """
         return _remove_left_margin(template % self.__dict__)
-    
+
     def new_record(self, parameters={}, input_data=[], script_args="",
                    executable='default', repository='default',
                    main_file='default', version='current', launch_mode='default',
                    label=None, reason=None, timestamp_format='default'):
         logger.debug("Creating new record")
-        if executable == 'default':
-            executable = deepcopy(self.default_executable)           
-        if repository == 'default':
+        if executable == 'default':
+            executable = deepcopy(self.default_executable)
+        if repository == 'default':
             repository = deepcopy(self.default_repository)
-        if main_file == 'default':
+        if main_file == 'default':
             main_file = self.default_main_file
-        if launch_mode == 'default':
+        if launch_mode == 'default':
             launch_mode = deepcopy(self.default_launch_mode)
-        if timestamp_format == 'default':
+        if timestamp_format == 'default':
             timestamp_format = self.timestamp_format
         working_copy = repository.get_working_copy()
         version, diff = self.update_code(working_copy, version)
         if label is None:
             label = LABEL_GENERATORS[self.label_generator]()
         record = Record(executable, repository, main_file, version, launch_mode,
-                        self.data_store, parameters, input_data, script_args, 
+                        self.data_store, parameters, input_data, script_args,
                         label=label, reason=reason, diff=diff,
                         on_changed=self.on_changed,
                         input_datastore=self.input_datastore,
         if not isinstance(executable, programs.MatlabExecutable):
             record.register(working_copy)
         return record
-    
+
     def launch(self, parameters={}, input_data=[], script_args="",
                executable='default', repository='default', main_file='default',
-               version='current', launch_mode='default', label=None, reason=None, 
+               version='current', launch_mode='default', label=None, reason=None,
                timestamp_format='default', repeats=None):
         """Launch a new simulation or analysis."""
         record = self.new_record(parameters, input_data, script_args,
                                  executable, repository, main_file, version,
-                                 launch_mode, label, reason, timestamp_format) 
+                                 launch_mode, label, reason, timestamp_format)
         record.run(with_label=self.data_label)
         if 'matlab' in record.executable.name.lower():
             record.register(record.repository.get_working_copy())
         self.add_record(record)
         self.save()
         return record.label
-    
+
     def update_code(self, working_copy, version='current'):
         """Check if the working copy has modifications and prompt to commit or revert them."""
         # we really need to extend this to the dependencies, but we need to take extra special care that the
         if version == 'current' or version == working_copy.current_version:
             if changed:
                 if self.on_changed == "error":
-                    raise UncommittedModificationsError("Code has changed, please commit your changes")    
+                    raise UncommittedModificationsError("Code has changed, please commit your changes")
                 elif self.on_changed == "store-diff":
                     diff = working_copy.diff()
                 else:
             working_copy.use_version(version)
         version = working_copy.current_version()
         return version, diff
-    
+
     def add_record(self, record):
         """Add a simulation or analysis record."""
         success = False
                 self.record_store.save(self.name, record)
                 success = True
                 self._most_recent = record.label
-            except django.db.utils.DatabaseError, sqlite3.OperationalError:
+            except (django.db.utils.DatabaseError, sqlite3.OperationalError):
                 print "Failed to save record due to database error. Trying again in {} seconds. (Attempt {}/{})".format(sleep_seconds, cnt, max_tries)
                 time.sleep(sleep_seconds)
                 cnt += 1
         if cnt == max_tries:
             print "Reached maximum number of attempts to save record. Aborting."
-    
+
     def get_record(self, label):
         """Search for a record with the supplied label and return it if found.
            Otherwise return None."""
         return self.record_store.get(self.name, label)
-    
+
     def delete_record(self, label, delete_data=False):
         """Delete a record. Return 1 if the record is found.
            Otherwise return 0."""
             self.get_record(label).delete_data()
         self.record_store.delete(self.name, label)
         self._most_recent = self.record_store.most_recent(self.name)
-    
+
     def delete_by_tag(self, tag, delete_data=False):
         """Delete all records with a given tag. Return the number of records deleted."""
         if delete_data:
         n = self.record_store.delete_by_tag(self.name, tag)
         self._most_recent = self.record_store.most_recent(self.name)
         return n
-    
+
     def format_records(self, format='text', mode='short', tags=None, reverse=False):
         records = self.record_store.list(self.name, tags)
         if reverse:
             records.reverse()
         formatter = get_formatter(format)(records, project=self, tags=tags)
-        return formatter.format(mode) 
+        return formatter.format(mode)
 
     def most_recent(self):
         try:
         try:
             record = self.record_store.get(self.name, label)
         except Exception as e:
-            raise Exception("%s. label=<%s>" % (e,label))
+            raise Exception("%s. label=<%s>" % (e, label))
         record.outcome = comment
         self.record_store.save(self.name, record)
-        
+
     def add_tag(self, label, tag):
         record = self.record_store.get(self.name, label)
         record.tags.add(tag)
         self.record_store.save(self.name, record)
-    
+
     def remove_tag(self, label, tag):
         record = self.record_store.get(self.name, label)
         record.tags.remove(tag)
         self.record_store.save(self.name, record)
-    
+
     def compare(self, label1, label2, ignore_mimetypes=[], ignore_filenames=[]):
         record1 = self.record_store.get(self.name, label1)
         record2 = self.record_store.get(self.name, label2)
-        return record1.difference(record2, ignore_mimetypes, ignore_filenames)        
-    
+        return record1.difference(record2, ignore_mimetypes, ignore_filenames)
+
     def show_diff(self, label1, label2, mode='short', ignore_mimetypes=[], ignore_filenames=[]):
         diff = self.compare(label1, label2, ignore_mimetypes, ignore_filenames)
         formatter = get_diff_formatter()(diff)
 
     def export(self):
         # copy the project data
-        import shutil
         shutil.copy(".smt/project", ".smt/project_export.json")
         # export the record data
         f = open(".smt/records_export.json", 'w')
         f.write(self.record_store.export(self.name))
         f.close()
-    
+
     def repeat(self, original_label, new_label=None):
         if original_label == 'last':
             tmp = self.most_recent()
         else:
             tmp = self.get_record(original_label)
         original = deepcopy(tmp)
-        if hasattr(tmp.parameters, '_url'): # for some reason, _url is not copied.
-            original.parameters._url = tmp.parameters._url # this is a hackish solution - needs fixed properly
+        if hasattr(tmp.parameters, '_url'):  # for some reason, _url is not copied.
+            original.parameters._url = tmp.parameters._url  # this is a hackish solution - needs fixed properly
         try:
             working_copy = get_working_copy()
         except VersionControlError:
         return new_label, original.label
 
     def backup(self):
-        """        
+        """
         Create a new backup directory in the same location as the
         project directory and copy the contents of the project
         directory into the backup directory. Uses `_get_project_file`
         to extract the path to the project directory.
-        
+
         :return:
           - `backup_dir`: the directory used for the backup
 
         """
-        import shutil
-        from datetime import datetime
         smt_dir = os.path.split(_get_project_file(self.path))[0]
-        backup_dir = smt_dir + "_backup_%s" % datetime.now().strftime(TIMESTAMP_FORMAT)    
+        backup_dir = smt_dir + "_backup_%s" % datetime.now().strftime(TIMESTAMP_FORMAT)
         shutil.copytree(smt_dir, backup_dir)
         return backup_dir
 
+    def change_record_store(self, new_store):
+        """
+        Change the record store that is used by this project.
+        """
+        self.backup()
+        old_store = self.record_store
+        new_store.sync(old_store, self.name)
+        self.record_store = new_store
+
+
 def _load_project_from_json(path):
     f = open(_get_project_file(path), 'r')
     data = json.load(f)
     prj.path = path
     for key, value in data.items():
         if isinstance(value, dict) and "type" in value:
-            parts = str(value["type"]).split(".") # make sure not unicode, see http://stackoverflow.com/questions/1971356/haystack-whoosh-index-generation-error/2683624#2683624
+            parts = str(value["type"]).split(".")  # make sure not unicode, see http://stackoverflow.com/questions/1971356/haystack-whoosh-index-generation-error/2683624#2683624
             module_name = ".".join(parts[:-1])
             class_name = parts[-1]
-            _temp = __import__(module_name, globals(), locals(), [class_name], -1) # from <module_name> import <class_name>
+            _temp = __import__(module_name, globals(), locals(), [class_name], -1)  # from <module_name> import <class_name>
             cls = getattr(_temp, class_name)
             args = {}
-            for k,v in value.items():
+            for k, v in value.items():
                 if k != 'type':
-                    args[str(k)] = v # need to use str() as json module uses all unicode
+                    args[str(k)] = v  # need to use str() as json module uses all unicode
             setattr(prj, key, cls(**args))
         else:
             setattr(prj, key, value)
     return prj
 
+
 def _load_project_from_pickle(path):
     # earlier versions of Sumatra saved Projects using pickle
     f = open(_get_project_file(path), 'r')
     f.close()
     return prj
 
+
 def load_project(path=None):
     """
     Read project from directory passed as the argument and return Project
         if p == oldp:
             raise IOError("No Sumatra project exists in the current directory or above it.")
     mimetypes.init([os.path.join(p, ".smt", "mime.types")])
-    #try:
+    # try:
     prj = _load_project_from_json(p)
-    #except Exception:
+    # except Exception:
     #    prj = _load_project_from_pickle(p)
     return prj

sumatra/publishing/latex/includefigure.py

 except ImportError:
     from configparser import SafeConfigParser
 from sumatra.publishing.utils import determine_project, determine_record_store, \
-                                     determine_project_name, get_image, \
-                                     record_link_url, get_record_label_and_image_path
+    determine_project_name, get_image, \
+    record_link_url, get_record_label_and_image_path
 
 
 logger = logging.getLogger("Sumatra")
     # determine project and record store to use
     prj = determine_project(sumatra_options)
     record_store = determine_record_store(prj, sumatra_options)
-    project_name = determine_project_name(prj, sumatra_options)    
+    project_name = determine_project_name(prj, sumatra_options)
     logger.info("Project name: %s", project_name)
     logger.info("Record store: %s", record_store)
-    
+
     # get record, obtain image uri
     record_label, image_path = get_record_label_and_image_path(sumatra_options['label'])
     record = record_store.get(project_name, record_label)
     if graphics_options:
         include_graphics_cmd += "[%s]" % ",".join("%s=%s" % item for item in graphics_options.items())
     include_graphics_cmd += "{%s}" % local_filename
-    
+
     # if record_store is web-accessible, wrap the image in a hyperlink
     if hasattr(record_store, 'server_url'):
         target = record_link_url(record_store.server_url, project_name, record_label)
         cmd = "\href{%s}{%s}" % (target, include_graphics_cmd)
     else:
         cmd = include_graphics_cmd
-        
+
     print(cmd)
 
 
 
 
 if __name__ == '__main__':
-    generate_latex_command(*read_config(sys.argv[1]))
+    generate_latex_command(*read_config(sys.argv[1]))

sumatra/publishing/sphinxext/__init__.py

 
 from .sumatra_rst import SumatraImage, smt_link_role
 
-def setup(app):    
+
+def setup(app):
     app.add_config_value('sumatra_record_store', None, 'env')
     app.add_config_value('sumatra_project', None, 'env')
     app.add_config_value('sumatra_link_icon', 'icon_info.png', 'html')
     app.add_directive('smtimage', SumatraImage)
     app.add_role('smtlink', smt_link_role)
-    #app.connect('env-purge-doc', purge_smtimages)  # do we need this?
+    # app.connect('env-purge-doc', purge_smtimages)  # do we need this?

sumatra/publishing/sphinxext/sumatra_rst.py

 from sumatra.projects import load_project
 from sumatra.recordstore import get_record_store
 from sumatra.publishing.utils import determine_project, determine_record_store, \
-                                     determine_project_name, record_link_url, \
-                                     get_image, get_record_label_and_image_path
+    determine_project_name, record_link_url, \
+    get_image, get_record_label_and_image_path
 import os.path
 
 
 roles.register_local_role('smtlink', smt_link_role)
 
 
-
 def build_options(global_settings, local_options):
     if hasattr(global_settings, 'env'):  # using sphinx
         config = global_settings.env.config
                    'record_store': directives.unchanged,
                    'digest': directives.unchanged,
                    }
-    
+
     def run(self):
         if 'align' in self.options:
             if isinstance(self.state, states.SubstitutionDef):
         # determine which record store to use
         prj = determine_project(sumatra_options)
         record_store = determine_record_store(prj, sumatra_options, self.error)
-        
+
         # determine the project (short) name
         project_name = determine_project_name(prj, sumatra_options, self.error)
-        
+
         record_label, image_path = get_record_label_and_image_path(self.arguments[0])
         record = record_store.get(project_name, record_label)
         image = get_image(record, image_path, self.options, self.error)  # automatically checks digest
             self.options['target'] = record_link_url(record_store.server_url, project_name, record_label)
         if not 'alt' in self.options:
             self.options['alt'] = "Data file generated by computation %s" % record_label
-        
+
         # --------------------------------------------------------------------
         # following code from Image.run()
         self.options['uri'] = reference

sumatra/publishing/utils.py

 
 _cache = {}
 
+
 def mkdir(path):
     try:
         os.makedirs(path)
     except OSError as exc:
         if exc.errno == errno.EEXIST and os.path.isdir(path):
             pass
-        else: raise
+        else:
+            raise
 
 
 class cache(object):
     """Cache decorator"""
     global _cache
+
     def __init__(self, func):
         self.func = func
+
     def __call__(self, options):
         assert isinstance(options, dict)
         if 'project' in options and 'record_store' in options:
     if '?' in ref:
         parts = ref.split("?")
         if len(parts) == 2:
-            record_label, image_path =  parts
+            record_label, image_path = parts
             image_path = "?" + image_path
         else:
             raise Exception("Invalid record/path query")
     elif ':' in ref:
         parts = ref.split(":")
         if len(parts) == 2:
-            record_label, image_path =  parts
+            record_label, image_path = parts
         else:
             raise Exception("Invalid record/path reference")
     else:
-        record_label, image_path =  ref, None
+        record_label, image_path = ref, None
     return record_label, image_path
 
 
                     break
         if image_key is None:
             raise ValueError("Record %s has no output data file with path %s. Valid paths are: %s" % (
-                    record.label, image_path, ", ".join(key.path for key in record.output_data)))
+                record.label, image_path, ", ".join(key.path for key in record.output_data)))
     assert isinstance(image_key, DataKey)
     # check expected digest, if supplied, against key.digest
     if ('digest' in sumatra_options
-        and sumatra_options['digest'] != image_key.digest):
+            and sumatra_options['digest'] != image_key.digest):
         raise err('Digests do not match')
     return record.datastore.get_data_item(image_key)  # checks key.digest against file contents
 

sumatra/recordstore/__init__.py

     have_http = True
 except ImportError:
     have_http = False
-    
+
 if have_http:
     from .http_store import HttpRecordStore
-    
+
 DefaultRecordStore = have_django and DjangoRecordStore or ShelveRecordStore
 
 

sumatra/recordstore/base.py

     """
     Base class for record store implementations.
     """
-    
+
     def list_projects(self):
         """Return the names of all projects that have records in this store."""
         raise NotImplementedError
     def save(self, project_name, record):
         """Store the given record under the given project."""
         raise NotImplementedError
-        
+
     def get(self, project_name, label):
         """Retrieve the record with the given label from the given project."""
         raise NotImplementedError
-    
+
     def list(self, project_name, tags=None):
         """
         Return a list of records for the given project.
-        
+
         If *tags* is not provided, list all records, otherwise list only records
         that have been tagged with one or more of the tags.
         """
         raise NotImplementedError
-    
+
     def labels(self, project_name):
         """Return the labels of all records in the given project."""
         raise NotImplementedError
-    
+
     def delete(self, project_name, label):
         """Delete the record with the given label from the given project."""
         raise NotImplementedError
     def delete_all(self):
         """Delete all records from the store."""
         raise NotImplementedError
-        
+
     def delete_by_tag(self, project_name, tag):
         """Delete all records from the given project that have been tagged with the given tag."""
         raise NotImplementedError
-        
+
     def most_recent(self, project_name):
         """Return the most recent record from the given project."""
         raise NotImplementedError
-    
+
     def export(self, project_name, indent=2):
         """Export store contents as JSON."""
         return "[" + ",\n".join(serialization.encode_record(record, indent=indent)
         """
         Synchronize two record stores so that they contain the same records for
         a given project.
-        
+
         Where the two stores have the same label (within a project) for
         different records, those records will not be synced. The method
         returns a list of non-synchronizable records (empty if the sync worked
         for label in only_in_other:
             self.save(project_name, other.get(project_name, label))
         return non_synchronizable
-    
+
     def sync_all(self, other):
         """Synchronize all records from all projects between two record stores."""
         all_projects = set(self.list_projects()).union(other.list_projects())
         for project_name in all_projects:
             self.sync(other, project_name)
-            
+
     def has_project(self, project_name):
         """Does the store contain any records for the given project?"""
         raise NotImplementedError
-    
+
     def list_projects(self):
         """Return the names of all projects that have records in the store."""
         raise NotImplementedError
     def update(self, project_name, field, value, tags=None):
         """
         Modify the records for a given project.
-        
+
         Arguments:
           *field*: the name of a record attribute, e.g. "datastore.root"
           *value*: 
                 obj = getattr(obj, part)
             setattr(obj, parts[-1], value)
             self.save(project_name, record)
-        
-    
+
 
 class RecordStoreAccessError(OSError):
     pass

sumatra/recordstore/django_store/__init__.py

 # it, but that seems to mess with Django's internals.
 imp.find_module("tagging")
 
+
 class DjangoConfiguration(object):
     """
     To allow multiple DjangoRecordStore instances to exist at the same
     steps, only actually doing the Django configuration step at the last
     possible moment.
     """
-   
+
     def __init__(self):
         self._settings = {
             'DEBUG': True,
             'DATABASES': {},
             'INSTALLED_APPS': ('sumatra.recordstore.django_store',
-                               'django.contrib.contenttypes', # needed for tagging
+                               'django.contrib.contenttypes',  # needed for tagging
                                'tagging'),
         }
         self._n_databases = 0
             db['ENGINE'] = 'django.db.backends.sqlite3'
             db['NAME'] = os.path.abspath(parse_result.path)
         return db
-        
+
     def add_database(self, uri):
         """
         Add a database to the configuration and return a label. If the database
         already exists in the configuration, just return the existing label.
         """
         db = self.uri_to_db(uri)
-        
+
         if self.contains_database(db):
             for key, db_tmp in self._settings['DATABASES'].items():
                 if db == db_tmp:
             self._settings['DATABASES'][label] = db
             self._n_databases += 1
         return label
-    
+
     def contains_database(self, db):
         return db in [db_tmp for label, db_tmp in self._settings['DATABASES'].items()]
-    
+
     def _create_databases(self):
         for label, db in self._settings['DATABASES'].items():
             if 'sqlite' in db['ENGINE']:
     the Django object-relational mapper (ORM), which means that any database
     supported by Django could in principle be used, although for now we assume
     SQLite.
-    
+
     This record store is needed for the *smtweb* interface.
     """
-    
+
     def __init__(self, db_file='.smt/records'):
         self._db_label = db_config.add_database(db_file)
         self._db_file = db_file
-                
+
     def __str__(self):
-        return "Relational database record store using the Django ORM (database file=%s)" % self._db_file
-        
+        return "Django (%s)" % self._db_file
+
     def __getstate__(self):
         return {'db_file': self._db_file}
-    
+
     def __setstate__(self, state):
         self._db_file = state['db_file']
         try:
             self._db_label = db_config.add_database(self._db_file)
         except:
-            pass        
-    
+            pass
+
     def _get_models(self):
         if not db_config.configured:
             db_config.configure()
         import models
         return models
-    
+
     def _switch_db(self, db_file):
         # for testing
         global db_config
         db_config = DjangoConfiguration()
         if db_file:
             self.__init__(db_file)
-    
+
     @property
     def _manager(self):
         models = self._get_models()
         return models.Record.objects.using(self._db_label)
-    
+
     def _get_db_record(self, project_name, record):
         models = self._get_models()
         db_project = self._get_db_project(project_name)
             db_record = models.Record(label=record.label, project=db_project)
             db_record._state.db = self._db_label
         return db_record
-    
+
     def _get_db_project(self, project_name):
         models = self._get_models()
         try:
         db_obj, created = cls.objects.get_or_create_from_sumatra_object(obj, using=self._db_label)
         if created:
             db_obj.save(using=self._db_label)
-        return db_obj        
-    
+        return db_obj
+
     def list_projects(self):
         models = self._get_models()
         return [project.id for project in models.Project.objects.using(self._db_label).all()]
-    
+
     def save(self, project_name, record):
         db_record = self._get_db_record(project_name, record)
         for attr in 'reason', 'duration', 'outcome', 'main_file', 'version', 'timestamp':
         db_record.tags = ",".join(record.tags)
         db_record.stdout_stderr = record.stdout_stderr
         # should perhaps check here for any orphan Tags, i.e., those that are no longer associated with any records, and delete them
-        db_record.save() # need to save before using many-to-many relationship
+        db_record.save()  # need to save before using many-to-many relationship
         chunk_size = 900  # SQLite has problems with inserts >= ca. 1000, so for safety we split it into chunks
         for i in xrange(0, len(record.input_data), chunk_size):
-            db_keys = (self._get_db_obj('DataKey', key) for key in record.input_data[i:i+chunk_size])
+            db_keys = (self._get_db_obj('DataKey', key) for key in record.input_data[i:i + chunk_size])
             db_record.input_data.add(*db_keys)
         for i in xrange(0, len(record.output_data), chunk_size):
-            db_keys = (self._get_db_obj('DataKey', key) for key in record.output_data[i:i+chunk_size])
+            db_keys = (self._get_db_obj('DataKey', key) for key in record.output_data[i:i + chunk_size])
             db_record.output_data.add(*db_keys)
         if record.dependencies:
             for dep in record.dependencies:
-                #print "Adding dependency %s to db_record" % dep
+                # print "Adding dependency %s to db_record" % dep
                 db_record.dependencies.add(self._get_db_obj('Dependency', dep))
         for pi in record.platforms:
             db_record.platforms.add(self._get_db_obj('PlatformInformation', pi))
         db_record.diff = record.diff
         db_record.repeats = record.repeats
         db_record.save(using=self._db_label)
-        
+
     def get(self, project_name, label):
         models = self._get_models()
         try:
         except models.Record.DoesNotExist:
             raise KeyError(label)
         return db_record.to_sumatra()
-    
+
     def list(self, project_name, tags=None):
         db_records = self._manager.filter(project__id=project_name).select_related()
         if tags:
 
     def labels(self, project_name):
         return [record.label for record in self._manager.filter(project__id=project_name)]
-    
+
     def delete(self, project_name, label):
         db_record = self._manager.get(label=label, project__id=project_name)
         db_record.delete()
-        
+
     def delete_by_tag(self, project_name, tag):
         db_records = self._manager.filter(project__id=project_name, tags__contains=tag)
         n = db_records.count()
         for db_record in db_records:
             db_record.delete()
         return n
-    
+
     def most_recent(self, project_name):
         models = self._get_models()
         return self._manager.filter(project__id=project_name).latest('timestamp').label
-    
+
     def delete_all(self):
         """Delete everything from the database."""
         management.call_command('flush', database=self._db_label,
                                 interactive=False, verbosity=0)
-    
+
     def _dump(self, indent=2):
         """
         Dump the database contents to a JSON-encoded string

sumatra/recordstore/django_store/models.py

 
 
 class SumatraObjectsManager(models.Manager):
-    
+
     def get_or_create_from_sumatra_object(self, obj, using='default'):
         # automatically retrieving the field names is nice, but leads
         # to all the special cases below when we have subclasses that we
                     attributes[name] = str(obj.__getstate__())
                 elif name == 'type':
                     attributes[name] = obj.__class__.__name__
-                elif name in ('content', 'metadata'):    
-                    attributes[name] = str(obj) # ParameterSet, DataKey
+                elif name in ('content', 'metadata'):
+                    attributes[name] = str(obj)  # ParameterSet, DataKey
                 else:
                     raise
 
-        return self.using(using).get_or_create(**attributes)            
-        
+        return self.using(using).get_or_create(**attributes)
+
 
 class BaseModel(models.Model):
     objects = SumatraObjectsManager()
-    
+
     class Meta:
         abstract = True
-    
+
     def field_names(self):
         field_names = self._meta.get_all_field_names()
         field_names.remove('id')
         field_names.remove('record')
         return field_names
-    
+