Commits

Jed Brown committed 179860b Merge

Add 'config/BuildSystem/' from commit '94fb7d1092a023a10eba9cdace78f25709a190fd'

git-subtree-dir: config/BuildSystem
git-subtree-mainline: d24caab14e29d890d97b19b2c7aadd37e4d931ee
git-subtree-split: 94fb7d1092a023a10eba9cdace78f25709a190fd

Comments (0)

Files changed (118)

config/BuildSystem/.hgignore

+syntax: glob
+*.pyc
+*~
+*.orig
+RDict.db
+RDict.loc
+RDict.log
+
+

config/BuildSystem/.hgtags

+6167c5399c7baec3559e723258883caf1e1783c9 release-2.3.2
+08d1a842beeb60886d02ca2e9fb49deac27b64cc release-2.3.3
+e2ee09c1d244ce7c7009364bec161d3a4a71a10b release-3.0.0
+e7987146764be982e7ab2822301e836c54438a20 release-3.1

config/BuildSystem/RDict.py

+#!/usr/bin/env python
+'''A remote dictionary server
+
+    RDict is a typed, hierarchical, persistent dictionary intended to manage
+    all arguments or options for a program. The interface remains exactly the
+    same as dict, but the storage is more complicated.
+
+    Argument typing is handled by wrapping all values stored in the dictionary
+    with nargs.Arg or a subclass. A user can call setType() to set the type of
+    an argument without any value being present. Whenever __getitem__() or
+    __setitem__() is called, values are extracted or replaced in the wrapper.
+    These wrappers can be accessed directly using getType(), setType(), and
+    types().
+
+    Hierarchy is allowed using a single "parent" dictionary. All operations
+    cascade to the parent. For instance, the length of the dictionary is the
+    number of local keys plus the number of keys in the parent, and its
+    parent, etc. Also, a dictionary need not have a parent. If a key does not
+    appear in the local dictionary, the call is passed to the parent. However,
+    in this case we see that local keys can shadow those in a parent.
+    Communication with the parent is handled using sockets, with the parent
+    being a server and the interactive dictionary a client.
+
+    The default persistence mechanism is a pickle file, RDict.db, written
+    whenever an argument is changed locally. A timer thread is created after
+    an initial change, so that many rapid changes do not cause many writes.
+    Each dictionary only saves its local entries, so all parents also
+    separately save data in different RDict.db files. Each time a dictionary
+    is created, the current directory is searched for an RDict.db file, and
+    if found the contents are loaded into the dictionary.
+
+    This script also provides some default actions:
+
+      - server [parent]
+        Starts a server in the current directory with an optional parent. This
+        server will accept socket connections from other dictionaries and act
+        as a parent.
+
+      - client [parent]
+        Creates a dictionary in the current directory with an optional parent
+        and lists the contents. Notice that the contents may come from either
+        an RDict.db file in the current directory, or from the parent.
+
+      - clear [parent]
+        Creates a dictionary in the current directory with an optional parent
+        and clears the contents. Notice that this will also clear the parent.
+
+      - insert <parent> <key> <value>
+        Creates a dictionary in the current directory with a parent, and inserts
+        the key-value pair. If "parent" is "None", no parent is assigned.
+
+      - remove <parent> <key>
+        Creates a dictionary in the current directory with a parent, and removes
+        the given key. If "parent" is "None", no parent is assigned.
+'''
+try:
+  import project          # This is necessary for us to create Project objects on load
+  import build.buildGraph # This is necessary for us to create BuildGraph objects on load
+except ImportError:
+  pass
+import nargs
+
+import cPickle
+import os
+import sys
+# Threading may be disabled from the command line with -useThreads=0; this flag
+# governs whether save timers, the shutdown hook, and the server may be used
+useThreads = nargs.Arg.findArgument('useThreads', sys.argv[1:])
+if useThreads is None:
+  useThreads = 1
+else:
+  useThreads = int(useThreads)
+
+class RDict(dict):
+  '''An RDict is a typed dictionary, which may be hierarchically composed. All elements derive from the
+Arg class, which wraps the usual value.'''
+  # The server will self-shutdown after this many seconds (= 5 hours);
+  # serverShutdown() keeps extending the timer while accesses continue
+  shutdownDelay = 60*60*5
+
+  def __init__(self, parentAddr = None, parentDirectory = None, load = 1, autoShutdown = 1, readonly = False):
+    '''Create the dictionary, optionally connecting to a parent server identified
+       either by an explicit socket address or by a directory containing RDict.loc.
+       - load:         if true, read any existing RDict.db into this dictionary
+       - autoShutdown: if true (and threads are enabled), register shutdown() at exit
+       - readonly:     if True, suppress all saves of the local dictionary'''
+    import atexit
+    import time
+    import xdrlib
+
+    self.logFile         = None
+    self.setupLogFile()
+    self.target          = ['default']
+    self.parent          = None
+    self.saveTimer       = None
+    self.shutdownTimer   = None
+    self.lastAccess      = time.time()
+    self.saveFilename    = 'RDict.db'
+    self.addrFilename    = 'RDict.loc'
+    self.parentAddr      = parentAddr
+    self.isServer        = 0
+    self.readonly        = readonly
+    self.parentDirectory = parentDirectory
+    self.packer          = xdrlib.Packer()
+    self.unpacker        = xdrlib.Unpacker('')
+    self.stopCmd         = cPickle.dumps(('stop',))
+    self.writeLogLine('Greetings')
+    self.connectParent(self.parentAddr, self.parentDirectory)
+    if load: self.load()
+    if autoShutdown and useThreads:
+      atexit.register(self.shutdown)
+    self.writeLogLine('SERVER: Last access '+str(self.lastAccess))
+    return
+
+  def __getstate__(self):
+    '''Remove any parent socket object, the XDR translators, and the log file from the dictionary before pickling'''
+    self.writeLogLine('Pickling RDict')
+    d = self.__dict__.copy()
+    if 'parent'    in d: del d['parent']
+    if 'saveTimer' in d: del d['saveTimer']
+    if '_setCommandLine' in d: del d['_setCommandLine']
+    del d['packer']
+    del d['unpacker']
+    del d['logFile']
+    return d
+
+  def __setstate__(self, d):
+    '''Reconnect the parent socket object, recreate the XDR translators and reopen the log file after unpickling'''
+    # Open the log before restoring state so that writeLogLine() works immediately
+    self.logFile  = file('RDict.log', 'a')
+    self.writeLogLine('Unpickling RDict')
+    self.__dict__.update(d)
+    import xdrlib
+    self.packer   = xdrlib.Packer()
+    self.unpacker = xdrlib.Unpacker('')
+    self.connectParent(self.parentAddr, self.parentDirectory)
+    return
+
+  def setupLogFile(self, filename = 'RDict.log'):
+    '''Open (or rotate) the log file; logs over 10MB are moved to a single .bkp backup'''
+    if not self.logFile is None:
+      self.logFile.close()
+    if os.path.isfile(filename) and os.stat(filename).st_size > 10*1024*1024:
+      if os.path.isfile(filename+'.bkp'):
+        os.remove(filename+'.bkp')
+      os.rename(filename, filename+'.bkp')
+      self.logFile = file(filename, 'w')
+    else:
+      self.logFile = file(filename, 'a')
+    return
+
+  def writeLogLine(self, message):
+    '''Writes the message to the log along with the current time'''
+    import time
+    self.logFile.write('('+str(os.getpid())+')('+str(id(self))+')'+message+' ['+time.asctime(time.localtime())+']\n')
+    self.logFile.flush()
+    return
+
+  def __len__(self):
+    '''Returns the length of both the local and parent dictionaries'''
+    length = dict.__len__(self)
+    if not self.parent is None:
+      length = length + self.send()
+    return length
+
+  def getType(self, key):
+    '''Checks for the key locally, and if not found consults the parent. Returns the Arg object or None if not found.'''
+    if dict.has_key(self, key):
+      self.writeLogLine('getType: Getting local type for '+key+' '+str(dict.__getitem__(self, key)))
+      return dict.__getitem__(self, key)
+    elif not self.parent is None:
+      return self.send(key)
+    return None
+
+  def __getitem__(self, key):
+    '''Checks for the key locally, and if not found consults the parent. Returns the value of the Arg.
+       - If the value has not been set, the user will be prompted for input'''
+    if dict.has_key(self, key):
+      self.writeLogLine('__getitem__: '+key+' has local type')
+      pass
+    elif not self.parent is None:
+      self.writeLogLine('__getitem__: Checking parent value')
+      if self.send(key, operation = 'has_key'):
+        self.writeLogLine('__getitem__: Parent has value')
+        return self.send(key)
+      else:
+        self.writeLogLine('__getitem__: Checking parent type')
+        arg = self.send(key, operation = 'getType')
+        if not arg:
+          self.writeLogLine('__getitem__: Parent has no type')
+          arg = nargs.Arg(key)
+        try:
+          value = arg.getValue()
+        except AttributeError, e:
+          self.writeLogLine('__getitem__: Parent had invalid entry: '+str(e))
+          arg   = nargs.Arg(key)
+          value = arg.getValue()
+        self.writeLogLine('__getitem__: Setting parent value '+str(value))
+        self.send(key, value, operation = '__setitem__')
+        return value
+    else:
+      self.writeLogLine('__getitem__: Setting local type for '+key)
+      dict.__setitem__(self, key, nargs.Arg(key))
+      self.save()
+    self.writeLogLine('__getitem__: Setting local value for '+key)
+    return dict.__getitem__(self, key).getValue()
+
+  def setType(self, key, value, forceLocal = 0):
+    '''Checks for the key locally, and if not found consults the parent. Sets the type for this key.
+       - If a value for the key already exists, it is converted to the new type'''
+    if not isinstance(value, nargs.Arg):
+      raise TypeError('An argument type must be a subclass of Arg')
+    value.setKey(key)
+    if forceLocal or self.parent is None or dict.has_key(self, key):
+      if dict.has_key(self, key):
+        v = dict.__getitem__(self, key)
+        if v.isValueSet():
+          try:
+            value.setValue(v.getValue())
+          except TypeError: pass
+      dict.__setitem__(self, key, value)
+      self.save()
+    else:
+      return self.send(key, value)
+    return
+
+  def __setitem__(self, key, value):
+    '''Checks for the key locally, and if not found consults the parent. Sets the value of the Arg.'''
+    if not dict.has_key(self, key):
+      if not self.parent is None:
+        return self.send(key, value)
+      else:
+        dict.__setitem__(self, key, nargs.Arg(key))
+    dict.__getitem__(self, key).setValue(value)
+    self.writeLogLine('__setitem__: Set value for '+key+' to '+str(dict.__getitem__(self, key)))
+    self.save()
+    return
+
+  def __delitem__(self, key):
+    '''Checks for the key locally, and if not found consults the parent. Deletes the Arg completely.'''
+    if dict.has_key(self, key):
+      dict.__delitem__(self, key)
+      self.save()
+    elif not self.parent is None:
+      self.send(key)
+    return
+
+  def clear(self):
+    '''Clears both the local and parent dictionaries'''
+    if dict.__len__(self):
+      dict.clear(self)
+      self.save()
+    if not self.parent is None:
+      self.send()
+    return
+
+  def __contains__(self, key):
+    '''This method just calls self.has_key(key)'''
+    return self.has_key(key)
+
+  def has_key(self, key):
+    '''Checks for the key locally, and if not found consults the parent. Then checks whether the value has been set'''
+    if dict.has_key(self, key):
+      if dict.__getitem__(self, key).isValueSet():
+        self.writeLogLine('has_key: Have value for '+key)
+      else:
+        self.writeLogLine('has_key: Do not have value for '+key)
+      return dict.__getitem__(self, key).isValueSet()
+    elif not self.parent is None:
+      return self.send(key)
+    return 0
+
+  def get(self, key, default=None):
+    '''Return the value for key if it is set (locally or in a parent), otherwise default'''
+    if self.has_key(key):
+      return self.__getitem__(key)
+    else:
+      return default
+
+  def hasType(self, key):
+    '''Checks for the key locally, and if not found consults the parent. Then checks whether the type has been set'''
+    if dict.has_key(self, key):
+      return 1
+    elif not self.parent is None:
+      return self.send(key)
+    return 0
+
+  def items(self):
+    '''Return a list of all accessible items, as (key, value) pairs.'''
+    l = dict.items(self)
+    if not self.parent is None:
+      l.extend(self.send())
+    return l
+
+  def localitems(self):
+    '''Return a list of all the items stored locally, as (key, value) pairs.'''
+    return dict.items(self)
+
+  def keys(self):
+    '''Returns the list of keys in both the local and parent dictionaries'''
+    # Only keys whose value is actually set count; bare types are excluded
+    keyList = filter(lambda key: dict.__getitem__(self, key).isValueSet(), dict.keys(self))
+    if not self.parent is None:
+      keyList.extend(self.send())
+    return keyList
+
+  def types(self):
+    '''Returns the list of keys for which types are defined in both the local and parent dictionaries'''
+    keyList = dict.keys(self)
+    if not self.parent is None:
+      keyList.extend(self.send())
+    return keyList
+
+  def update(self, d):
+    '''Update the dictionary with the contents of d'''
+    for k in d:
+      self[k] = d[k]
+    return
+
+  def updateTypes(self, d):
+    '''Update types locally, which is equivalent to the dict.update() method'''
+    return dict.update(self, d)
+
+  def insertArg(self, key, value, arg):
+    '''Insert a (key, value) pair into the dictionary. If key is None, arg is put into the target list.'''
+    if not key is None:
+      self[key] = value
+    else:
+      if not self.target == ['default']:
+        self.target.append(arg)
+      else:
+        self.target = [arg]
+    return
+
+  def insertArgs(self, args):
+    '''Insert some text arguments into the dictionary (list and dictionaries are recognized)'''
+    import UserDict
+
+    if isinstance(args, list):
+      for arg in args:
+        (key, value) = nargs.Arg.parseArgument(arg)
+        self.insertArg(key, value, arg)
+    # Necessary since os.environ is a UserDict
+    elif isinstance(args, dict) or isinstance(args, UserDict.UserDict):
+      for key in args.keys():
+        if isinstance(args[key], str):
+          value = nargs.Arg.parseValue(args[key])
+        else:
+          value = args[key]
+        self.insertArg(key, value, None)
+    elif isinstance(args, str):
+        (key, value) = nargs.Arg.parseArgument(args)
+        self.insertArg(key, value, args)
+    return
+
+  def hasParent(self):
+    '''Return True if this RDict has a parent dictionary'''
+    return not self.parent is None
+
+  def getServerAddr(self, dir):
+    '''Read the server socket address (in pickled form) from a file, usually RDict.loc
+       - If we fail to connect to the server specified in the file, we spawn it using startServer()'''
+    filename = os.path.join(dir, self.addrFilename)
+    if not os.path.exists(filename):
+      self.startServer(filename)
+    if not os.path.exists(filename):
+      raise RuntimeError('Server address file does not exist: '+filename)
+    try:
+      f    = open(filename, 'r')
+      addr = cPickle.load(f)
+      f.close()
+      return addr
+    except Exception, e:
+      self.writeLogLine('CLIENT: Exception during server address determination: '+str(e.__class__)+': '+str(e))
+    raise RuntimeError('Could not get server address in '+filename)
+
+  def writeServerAddr(self, server):
+    '''Write the server socket address (in pickled form) to a file, usually RDict.loc.'''
+    f = file(self.addrFilename, 'w')
+    cPickle.dump(server.server_address, f)
+    f.close()
+    self.writeLogLine('SERVER: Wrote lock file '+os.path.abspath(self.addrFilename))
+    return
+
+  def startServer(self, addrFilename):
+    '''Spawn a new RDict server in the parent directory'''
+    import RDict # Need this to locate server script
+    import sys
+    import time
+    import distutils.sysconfig
+
+    self.writeLogLine('CLIENT: Spawning a new server with lock file '+os.path.abspath(addrFilename))
+    if os.path.exists(addrFilename):
+      os.remove(addrFilename)
+    oldDir      = os.getcwd()
+    source      = os.path.join(os.path.dirname(os.path.abspath(sys.modules['RDict'].__file__)), 'RDict.py')
+    interpreter = os.path.join(distutils.sysconfig.get_config_var('BINDIR'), distutils.sysconfig.get_config_var('PYTHON'))
+    if not os.path.isfile(interpreter):
+      interpreter = 'python'
+    os.chdir(os.path.dirname(addrFilename))
+    self.writeLogLine('CLIENT: Executing '+interpreter+' '+source+' server"')
+    try:
+      os.spawnvp(os.P_NOWAIT, interpreter, [interpreter, source, 'server'])
+    except:
+      self.writeLogLine('CLIENT: os.spawnvp failed.\n \
+      This is a typical problem on CYGWIN systems.  If you are using CYGWIN,\n \
+      you can fix this problem by running /bin/rebaseall.  If you do not have\n \
+      this program, you can install it with the CYGWIN installer in the package\n \
+      Rebase, under the category System.  You must run /bin/rebaseall after\n \
+      turning off all cygwin services -- in particular sshd, if any such services\n \
+      are running.  For more information about rebase, go to http://www.cygwin.com')
+      print '\n \
+      This is a typical problem on CYGWIN systems.  If you are using CYGWIN,\n \
+      you can fix this problem by running /bin/rebaseall.  If you do not have\n \
+      this program, you can install it with the CYGWIN installer in the package\n \
+      Rebase, under the category System.  You must run /bin/rebaseall after\n \
+      turning off all cygwin services -- in particular sshd, if any such services\n \
+      are running.  For more information about rebase, go to http://www.cygwin.com\n'
+      raise
+    os.chdir(oldDir)
+    # Wait with increasing delay (capped at 100s) for the spawned server to
+    # write its address file; give up silently after 10 attempts
+    timeout = 1
+    for i in range(10):
+      time.sleep(timeout)
+      timeout *= 2
+      if timeout > 100: timeout = 100
+      if os.path.exists(addrFilename): return
+    self.writeLogLine('CLIENT: Could not start server')
+    return
+
+  def connectParent(self, addr, dir):
+    '''Try to connect to a parent RDict server
+       - If addr and dir are both None, this operation fails
+       - If addr is None, check for an address file in dir'''
+    if addr is None:
+      if dir is None: return 0
+      addr = self.getServerAddr(dir)
+
+    import socket
+    import errno
+    connected = 0
+    s         = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    timeout   = 1
+    # Up to 10 connection attempts with exponential backoff (sleep capped at
+    # 100s); on "connection refused" we also try to (re)spawn the parent server
+    for i in range(10):
+      try:
+        self.writeLogLine('CLIENT: Trying to connect to '+str(addr))
+        s.connect(addr)
+        connected = 1
+        break
+      except socket.error, e:
+        self.writeLogLine('CLIENT: Failed to connect: '+str(e))
+        if e[0] == errno.ECONNREFUSED:
+          try:
+            import time
+            time.sleep(timeout)
+            timeout *= 2
+            if timeout > 100: timeout = 100
+          except KeyboardInterrupt:
+            break
+          # Try to spawn parent
+          if dir:
+            filename = os.path.join(dir, self.addrFilename)
+            if os.path.isfile(filename):
+              os.remove(filename)
+            self.startServer(filename)
+      except Exception, e:
+        self.writeLogLine('CLIENT: Failed to connect: '+str(e.__class__)+': '+str(e))
+    if not connected:
+      self.writeLogLine('CLIENT: Failed to connect to parent')
+      return 0
+    self.parent = s
+    self.writeLogLine('CLIENT: Connected to '+str(self.parent))
+    return 1
+
+  def sendPacket(self, s, packet, source = 'Unknown', isPickled = 0):
+    '''Pickle the input packet. Send first the size of the pickled string in 32-bit integer, and then the string itself'''
+    self.writeLogLine(source+': Sending packet '+str(packet))
+    if isPickled:
+      p = packet
+    else:
+      p = cPickle.dumps(packet)
+    self.packer.reset()
+    self.packer.pack_uint(len(p))
+    # s may be either a file-like object (server side) or a socket (client side)
+    if hasattr(s, 'write'):
+      s.write(self.packer.get_buffer())
+      s.write(p)
+    else:
+      s.sendall(self.packer.get_buffer())
+      s.sendall(p)
+    self.writeLogLine(source+': Sent packet')
+    return
+
+  def recvPacket(self, s, source = 'Unknown'):
+    '''Receive first the size of the pickled string in a 32-bit integer, and then the string itself. Return the unpickled object'''
+    self.writeLogLine(source+': Receiving packet')
+    if hasattr(s, 'read'):
+      s.read(4)
+      value = cPickle.load(s)
+    else:
+      # I probably need to check that it actually read these 4 bytes
+      self.unpacker.reset(s.recv(4))
+      length    = self.unpacker.unpack_uint()
+      objString = ''
+      while len(objString) < length:
+        objString += s.recv(length - len(objString))
+      value = cPickle.loads(objString)
+    self.writeLogLine(source+': Received packet '+str(value))
+    return value
+
+  def send(self, key = None, value = None, operation = None):
+    '''Send a request to the parent'''
+    import inspect
+
+    objString = ''
+    for i in range(3):
+      try:
+        packet = []
+        if operation is None:
+          # Default the remote operation name to the caller's method name
+          operation = inspect.stack()[1][3]
+        packet.append(operation)
+        if not key is None:
+          packet.append(key)
+          if not value is None:
+            packet.append(value)
+        self.sendPacket(self.parent, tuple(packet), source = 'CLIENT')
+        response = self.recvPacket(self.parent, source = 'CLIENT')
+        break
+      except IOError, e:
+        self.writeLogLine('CLIENT: IOError '+str(e))
+        if e.errno == 32:
+          self.connectParent(self.parentAddr, self.parentDirectory)
+      except Exception, e:
+        self.writeLogLine('CLIENT: Exception '+str(e)+' '+str(e.__class__))
+    # 'response' is unbound if all three attempts failed; the UnboundLocalError
+    # branch below converts that case into a None return
+    try:
+      if isinstance(response, Exception):
+        self.writeLogLine('CLIENT: Got an exception '+str(response))
+        raise response
+      else:
+        self.writeLogLine('CLIENT: Received value '+str(response)+' '+str(type(response)))
+    except UnboundLocalError:
+      self.writeLogLine('CLIENT: Could not unpickle response')
+      response  = None
+    return response
+
+  def serve(self):
+    '''Start a server'''
+    import socket
+    import SocketServer
+
+    if not useThreads:
+      raise RuntimeError('Cannot run a server if threads are disabled')
+
+    class ProcessHandler(SocketServer.StreamRequestHandler):
+      def handle(self):
+        import time
+
+        self.server.rdict.lastAccess = time.time()
+        self.server.rdict.writeLogLine('SERVER: Started new handler')
+        while 1:
+          try:
+            value = self.server.rdict.recvPacket(self.rfile, source = 'SERVER')
+          except EOFError, e:
+            self.server.rdict.writeLogLine('SERVER: EOFError receiving packet '+str(e)+' '+str(e.__class__))
+            return
+          except Exception, e:
+            self.server.rdict.writeLogLine('SERVER: Error receiving packet '+str(e)+' '+str(e.__class__))
+            self.server.rdict.sendPacket(self.wfile, e, source = 'SERVER')
+            continue
+          if value[0] == 'stop': break
+          try:
+            # Dispatch the named operation on the server's RDict
+            response = getattr(self.server.rdict, value[0])(*value[1:])
+          except Exception, e:
+            self.server.rdict.writeLogLine('SERVER: Error executing operation '+str(e)+' '+str(e.__class__))
+            self.server.rdict.sendPacket(self.wfile, e, source = 'SERVER')
+          else:
+            self.server.rdict.sendPacket(self.wfile, response, source = 'SERVER')
+        return
+
+    # check if server is running
+    if os.path.exists(self.addrFilename):
+      rdict     = RDict(parentDirectory = '.')
+      hasParent = rdict.hasParent()
+      del rdict
+      if hasParent:
+        self.writeLogLine('SERVER: Another server is already running')
+        raise RuntimeError('Server already running')
+
+    # Daemonize server
+    self.writeLogLine('SERVER: Daemonizing server')
+    if os.fork(): # Launch child
+      os._exit(0) # Kill off parent, so we are not a process group leader and get a new PID
+    os.setsid()   # Set session ID, so that we have no controlling terminal
+    # We choose to leave cwd at RDict.py: os.chdir('/') # Make sure root directory is not on a mounted drive
+    os.umask(077) # Fix creation mask
+    for i in range(3): # Crappy stopgap for closing descriptors
+      try:
+        os.close(i)
+      except OSError, e:
+        # NOTE(review): 'errno' is not imported in this function or at module
+        # level, so reaching this line would raise a NameError -- confirm
+        if e.errno != errno.EBADF:
+          raise RuntimeError('Could not close default descriptor '+str(i))
+
+    # wish there was a better way to get a usable socket
+    self.writeLogLine('SERVER: Establishing socket server')
+    basePort = 8000
+    flag     = 'nosocket'
+    p        = 1
+    # Scan ports 8001-8999 on the host name, then fall back to localhost
+    while p < 1000 and flag == 'nosocket':
+      try:
+        server = SocketServer.ThreadingTCPServer((socket.gethostname(), basePort+p), ProcessHandler)
+        flag   = 'socket'
+      except Exception, e:
+        p = p + 1
+    if flag == 'nosocket':
+      p = 1
+      while p < 1000 and flag == 'nosocket':
+        try:
+          server = SocketServer.ThreadingTCPServer(('localhost', basePort+p), ProcessHandler)
+          flag   = 'socket'
+        except Exception, e:
+          p = p + 1
+    if flag == 'nosocket':
+      self.writeLogLine('SERVER: Could not established socket server on port '+str(basePort+p))
+      raise RuntimeError,'Cannot get available socket'
+    self.writeLogLine('SERVER: Established socket server on port '+str(basePort+p))
+
+    self.isServer = 1
+    self.writeServerAddr(server)
+    self.serverShutdown(os.getpid())
+
+    server.rdict = self
+    self.writeLogLine('SERVER: Started server')
+    server.serve_forever()
+    return
+
+  def load(self):
+    '''Load the saved dictionary'''
+    # Skip loading when the parent server runs in this very directory;
+    # presumably the RDict.db here belongs to the parent, whose entries remain
+    # reachable through send() -- TODO confirm
+    if not self.parentDirectory is None and os.path.samefile(os.getcwd(), self.parentDirectory):
+      return
+    self.saveFilename = os.path.abspath(self.saveFilename)
+    if os.path.exists(self.saveFilename):
+      try:
+        dbFile = file(self.saveFilename)
+        data   = cPickle.load(dbFile)
+        self.updateTypes(data)
+        dbFile.close()
+        self.writeLogLine('Loaded dictionary from '+self.saveFilename)
+      except Exception, e:
+        self.writeLogLine('Problem loading dictionary from '+self.saveFilename+'\n--> '+str(e))
+    else:
+      self.writeLogLine('No dictionary to load in this file: '+self.saveFilename)
+    return
+
+  def save(self, force = 0):
+    '''Save the dictionary after 5 seconds, ignoring all subsequent calls until the save
+       - Giving force = True will cause an immediate save'''
+    if self.readonly: return
+    if force or not useThreads:
+      self.saveTimer = None
+      # This should be a critical section
+      dbFile = file(self.saveFilename, 'w')
+      # Entries marked temporary are never persisted
+      data   = dict(filter(lambda i: not i[1].getTemporary(), self.localitems()))
+      cPickle.dump(data, dbFile)
+      dbFile.close()
+      self.writeLogLine('Saved local dictionary to '+os.path.abspath(self.saveFilename))
+    elif not self.saveTimer:
+      import threading
+      self.saveTimer = threading.Timer(5, self.save, [], {'force': 1})
+      self.saveTimer.setDaemon(1)
+      self.saveTimer.start()
+    return
+
+  def shutdown(self):
+    '''Shutdown the dictionary, writing out changes and notifying parent'''
+    if self.saveTimer:
+      self.saveTimer.cancel()
+      self.save(force = 1)
+    if self.isServer and os.path.isfile(self.addrFilename):
+      os.remove(self.addrFilename)
+    if not self.parent is None:
+      self.sendPacket(self.parent, self.stopCmd, isPickled = 1)
+      self.parent.close()
+      self.parent = None
+    self.writeLogLine('Shutting down')
+    self.logFile.close()
+    return
+
+  def serverShutdown(self, pid, delay = shutdownDelay):
+    '''Arm (or fire) the self-shutdown timer for the server process pid
+       - On first call, start a daemon timer for delay seconds
+       - When the timer fires, either extend it (if the dictionary was accessed
+         recently) or kill the server with SIGTERM'''
+    if self.shutdownTimer is None:
+      import threading
+
+      self.shutdownTimer = threading.Timer(delay, self.serverShutdown, [pid], {'delay': 0})
+      self.shutdownTimer.setDaemon(1)
+      self.shutdownTimer.start()
+      self.writeLogLine('SERVER: Set shutdown timer for process '+str(pid)+' at '+str(delay)+' seconds')
+    else:
+      try:
+        import signal
+        import time
+
+        idleTime = time.time() - self.lastAccess
+        self.writeLogLine('SERVER: Last access '+str(self.lastAccess))
+        self.writeLogLine('SERVER: Idle time '+str(idleTime))
+        if idleTime < RDict.shutdownDelay:
+          self.writeLogLine('SERVER: Extending shutdown timer for '+str(pid)+' by '+str(RDict.shutdownDelay - idleTime)+' seconds')
+          self.shutdownTimer = None
+          self.serverShutdown(pid, RDict.shutdownDelay - idleTime)
+        else:
+          self.writeLogLine('SERVER: Killing server '+str(pid))
+          os.kill(pid, signal.SIGTERM)
+      except Exception, e:
+        self.writeLogLine('SERVER: Exception killing server: '+str(e))
+    return
+
+if __name__ ==  '__main__':
+  # Command-line driver; see the module docstring for a description of the actions
+  import sys
+  try:
+    if len(sys.argv) < 2:
+      # NOTE(review): this usage line omits the cacheClient and stampClient
+      # actions that are handled below
+      print 'RDict.py [server | client | clear | insert | remove] [parent]'
+    else:
+      action = sys.argv[1]
+      parent = None
+      if len(sys.argv) > 2:
+        if not sys.argv[2] == 'None': parent = sys.argv[2]
+      if action == 'server':
+        RDict(parentDirectory = parent).serve()
+      elif action == 'client':
+        print 'Entries in server dictionary'
+        rdict = RDict(parentDirectory = parent)
+        for key in rdict.types():
+          if not key.startswith('cacheKey') and not key.startswith('stamp-'):
+            print str(key)+' '+str(rdict.getType(key))
+      elif action == 'cacheClient':
+        print 'Cache entries in server dictionary'
+        rdict = RDict(parentDirectory = parent)
+        for key in rdict.types():
+          if key.startswith('cacheKey'):
+            print str(key)+' '+str(rdict.getType(key))
+      elif action == 'stampClient':
+        print 'Stamp entries in server dictionary'
+        rdict = RDict(parentDirectory = parent)
+        for key in rdict.types():
+          if key.startswith('stamp-'):
+            print str(key)+' '+str(rdict.getType(key))
+      elif action == 'clear':
+        print 'Clearing all dictionaries'
+        RDict(parentDirectory = parent).clear()
+      elif action == 'insert':
+        rdict = RDict(parentDirectory = parent)
+        rdict[sys.argv[3]] = sys.argv[4]
+      elif action == 'remove':
+        rdict = RDict(parentDirectory = parent)
+        del rdict[sys.argv[3]]
+      else:
+        sys.exit('Unknown action: '+action)
+  except Exception, e:
+    import traceback
+    print traceback.print_tb(sys.exc_info()[2])
+    sys.exit(str(e))
+  sys.exit(0)

config/BuildSystem/__init__.py

Empty file added.

config/BuildSystem/args.py

+class ArgumentProcessor(object):
+  '''This class provides interaction with an RDict object, which by default is shared'''
+  # Class-wide RDict shared by every processor that does not supply its own
+  defaultDB = None
+
+  def __init__(self, clArgs = None, argDB = None):
+    '''Setup the argument database
+       - clArgs: command line arguments to insert (defaults to sys.argv[1:])
+       - argDB:  an existing RDict to use instead of the shared default'''
+    self.argDB = self.createArgDB(argDB)
+    if clArgs is None:
+      import sys
+
+      self.clArgs = sys.argv[1:]
+    else:
+      self.clArgs = clArgs
+    return
+
+  def __getstate__(self):
+    '''We do not want to pickle the default RDict'''
+    d = self.__dict__.copy()
+    if '_argDB' in d:
+      if d['_argDB'] is ArgumentProcessor.defaultDB:
+        del d['_argDB']
+      else:
+        d['_argDB'] = None
+    return d
+
+  def __setstate__(self, d):
+    '''We must create the default RDict'''
+    self.__dict__.update(d)
+    if not '_argDB' in d:
+      self.argDB = self.createArgDB(None)
+    return
+
+  def getArgDB(self):
+    return self._argDB
+  def setArgDB(self, argDB):
+    self._argDB = argDB
+    return
+  argDB = property(getArgDB, setArgDB, doc = 'The RDict argument database')
+
+  def createArgDB(self, initDB):
+    '''Create an argument database unless initDB is provided, and insert the command line arguments'''
+    if not initDB is None:
+      argDB = initDB
+    else:
+      if ArgumentProcessor.defaultDB is None:
+        import RDict
+        import os
+        import sys
+
+        # NOTE(review): os and sys are imported above but unused in this method
+        # Changed this to assume RDict is independent
+        ArgumentProcessor.defaultDB = RDict.RDict(load = 0, autoShutdown = 0)
+      argDB = ArgumentProcessor.defaultDB
+    return argDB
+
+  def setupArguments(self, argDB):
+    '''Setup types in the argument database
+       - This method shouldbe overidden by any subclass with special arguments, making sure to call the superclass method'''
+    return argDB
+
+  def insertArguments(self, useEnvironment = 0):
+    '''Put arguments in from the command line and environment
+       - This will only insert command line arguments into a given RDict once'''
+    if useEnvironment:
+      import os
+
+      self.argDB.insertArgs(os.environ)
+    # The _setCommandLine marker prevents re-inserting argv on repeated calls
+    if not hasattr(self.argDB, '_setCommandLine'):
+      self.argDB.insertArgs(self.clArgs)
+      self.argDB._setCommandLine = 1
+    return
+
+  def setup(self):
+    '''This method should be overidden for any setup after initialization
+       - Here we determine argument types and insert arguments into the dictionary'''
+    self.setupArguments(self.argDB)
+    self.insertArguments()
+    return
+
+  def cleanup(self):
+    '''This method should be overidden for any cleanup before finalization'''
+    return

config/BuildSystem/build/__init__.py

+# Submodules of the build package
+# NOTE(review): conventional Python uses __all__ (double underscores) for this, and
+# 'all' shadows the builtin all() — confirm no code reads build.all before renaming
+all = ['bk', 'buildGraph', 'builder', 'compile', 'fileState', 'fileset', 'framework', 'processor', 'templates', 'transform']

config/BuildSystem/build/bk.py

+import build.fileset
+import build.transform
+
+def convertPath(file):
+  '''Converts the cygwin path to a full Windows path
+     - Falls back to returning the path unchanged when the cygwinpath module is unavailable'''
+  try:
+    import cygwinpath
+    return cygwinpath.convertToFullWin32Path(file)
+  except ImportError:
+    pass
+  return file
+
+class Tag (build.transform.Transform):
+  '''Tags all relevant Bitkeeper filesets
+     - Unlocked files are tagged "bkedit"
+     - Locked files which are unchanged are tagged "bkrevert"
+     - New implementation files are tagged "bkadd"'''
+  def __init__(self, rootFunc, inputTag = None):
+    '''rootFunc maps a handled file to the BitKeeper root directory to examine; inputTag restricts which set tags are handled'''
+    import re
+
+    build.transform.Transform.__init__(self)
+    # Matches directory names ending in _impl (implementation directories)
+    self.implRE   = re.compile(r'^(.*)_impl$')
+    self.rootFunc = rootFunc
+    self.inputTag = inputTag
+    # Normalize a single tag to a one-element list so membership tests work uniformly
+    if not self.inputTag is None and not isinstance(self.inputTag, list):
+      self.inputTag = [self.inputTag]
+    return
+
+  def __str__(self):
+    return 'BitKeeper tag transform'
+
+  def getUnlockedFiles(self, root):
+    '''Return a list of all files not locked by BitKeeper in the root directories'''
+    files       = []
+    lockedFiles = []
+    files.extend(self.executeShellCommand('bk sfiles -g '+convertPath(root)).split())
+    lockedFiles.extend(self.executeShellCommand('bk sfiles -lg '+convertPath(root)).split())
+    # Relies on the eager Python 2 map() to perform the in-place removals
+    map(files.remove, lockedFiles)
+    return files
+
+  def isImplementationFile(self, filename):
+    '''Returns True if filename is an implementation file
+       - Editor backup/autosave files and compiled Python files are excluded'''
+    import os
+
+    if filename[-1] == '~': return 0
+    if filename[-1] == '#': return 0
+    if os.path.splitext(filename)[1] == '.pyc': return 0
+    # A file counts as implementation when its directory name ends in _impl
+    if self.implRE.match(os.path.dirname(filename)):
+      return 1
+    return 0
+
+  def getNewFiles(self, root):
+    '''Return a list of all implementation files not under BitKeeper control in the root directories'''
+    files = []
+    files.extend(filter(self.isImplementationFile, self.executeShellCommand('bk sfiles -ax '+convertPath(root)).split()))
+    return files
+
+  def getUnchangedFiles(self, root):
+    '''Return a list of the files locked by Bitkeeper, but unchanged'''
+    lockedFiles  = []
+    changedFiles = []
+    lockedFiles.extend(self.executeShellCommand('bk sfiles -lg '+convertPath(root)).split())
+    changedFiles.extend(self.executeShellCommand('bk sfiles -cg '+convertPath(root)).split())
+    # Relies on the eager Python 2 map() to perform the in-place removals
+    map(lockedFiles.remove, changedFiles)
+    return lockedFiles
+
+  def handleFile(self, f, set):
+    '''Add new filesets to the output
+       - All files under BitKeeper control are tagged "bkedit"
+       - All new implementation files are tagged "bkadd"
+       - All locked but unchanged files under BitKeeper control are tagged "bkrevert"'''
+    root = self.rootFunc(f)
+    if (self.inputTag is None or set.tag in self.inputTag) and root:
+      import os
+      # Ensure the root exists before asking bk to scan it
+      if not os.path.isdir(root):
+        os.makedirs(root)
+      self.output.children.append(build.fileset.FileSet(filenames = self.getUnlockedFiles(root),  tag = 'bkedit'))
+      self.output.children.append(build.fileset.FileSet(filenames = self.getNewFiles(root),       tag = 'bkadd'))
+      self.output.children.append(build.fileset.FileSet(filenames = self.getUnchangedFiles(root), tag = 'bkrevert'))
+    return build.transform.Transform.handleFile(self, f, set)
+
+class Open (build.transform.Transform):
+  '''This node handles sets with tag "bkedit", editing each file'''
+  def __init__(self):
+    build.transform.Transform.__init__(self)
+    return
+
+  def __str__(self):
+    return 'BitKeeper open transform'
+
+  def edit(self, set):
+    '''Edit the files in set with BitKeeper'''
+    if not len(set): return
+    self.debugPrint('Opening files', 2, 'bk')
+    command = 'bk edit '+' '.join(map(convertPath, set))
+    output  = self.executeShellCommand(command)
+    return self.output
+
+  def handleFileSet(self, set):
+    '''Handle sets with tag "bkedit"'''
+    if set.tag == 'bkedit':
+      self.edit(set)
+      # Recurse into child sets so nested filesets are also opened
+      map(self.handleFileSet, set.children)
+      return self.output
+    return build.transform.Transform.handleFileSet(self, set)
+
+class Close (build.transform.Transform):
+  '''This node handles sets with tag "bkadd" and "bkrevert", adding new files and reverting unchanged files'''
+  def __init__(self):
+    build.transform.Transform.__init__(self)
+    return
+
+  def __str__(self):
+    return 'BitKeeper close transform'
+
+  def add(self, set):
+    '''Add the files in set to BitKeeper
+       - After adding, check the files back out read-only with "bk co -q"'''
+    if not len(set): return
+    self.debugPrint('Putting new files under version control', 2, 'bk')
+    map(lambda f: self.debugPrint('Adding '+f+' to version control', 3, 'bk'), set)
+    command = 'bk add '+' '.join(map(convertPath, set))
+    output  = self.executeShellCommand(command)
+    command = 'bk co -q '+' '.join(map(convertPath, set))
+    output  = self.executeShellCommand(command)
+    return self.output
+
+  def revert(self, set):
+    '''Revert the files in set using BitKeeper
+       - "bk unedit" discards the lock, then the files are checked out again'''
+    if not len(set): return
+    self.debugPrint('Reverting unchanged files', 2, 'bk')
+    command = 'bk unedit '+' '.join(map(convertPath, set))
+    output  = self.executeShellCommand(command)
+    command = 'bk co -q '+' '.join(map(convertPath, set))
+    output  = self.executeShellCommand(command)
+    return self.output
+
+  def handleFileSet(self, set):
+    '''Handle sets with tag "bkadd" and "bkrevert"'''
+    if set.tag == 'bkadd':
+      self.add(set)
+      map(self.handleFileSet, set.children)
+      return self.output
+    elif set.tag == 'bkrevert':
+      self.revert(set)
+      map(self.handleFileSet, set.children)
+      return self.output
+    return build.transform.Transform.handleFileSet(self, set)

config/BuildSystem/build/buildGraph.py

+from __future__ import generators
+
+class BuildGraph(object):
+  '''A directed graph, stored as a vertex list plus per-vertex in/out edge lists,
+     with depth-first, breadth-first and topological traversals'''
+  def __init__(self, vertices = []):
+    '''Create a graph'''
+    # NOTE(review): mutable default argument; harmless here only because it is
+    # iterated, never mutated — consider the vertices=None idiom
+    self.vertices = []
+    self.inEdges  = {}
+    self.outEdges = {}
+    map(self.addVertex, vertices)
+    return
+
+  def __str__(self):
+    return 'BuildGraph with '+str(len(self.vertices))+' vertices and '+str(reduce(lambda k,l: k+l, [len(edgeList) for edgeList in self.inEdges.values()], 0))+' edges'
+
+  def addVertex(self, vertex):
+    '''Add a vertex if it does not already exist in the vertex list
+       - Should be able to use Set in Python 2.3'''
+    if vertex is None: return
+    if not vertex in self.vertices:
+      self.vertices.append(vertex)
+      # Initialize empty edge lists for the new vertex
+      self.clearEdges(vertex)
+    return
+
+  def addEdges(self, vertex, inputs = [], outputs = []):
+    '''Define the in and out edges for a vertex by listing the other vertices defining the edges
+       - If any vertex does not exist in the graph, it is created'''
+    self.addVertex(vertex)
+    for input in inputs:
+      self.addVertex(input)
+      # Keep inEdges and outEdges mirrored, avoiding duplicate edges
+      if not vertex is None and not input is None:
+        if not input  in self.inEdges[vertex]: self.inEdges[vertex].append(input)
+        if not vertex in self.outEdges[input]: self.outEdges[input].append(vertex)
+    for output in outputs:
+      self.addVertex(output)
+      if not vertex is None and not output is None:
+        if not vertex in self.inEdges[output]:  self.inEdges[output].append(vertex)
+        if not output in self.outEdges[vertex]: self.outEdges[vertex].append(output)
+    return
+
+  def getEdges(self, vertex):
+    '''Return the (inEdges, outEdges) pair for vertex'''
+    return (self.inEdges[vertex], self.outEdges[vertex])
+
+  def clearEdges(self, vertex, inOnly = 0, outOnly = 0):
+    '''Reset the edge lists for vertex; inOnly/outOnly restrict the reset to one side'''
+    if inOnly and outOnly:
+      raise RuntimeError('Inconsistent arguments')
+    if not outOnly:
+      self.inEdges[vertex]  = []
+    if not inOnly:
+      self.outEdges[vertex] = []
+    return
+
+  def removeVertex(self, vertex):
+    '''Remove a vertex if already exists in the vertex list
+       - Also removes all associated edges'''
+    if vertex is None: return
+    if vertex in self.vertices:
+      self.vertices.remove(vertex)
+      del self.inEdges[vertex]
+      del self.outEdges[vertex]
+      # Scrub references to the removed vertex from every remaining edge list
+      for v in self.vertices:
+        if vertex in self.inEdges[v]:  self.inEdges[v].remove(vertex)
+        if vertex in self.outEdges[v]: self.outEdges[v].remove(vertex)
+    return
+
+  def addSubgraph(self, graph):
+    '''Add the vertices and edges of another graph into this one'''
+    map(self.addVertex, graph.vertices)
+    map(lambda v: apply(self.addEdges, (v,)+graph.getEdges(v)), graph.vertices)
+    return
+
+  def removeSubgraph(self, graph):
+    '''Remove the vertices and edges of a subgraph, and all the edges connected to it'''
+    map(self.removeVertex, graph.vertices)
+    return
+
+  def printIndent(self, indent):
+    '''Write two spaces per indent level, without a newline'''
+    import sys
+    for i in range(indent): sys.stdout.write('  ')
+
+  def display(self):
+    '''Print the graph vertices in breadth-first order, indented by level, with edge indices'''
+    print 'I am a BuildGraph with '+str(len(self.vertices))+' vertices'
+    for vertex in BuildGraph.breadthFirstSearch(self):
+      # vertex.__level (name-mangled to _BuildGraph__level) is assigned by breadthFirstSearch
+      self.printIndent(vertex.__level)
+      print '('+str(self.vertices.index(vertex))+') '+str(vertex)+' in: '+str(map(self.vertices.index, self.inEdges[vertex]))+' out: '+str(map(self.vertices.index, self.outEdges[vertex]))
+    return
+
+  def appendGraph(self, graph):
+    '''Join every leaf of this graph to every root of the input graph, leaving the result in this graph'''
+    leaves = BuildGraph.getLeaves(self)
+    self.addSubgraph(graph)
+    map(lambda v: self.addEdges(v, outputs = BuildGraph.getRoots(graph)), leaves)
+    return self
+
+  def prependGraph(self, graph):
+    '''Join every leaf of the input graph to every root of this graph, leaving the result in this graph'''
+    roots = BuildGraph.getRoots(self)
+    self.addSubgraph(graph)
+    map(lambda v: self.addEdges(v, outputs = roots), BuildGraph.getLeaves(graph))
+    return self
+
+  def getRoots(graph):
+    '''Return all the sources in the graph (nodes without entering edges)'''
+    return filter(lambda v: not len(graph.getEdges(v)[0]), graph.vertices)
+  getRoots = staticmethod(getRoots)
+
+  def getLeaves(graph):
+    '''Return all the sinks in the graph (nodes without exiting edges)'''
+    return filter(lambda v: not len(graph.getEdges(v)[1]), graph.vertices)
+  getLeaves = staticmethod(getLeaves)
+
+  def depthFirstVisit(graph, vertex, seen = None, returnFinished = 0, outEdges = 1):
+    '''This is a generator returning vertices in a depth-first traversal only for the subtree rooted at vertex
+       - If returnFinished is True, return a vertex when it finishes
+       - Otherwise, return a vertex when it is first seen
+       - If outEdges is True, proceed along these, otherwise use inEdges'''
+    if seen is None: seen = []
+    seen.append(vertex)
+    if not returnFinished:
+      yield vertex
+    # Cute trick since outEdges is index 1, and inEdges is index 0
+    for v in graph.getEdges(vertex)[outEdges]:
+      if not v in seen:
+        # NOTE(review): the for loop already absorbs StopIteration from the
+        # sub-generator, so this handler appears redundant
+        try:
+          for v2 in BuildGraph.depthFirstVisit(graph, v, seen, returnFinished, outEdges):
+            yield v2
+        except StopIteration:
+          pass
+    if returnFinished:
+      yield vertex
+    return
+  depthFirstVisit = staticmethod(depthFirstVisit)
+
+  def depthFirstSearch(graph, returnFinished = 0, outEdges = 1):
+    '''This is a generator returning vertices in a depth-first traversal
+       - If returnFinished is True, return a vertex when it finishes
+       - Otherwise, return a vertex when it is first seen
+       - If outEdges is True, proceed along these, otherwise use inEdges'''
+    seen = []
+    for vertex in graph.vertices:
+      if not vertex in seen:
+        try:
+          for v in BuildGraph.depthFirstVisit(graph, vertex, seen, returnFinished, outEdges):
+            yield v
+        except StopIteration:
+          pass
+    return
+  depthFirstSearch = staticmethod(depthFirstSearch)
+
+  def breadthFirstSearch(graph, returnFinished = 0):
+    '''This is a generator returning vertices in a breadth-first traversal
+       - If returnFinished is True, return a vertex when it finishes
+       - Otherwise, return a vertex when it is first seen
+       - Each yielded vertex is annotated with its BFS depth in the mangled __level attribute
+       - NOTE(review): only starts from the first root, so vertices unreachable from it are never visited
+       - NOTE(review): unlike the other traversals this is not wrapped in staticmethod();
+         display() calls it as an unbound method, which only works in Python 2'''
+    queue = BuildGraph.getRoots(graph)[0:1]
+    if not len(queue): return
+    seen  = [queue[0]]
+    if not returnFinished:
+      queue[0].__level = 0
+      yield queue[0]
+    while len(queue):
+      # FIFO discipline: children are inserted at the front and vertices popped from the back
+      vertex = queue[-1]
+      for v in graph.getEdges(vertex)[1]:
+        if not v in seen:
+          seen.append(v)
+          v.__level = vertex.__level + 1
+          queue.insert(0, v)
+          if not returnFinished:
+            yield v
+      vertex = queue.pop()
+      if returnFinished:
+        yield vertex
+    return
+
+  def topologicalSort(graph, start = None):
+    '''Reorder the vertices using topological sort
+       - Implemented as the reverse of the depth-first finishing order'''
+    if start is None:
+      vertices = [vertex for vertex in BuildGraph.depthFirstSearch(graph, returnFinished = 1)]
+    else:
+      vertices = [vertex for vertex in BuildGraph.depthFirstVisit(graph, start, returnFinished = 1)]
+    vertices.reverse()
+    for vertex in vertices:
+      yield vertex
+    return
+  topologicalSort = staticmethod(topologicalSort)

config/BuildSystem/build/builder.py

+from __future__ import generators
+import base
+
+class Builder (base.Base):
+  '''Executes a BuildGraph of transforms in topological order, feeding each vertex
+     the outputs of its predecessors'''
+  def __init__(self, buildGraph = None):
+    base.Base.__init__(self)
+    self.buildGraph    = buildGraph
+    # Vertex at which a previous, interrupted execution stopped; None when idle
+    self.currentVertex = None
+    return
+
+  def processInput(self, input):
+    '''Normalize "input" into a dict mapping vertex -> FileSet
+       - A bare (non-dict) input is stored under the key None, meaning the initial input'''
+    inputs = {}
+    if isinstance(input, dict):
+      inputs.update(input)
+    elif not input is None:
+      inputs[None] = input
+    return inputs
+
+  def execute(self, start = None, input = None):
+    '''Execute the topologically sorted build graph, optionally starting from the transform "start" with the optional FileSet "input"
+       - This is a generator yielding each vertex after it has been executed
+       - If a previous execution was interrupted, it resumes from self.currentVertex'''
+    import build.buildGraph
+
+    inputs  = self.processInput(input)
+    started = 0
+    self.debugPrint('Starting build', 1, 'build')
+    if not self.currentVertex is None:
+      start = self.currentVertex
+    for vertex in build.buildGraph.BuildGraph.topologicalSort(self.buildGraph):
+      self.debugPrint('Executing vertex '+str(vertex), 2, 'build')
+      if not started:
+        # Skip vertices until the requested start vertex is reached
+        if not start is None and not vertex == start:
+          continue
+        started = 1
+        if None in inputs:
+          self.debugPrint('Processing initial input '+self.debugFileSetStr(inputs[None]), 3, 'build')
+          vertex.handleFileSet(inputs[None])
+      if vertex in inputs:
+        self.debugPrint('Processing specified input '+self.debugFileSetStr(inputs[vertex]), 3, 'build')
+        vertex.handleFileSet(inputs[vertex])
+      # Feed the outputs of all predecessor vertices into this one
+      for parent in self.buildGraph.getEdges(vertex)[0]:
+        self.debugPrint('Processing input '+self.debugFileSetStr(parent.output)+' from vertex: '+str(parent), 3, 'build')
+        vertex.handleFileSet(parent.output)
+      self.debugPrint('Generated output '+self.debugFileSetStr(vertex.output)+' from vertex: '+str(vertex), 3, 'build')
+      self.currentVertex = vertex
+      yield vertex
+    self.currentVertex = None
+    return

config/BuildSystem/build/compile/C.py

+import build.processor
+
+class Compiler (build.processor.Compiler):
+  '''C compiler processor: consumes filesets tagged "c" and compiles them with the
+     compiler named by argDB["CC"]'''
+  def __init__(self, sourceDB, usingC, compiler = None, warningFlags = None, inputTag = 'c'):
+    build.processor.Compiler.__init__(self, sourceDB, compiler, inputTag, updateType = 'deferred')
+    self.usingC       = usingC
+    self.warningFlags = warningFlags
+    self.language     = 'C'
+    self.includeDirs.append('.')
+    self.checkCompiler()
+    return
+
+  def __str__(self):
+    return self.language+' compiler('+self.processor+') for '+str(self.inputTag)
+
+  def checkCompiler(self):
+    '''Checks the compatibility of the supplied compiler
+       - Falls back to argDB["CC"] when no compiler was given'''
+    if self.processor is None:
+      self.processor = self.argDB['CC']
+    return
+
+  def getOptimizationFlags(self, source = None):
+    '''Return the flags from argDB["CFLAGS"] as a one-element list, or an empty list'''
+    if self.argDB['CFLAGS']:
+      return [self.argDB['CFLAGS']]
+    return []
+
+  def getWarningFlags(self, source = None):
+    '''Return a list of the compiler warning flags. The default is most of the GCC warnings.'''
+    if self.warningFlags is None:
+      return ['-Wall', '-Wundef', '-Wpointer-arith', '-Wbad-function-cast', '-Wcast-align', '-Wwrite-strings',
+              '-Wconversion', '-Wsign-compare', '-Wstrict-prototypes', '-Wmissing-prototypes', '-Wmissing-declarations',
+              '-Wmissing-noreturn', '-Wredundant-decls', '-Wnested-externs', '-Winline']
+    return self.warningFlags

config/BuildSystem/build/compile/Cxx.py

+import build.processor
+
+class Compiler (build.processor.Compiler):
+  '''C++ compiler processor: consumes filesets tagged "cxx" and compiles them with the
+     compiler named by argDB["CXX"]'''
+  def __init__(self, sourceDB, usingCxx, compiler = None, warningFlags = None, inputTag = 'cxx'):
+    build.processor.Compiler.__init__(self, sourceDB, compiler, inputTag, updateType = 'deferred')
+    self.usingCxx     = usingCxx
+    self.warningFlags = warningFlags
+    self.language     = 'Cxx'
+    self.includeDirs.append('.')
+    self.checkCompiler()
+    return
+
+  def __str__(self):
+    return self.language+' compiler('+self.processor+') for '+str(self.inputTag)
+
+  def checkCompiler(self):
+    '''Checks the compatibility of the supplied compiler
+       - Falls back to argDB["CXX"] when no compiler was given
+       - For GNU compilers, verifies that the g++ major version is exactly 3'''
+    import config.setCompilers
+
+    if self.processor is None:
+      self.processor = self.argDB['CXX']
+    compiler = self.processor
+    if config.setCompilers.Configure.isGNU(compiler):
+      # NOTE(review): the commands module is Python 2 only (removed in Python 3)
+      import commands
+      # Make sure g++ is recent enough
+      (status, output) = commands.getstatusoutput(compiler+' -dumpversion')
+      if not status == 0:
+        raise RuntimeError('The compiler you specified ('+compiler+') could not be run. Perhaps it is not in your path.')
+      version = output.split('.')[0]
+      # NOTE(review): this equality test also rejects g++ major versions above 3 —
+      # confirm whether a >= comparison was intended
+      if not version == '3':
+        raise RuntimeError('The g++ you specified ('+compiler+') is version '+version+'; please install a g++ of at least version 3 or fix your path. Get gcc/g++ at http://gcc.gnu.com')
+    return
+
+  def getOptimizationFlags(self, source = None):
+    '''Return the flags from argDB["CXXFLAGS"] as a one-element list, or an empty list'''
+    if self.argDB['CXXFLAGS']:
+      return [self.argDB['CXXFLAGS']]
+    return []
+
+  def getWarningFlags(self, source = None):
+    '''Return a list of the compiler warning flags. The default is most of the GCC warnings.'''
+    if self.warningFlags is None:
+      return ['-Wall', '-Wundef', '-Wpointer-arith', '-Wcast-align', '-Wwrite-strings',
+              '-Wconversion', '-Wsign-compare', '-Wstrict-prototypes', '-Wmissing-prototypes',
+              '-Wmissing-noreturn', '-Wredundant-decls', '-Winline']
+    return self.warningFlags

config/BuildSystem/build/compile/F90.py

+import build.processor
+
+class Compiler (build.processor.Compiler):
+  '''F90 compiler processor: consumes filesets tagged "f90" and compiles them with the
+     compiler named by argDB["F90"]'''
+  def __init__(self, sourceDB, usingF90, compiler = None, warningFlags = None, inputTag = 'f90'):
+    build.processor.Compiler.__init__(self, sourceDB, compiler, inputTag, updateType = 'deferred')
+    self.usingF90     = usingF90
+    self.warningFlags = warningFlags
+    self.language     = 'F90'
+    self.includeDirs.append('.')
+    self.checkCompiler()
+    return
+
+  def __str__(self):
+    return self.language+' compiler('+self.processor+') for '+str(self.inputTag)
+
+  def checkCompiler(self):
+    '''Checks the compatibility of the supplied compiler
+       - Falls back to argDB["F90"] when no compiler was given
+       - Only the Intel compiler "ifc" is accepted; anything else raises RuntimeError'''
+    if self.processor is None:
+      self.processor = self.argDB['F90']
+    compiler = self.processor
+    if not compiler == 'ifc':
+      raise RuntimeError('I only know how to deal with Intel F90 right now. Shoot me.')
+    return
+
+  def getOptimizationFlags(self, source = None):
+    '''Return the flags from argDB["FFLAGS"] as a one-element list, or an empty list'''
+    if self.argDB['FFLAGS']:
+      return [self.argDB['FFLAGS']]
+    return []
+
+  def getWarningFlags(self, source = None):
+    '''Return a list of the compiler warning flags. The default is empty.'''
+    if self.warningFlags is None:
+      return []
+    return self.warningFlags

config/BuildSystem/build/compile/SIDL.py

+import build.fileset
+import build.processor
+
+import os
+
+import cPickle
+
+try:
+  from hashlib import md5 as new_md5
+except ImportError:
+  from md5 import new as new_md5
+
+class SIDLConstants:
+  '''This class contains data about the SIDL language'''
+  def getLanguages():
+    '''Returns a list of all permissible SIDL target languages'''
+    # This should be argDB['installedLanguages']
+    return ['C', 'Cxx', 'C++', 'Python', 'F77', 'F90', 'Java', 'Mathematica', 'Matlab']
+  getLanguages = staticmethod(getLanguages)
+
+  def checkLanguage(language):
+    '''Check for a valid SIDL target language, otherwise raise a ValueError'''
+    if not language in SIDLConstants.getLanguages():
+      raise ValueError('Invalid SIDL language: '+language)
+  checkLanguage = staticmethod(checkLanguage)
+
+class SIDLLanguageList (list):
+  '''A list subclass whose item assignment validates entries as SIDL target languages
+     - NOTE(review): only __setitem__ validates; append/extend/insert bypass the check'''
+  def __setitem__(self, key, value):
+    SIDLConstants.checkLanguage(value)
+    list.__setitem__(self, key, value)
+
+class Compiler(build.processor.Processor):
+  '''The SIDL compiler processes any FileSet with the tag "sidl", and outputs a FileSet of source code with the appropriate language tag.
+     - Servers always compile a single SIDL file'''
+  def __init__(self, sourceDB, language, outputDir, isServer, usingSIDL):
+    '''language must be one of SIDLConstants.getLanguages(); isServer selects server versus client generation'''
+    SIDLConstants.checkLanguage(language)
+    build.processor.Processor.__init__(self, sourceDB, None, ['sidl', 'old sidl'], language.lower(), not isServer, 'deferred')
+    # Can't initialize processor in constructor since I have to wait for Base to set argDB
+    self.processor = self.getCompilerDriver()
+    self.language  = language
+    self.outputDir = outputDir
+    self.isServer  = isServer
+    if isServer:
+      self.action  = 'server'
+    else:
+      self.action  = 'client'
+    self.usingSIDL = usingSIDL
+    self.repositoryDirs = []
+    self.outputTag = self.language.lower()+' '+self.action
+    return
+
+  def __str__(self):
+    return 'SIDL Compiler for '+self.language+' '+self.action
+
+  def handleErrors(self, command, status, output):
+    '''Raise RuntimeError on a nonzero exit status or when "Error:" appears in the output'''
+    if status or output.find('Error:') >= 0:
+      raise RuntimeError('Could not execute \''+str(command)+'\':\n'+str(output))
+
+  def getCompilerDriver(self):
+    '''Return the path of the scandal.py driver from the installed Compiler project,
+       or the bare name and rely on the path when the project is not installed'''
+    project = self.getInstalledProject('bk://sidl.bkbits.net/Compiler')
+    if project is None:
+      return 'scandal.py'
+    return os.path.join(project.getRoot(), 'driver', 'python', 'scandal.py')
+
+  def getCompilerModule(self, name = 'scandal'):
+    '''Import and return the scandal module from the installed Compiler project'''
+    import imp
+
+    root = os.path.dirname(self.getCompilerDriver())
+    if not root:
+      raise ImportError('Project bk://sidl.bkbits.net/Compiler is not installed')
+    (fp, pathname, description) = imp.find_module(name, [root])
+    try:
+      return imp.load_module(name, fp, pathname, description)
+    finally:
+      # find_module opens the file; close it even if load_module raises
+      if fp: fp.close()
+
+  def getActionFlags(self, source):
+    '''Return a list of the compiler flags specifying the generation action.'''
+    return ['-'+self.action+'='+self.language]
+
+  def getDependenciesSIDL(self):
+    '''Return all SIDL files found in project dependencies
+       - Looks in the "sidl" subdirectory of every repository directory'''
+    if not self.repositoryDirs: return []
+    sources = []
+    for dir in self.repositoryDirs:
+      dir = os.path.join(dir, 'sidl')
+      if not os.path.exists(dir):
+        self.debugPrint('Invalid SIDL include directory: '+dir, 4, 'compile')
+        continue
+      for source in os.listdir(dir):
+        if not os.path.splitext(source)[1] == '.sidl': continue
+        source = os.path.join(dir, source)
+        if not os.path.isfile(source): raise RuntimeError('Invalid SIDL include: '+source)
+        sources.append(source)
+    return sources
+
+  def getIncludeFlags(self, source):
+    '''Return the -includes flag listing all dependency SIDL files'''
+    return ['-includes=['+','.join(self.getDependenciesSIDL())+']']
+
+  def getOutputFlags(self, source):
+    '''Return a list of the compiler flags specifying the output directories'''
+    if isinstance(source, build.fileset.FileSet): source = source[0]
+    (package, ext) = os.path.splitext(os.path.basename(source))
+    if not self.outputDir is None:
+      if self.isServer:
+        outputDir = os.path.join(self.outputDir, self.usingSIDL.getServerRootDir(self.language, package))
+      else:
+        outputDir = os.path.join(self.outputDir, self.usingSIDL.getClientRootDir(self.language))
+      return ['-'+self.action+'Dirs={'+self.language+':'+outputDir+'}']
+    return []
+
+  def getFlags(self, source):
+    '''Return the full flag list: action + includes + output directories'''
+    return self.getActionFlags(source)+self.getIncludeFlags(source)+self.getOutputFlags(source)
+
+  def processFileShell(self, source, set):
+    '''Compile "source" using a shell command'''
+    return self.processFileSetShell(build.fileset.FileSet([source], tag = set.tag))
+
+  def processFileSetShell(self, set):
+    '''Compile all the files in "set" using a shell command
+       - Sets tagged "old ..." are skipped'''
+    if not len(set) or set.tag.startswith('old'): return self.output
+    self.debugPrint('Compiling '+str(set)+' into a '+self.language+' '+self.action, 3, 'compile')
+    command = ' '.join([self.getProcessor()]+self.getFlags(set)+set)
+    output  = self.executeShellCommand(command, self.handleErrors)
+    #self.output.extend(map(self.getIntermediateFileName, set))
+    return self.output
+
+  def processFileModule(self, source, set):
+    '''Compile "source" using a module directly'''
+    return self.processFileSetModule(build.fileset.FileSet([source], tag = set.tag))
+
+  def processFileSetModule(self, set):
+    '''Compile all the files in "set" using a module directly
+       - Results are memoized in the argument database, keyed by source checksums and flags'''
+    if not len(set): return self.output
+    import nargs
+    import sourceDatabase
+
+    # Check for cached output
+    #   We could of course hash this big key again
+    #   These keys could be local, but we can do that if they proliferate too much. It would mean
+    #     that each project would have to compile the SIDL once
+    flags    = self.getFlags(set)
+    cacheKey = 'cacheKey'+''.join([sourceDatabase.SourceDB.getChecksum(f) for f in set]+[new_md5(''.join(flags)).hexdigest()])
+    if set.tag.startswith('old') and cacheKey in self.argDB:
+      self.debugPrint('Loading '+str(set)+' for a '+self.language+' '+self.action+' from argument database ('+cacheKey+')', 3, 'compile')
+      outputFiles = cPickle.loads(self.argDB[cacheKey])
+    else:
+      # Save targets so that they do not interfere with Scandal
+      target            = self.argDB.target
+      self.argDB.target = []
+      # Run compiler and reporter
+      compiler = self.getCompilerModule().Scandal(flags+set)
+      if not set.tag.startswith('old'):
+        self.debugPrint('Compiling '+str(set)+' into a '+self.language+' '+self.action, 3, 'compile')
+        self.debugPrint('  with flags '+str(flags), 4, 'compile')
+        compiler.run()
+      else:
+        self.debugPrint('Reporting on '+str(set)+' for a '+self.language+' '+self.action, 3, 'compile')
+        self.debugPrint('  with flags '+str(flags), 4, 'compile')
+        compiler.report()
+      outputFiles          = compiler.outputFiles
+      self.argDB[cacheKey] = cPickle.dumps(outputFiles)
+      # Restore targets and remove flags
+      self.argDB.target = target
+      # Scandal stored the parsed flags in argDB; scrub them so they do not leak to later runs
+      for flag in flags:
+        del self.argDB[nargs.Arg.parseArgument(flag)[0]]
+    # Construct output
+    tag = self.outputTag
+    if self.isServer:
+      # Server output tags also carry the package name, e.g. "cxx server mypackage"
+      (package, ext) = os.path.splitext(os.path.basename(set[0]))
+      tag           += ' '+package
+    self.output.children.append(build.fileset.RootedFileSet(self.usingSIDL.project.getUrl(), outputFiles, tag = tag))
+    return self.output
+
+  def processFile(self, source, set):
+    '''Compile "source"'''
+    return self.processFileModule(source, set)
+
+  def processFileSet(self, set):
+    '''Compile all the files in "set"'''
+    return self.processFileSetModule(set)
+
+  def processOldFile(self, source, set):
+    '''Compile "source"'''
+    return self.processFileModule(source, set)
+
+  def processOldFileSet(self, set):
+    '''Compile all the files in "set"'''
+    return self.processFileSetModule(set)

config/BuildSystem/build/compile/__init__.py

+# Submodules of the build.compile package
+# NOTE(review): conventional Python uses __all__ (double underscores) for this, and
+# 'all' shadows the builtin all() — confirm no code reads build.compile.all before renaming
+all = ['C', 'Cxx', 'F90', 'SIDL']

config/BuildSystem/build/fileState.py

+import build.fileset
+import build.transform
+
+import os
+
+class FileChanged (build.transform.Transform):
+  '''Detects whether files have changed using checksums
+     - If the force flag is given, all files are marked changed'''
+  def __init__(self, sourceDB, inputTag = None, changedTag = 'changed', unchangedTag = 'unchanged', force = 0):
+    build.transform.Transform.__init__(self)
+    self.sourceDB      = sourceDB
+    self.inputTag      = inputTag
+    # Normalize a single tag to a one-element list so membership tests work uniformly
+    if isinstance(self.inputTag, str): self.inputTag = [self.inputTag]
+    self.force         = force
+    # Files are routed into one of these two child sets of the output
+    self.changed       = build.fileset.FileSet(tag = changedTag)
+    self.unchanged     = build.fileset.FileSet(tag = unchangedTag)
+    self.output.children.append(self.changed)
+    self.output.children.append(self.unchanged)
+    return
+
+  def compare(self, source, sourceEntry):
+    '''Return True if the checksum for "source" has changed since "sourceEntry" was recorded
+       - sourceEntry[0] is the stored checksum, per its usage here — confirm against sourceDB'''
+    self.debugPrint('Checking for '+source+' in the source database', 3, 'sourceDB')
+    checksum = self.sourceDB.getChecksum(source)
+    if not sourceEntry[0] == checksum:
+      self.debugPrint(source+' has changed relative to the source database: '+str(sourceEntry[0])+' <> '+str(checksum), 3, 'sourceDB')
+      return 1
+    return 0
+
+  def hasChanged(self, source):
+    '''Returns True if "source" has changed since it was last updated in the source database
+       - Missing files, files absent from the database, and forced runs all count as changed
+       - An unchanged file is still changed if any of its recorded dependencies changed
+         (entry index 3 holds the dependency list, per its usage here)'''
+    if self.force:
+      self.debugPrint(source+' was forcibly tagged', 3, 'sourceDB')
+      return 1
+    try:
+      if not os.path.exists(source):
+        self.debugPrint(source+' does not exist', 3, 'sourceDB')
+      else:
+        if not self.compare(source, self.sourceDB[source]):
+          for dep in self.sourceDB[source][3]:
+            try:
+              if self.compare(dep, self.sourceDB[dep]):
+                return 1
+            except KeyError: pass
+          return 0
+    except KeyError:
+      self.debugPrint(source+' does not exist in source database', 3, 'sourceDB')
+    return 1
+
+  def handleFile(self, f, set):
+    '''Place the file into either the "changed" or "unchanged" output set
+       - If inputTag was specified, only handle files with this tag'''
+    if self.inputTag is None or set.tag in self.inputTag:
+      if self.hasChanged(f):
+        self.changed.append(f)
+      else:
+        self.unchanged.append(f)
+      return self.output
+    return build.transform.Transform.handleFile(self, f, set)
+
+class GenericTag (FileChanged):
+  '''Uses input tag, extension and directory checks to group files which need further processing'''
+  def __init__(self, sourceDB, outputTag, inputTag = None, ext = '', deferredExt = None, root = None, force = 0):
+    '''Changed files get outputTag, unchanged files get "old "+outputTag, and changed
+       files matching deferredExt are collected into an "update "+outputTag set'''
+    FileChanged.__init__(self, sourceDB, inputTag, outputTag, 'old '+outputTag, force)
+    # Normalize extensions to lists of '.ext' strings
+    self.ext   = ext
+    if isinstance(self.ext, list):
+      self.ext = map(lambda x: '.'+x, self.ext)
+    elif isinstance(self.ext, str):
+      self.ext = ['.'+self.ext]
+    self.deferredExt   = deferredExt
+    if isinstance(self.deferredExt, list):
+      self.deferredExt = map(lambda x: '.'+x, self.deferredExt)
+    elif isinstance(self.deferredExt, str):
+      self.deferredExt = ['.'+self.deferredExt]
+    self.root   = root
+    if not self.root is None:
+      self.root = os.path.normpath(self.root)
+    self.deferredUpdates = build.fileset.FileSet(tag = 'update '+outputTag)
+    self.output.children.append(self.deferredUpdates)
+    return
+
+  def __str__(self):
+    return 'Tag transform for extension '+str(self.ext)+str(self.inputTag)+' to tag '+self.changed.tag
+
+  def handleFile(self, f, set):
+    '''- If the file is not in the specified root directory, use the default handler
+       - If the file is in the extension list, call the parent method
+       - If the file is in the deferred extension list and has changed, put it in the update set'''
+    if self.inputTag is None or set.tag in self.inputTag:
+      (base, ext) = os.path.splitext(f)
+      # commonprefix on normalized paths implements a "file is under root" containment test
+      if not self.root or self.root+os.sep == os.path.commonprefix([os.path.normpath(base), self.root+os.sep]):
+        if self.ext is None or ext in self.ext:
+          return FileChanged.handleFile(self, f, set)
+        elif not self.deferredExt is None and ext in self.deferredExt:
+          if self.hasChanged(f):
+            self.deferredUpdates.append(f)
+          return self.output
+    return build.transform.Transform.handleFile(self, f, set)
+
+  def handleFileSet(self, set):
+    '''Check root directory if given, and then execute the default set handling method'''
+    if self.root and not os.path.isdir(self.root):
+      raise RuntimeError('Invalid root directory for tagging operation: '+self.root)
+    return FileChanged.handleFileSet(self, set)
+
class Update (build.transform.Transform):
  '''Update nodes process files whose update in the source database was delayed'''
  def __init__(self, sourceDB, tag = None):
    build.transform.Transform.__init__(self)
    self.sourceDB = sourceDB
    # Accept no tag, a single tag, or a list of tags; store each with the 'update ' prefix
    tags = tag
    if tags is None:
      tags = []
    if tags and not isinstance(tags, list):
      tags = [tags]
    self.tag = ['update '+t for t in tags]
    return

  def __str__(self):
    return 'Update transform for '+str(self.tag)

  def handleFile(self, f, set):
    '''If the file tag starts with "update", then update it in the source database'''
    matchesTag    = self.tag and set.tag in self.tag
    looksDeferred = set.tag and set.tag.startswith('update')
    if not (matchesTag or looksDeferred):
      return build.transform.Transform.handleFile(self, f, set)
    if os.path.isfile(f):
      self.sourceDB.updateSource(f)
    return self.output

  def handleFileSet(self, set):
    '''Execute the default set handling method, and save source database'''
    result = build.transform.Transform.handleFileSet(self, set)
    # I could check here, and only save in the first recursive call
    self.sourceDB.save()
    try:
      import gc
    except ImportError:
      pass
    else:
      gc.collect()
    return result

config/BuildSystem/build/fileset.py

+import base
+
+import os
+
class FileSet(list):
  '''A list of unique file names, optionally tagged and holding child sets.

  When mustExist is true (the default) every name added must refer to an
  existing path, otherwise ValueError is raised. append/extend/insert
  silently ignore names already present.'''
  def __init__(self, filenames = None, tag = None, filesets = None, mustExist = 1):
    list.__init__(self)
    # Copy the incoming child list so callers cannot alias our children;
    # a None default avoids the shared-mutable-default-argument pitfall
    # (the old "filesets = []" default) while remaining call-compatible.
    if filesets is None:
      self.children = []
    else:
      self.children = filesets[:]
    self.tag       = tag
    self.mustExist = mustExist
    if not filenames is None:
      self.extend(filenames)
    return

  def clone(self):
    '''Return a FileSet with the same tag and existence flag, but no members or children'''
    return FileSet(tag = self.tag, mustExist = self.mustExist)

  def checkFile(self, filename):
    '''If mustExist true, check for file existence; returns the (possibly adjusted) name to store'''
    if self.mustExist and not os.path.exists(filename):
      raise ValueError('File '+filename+' does not exist!')
    return filename

  def append(self, item):
    '''Validate the file and add it, ignoring duplicates'''
    item = self.checkFile(item)
    if not item in self:
      list.append(self, item)
    return

  def extend(self, l):
    '''Append each item in turn, so validation and deduplication apply to all of them'''
    for item in l:
      self.append(item)
    return

  def insert(self, index, item):
    '''Validate the file and insert it at "index", ignoring duplicates'''
    item = self.checkFile(item)
    if not item in self:
      list.insert(self, index, item)
    return

  def isCompatible(self, set):
    '''Return True if the set tags and mustExist flags match'''
    return (self.tag == set.tag) and (self.mustExist == set.mustExist)
+
class TreeFileSet (FileSet):
  '''A FileSet built by recursively walking one or more root directories,
  keeping every file that passes fileTest (and is not a build/editor artifact).'''
  def __init__(self, roots = None, fileTest = lambda file: 1, tag = None):
    if roots is None:
      # Bug fix: os.getcwd() returns a string and must be wrapped in a list;
      # passing the bare string made FileSet iterate over its characters as file names
      self.roots  = FileSet([os.getcwd()])
    else:
      if isinstance(roots, str):
        self.roots = FileSet([roots])
      else:
        self.roots = roots
    self.fileTest = fileTest
    FileSet.__init__(self, filenames = self.walkTree(), tag = tag)
    return

  def walkTree(self):
    '''Return the list of accepted files found under every root, in walk order'''
    files = []
    for root in self.roots:
      # NOTE(review): os.path.walk is Python 2 only (removed in Python 3);
      # kept here since this codebase targets Python 2
      os.path.walk(root, self.walkFunc, files)
    return files

  def walkFunc(self, defaultFiles, directory, fileList):
    '''Append every acceptable file in "directory" to defaultFiles
       - Files directly inside an SCCS directory, subdirectories, editor backups
         (name~) and autosave files (#name#) are skipped'''
    if (os.path.basename(directory) == 'SCCS'): return
    for file in fileList:
      fullPath = os.path.join(directory, file)
      if (os.path.isdir(fullPath)):            continue
      if (file[-1] == '~'):                    continue
      if (file[0] == '#' and file[-1] == '#'): continue
      if (self.fileTest(fullPath)): defaultFiles.append(fullPath)
+
class ExtensionFileSet (TreeFileSet):
  '''A TreeFileSet that keeps only files whose extension appears in a given list'''
  def __init__(self, roots, exts, tag = None):
    if isinstance(exts, list):
      self.exts = exts
    else:
      self.exts = [exts]
    TreeFileSet.__init__(self, roots, self.extTest, tag = tag)
    return

  def extTest(self, file):
    '''Return 1 when the extension of "file" is in self.exts, otherwise 0'''
    (stem, ext) = os.path.splitext(file)
    if ext in self.exts:
      return 1
    return 0
+
class RootedFileSet(FileSet, base.Base):
  '''A FileSet whose members are stored relative to a project root directory.

  The root is derived lazily from projectUrl (via getInstalledProject) and
  cached in _projectRoot; indexing, slicing and iteration rejoin the root to
  each stored name. The cached root is dropped on pickling.'''
  def __init__(self, projectUrl, filenames = None, tag = None, filesets = [], mustExist = 1):
    FileSet.__init__(self, None, tag, filesets, mustExist)
    base.Base.__init__(self)
    self.projectUrl = projectUrl
    # Extend only after projectUrl is set, since checkFile() needs the project root
    if not filenames is None:
      self.extend(filenames)
    return

  def __str__(self):
    return '['+','.join(map(str, self))+']'

  def getProjectUrl(self):
    return self._projectUrl

  def setProjectUrl(self, url):
    self._projectUrl = url
  projectUrl = property(getProjectUrl, setProjectUrl, doc = 'The URL of the project which provides a root for all files in the set')

  def getProjectRoot(self):
    # Computed lazily and cached; an unknown project yields the empty root ''
    if not hasattr(self, '_projectRoot'):
      project = self.getInstalledProject(self.projectUrl)
      if project is None:
        self._projectRoot = ''
      else:
        self._projectRoot = project.getRoot()
    return self._projectRoot

  def setProjectRoot(self, root):
    # Bug fix: a property setter is invoked with the assigned value; without this
    # parameter the assignment raised TypeError instead of the intended RuntimeError
    raise RuntimeError('Cannot set the project root. It is determined by the project URL.')
  projectRoot = property(getProjectRoot, setProjectRoot, doc = 'The project root for all files in the set')

  def __getstate__(self):
    '''Remove the cached project root directory before pickling'''
    d = base.Base.__getstate__(self)
    if '_projectRoot' in d: del d['_projectRoot']
    return d

  def __getitem__(self, index):
    return os.path.join(self.projectRoot, list.__getitem__(self, index))

  def __getslice__(self, start, end):
    # NOTE: __getslice__/__setslice__ are honored by Python 2 only
    root = self.projectRoot
    return map(lambda f: os.path.join(root, f), list.__getslice__(self, start, end))

  def __setitem__(self, index, item):
    return list.__setitem__(self, index, self.checkFile(item))

  def __setslice__(self, start, end, s):
    root = self.projectRoot
    return list.__setslice__(self, start, end, map(lambda f: self.checkFile(f, root), s))

  def __iter__(self):
    # Iterate through __getitem__ so each member comes back with the root prepended
    return FileSetIterator(self)

  def clone(self):
    '''Return a RootedFileSet with the same root, tag and existence flag, but no members or children'''
    set = RootedFileSet(self.projectUrl, tag = self.tag, mustExist = self.mustExist)
    # Bug fix: go through the property so the root is computed when not yet cached,
    # instead of raising AttributeError on a missing _projectRoot attribute
    set._projectRoot = self.projectRoot
    return set

  def checkFile(self, filename, root = None):
    '''Validate "filename" and return the name to store, relative to the project root
       - Absolute paths must lie under the root and are stripped of it
       - Relative paths are validated against the rooted location but stored as given'''
    if root is None:
      root = self.projectRoot
    if os.path.isabs(filename):
      filename = FileSet.checkFile(self, filename)
      if not filename.startswith(root+os.sep):
        raise ValueError('Absolute path '+filename+' conflicts with project root '+root)
      else:
        filename = filename[len(root)+1:]
    else:
      # Bug fix: validate the rooted path but keep the root-relative name for storage,
      # mirroring the absolute branch above (which strips the root before storing);
      # previously the joined absolute path was stored, defeating root-relative storage
      FileSet.checkFile(self, os.path.join(root, filename))
    return filename

  def isCompatible(self, set):
    '''Return True if the roots match and the superclass match returns True'''
    return isinstance(set, RootedFileSet) and (self.projectRoot == set.projectRoot) and FileSet.isCompatible(self, set)
+
class FileSetIterator (object):
  '''Iterator over a FileSet that captures the set length up front.

  Members are fetched through __getitem__, which lets RootedFileSet prepend
  its project root to each name as it is yielded.'''
  def __init__(self, set):
    self.set   = set
    self.index = -1
    self.max   = len(set)
    return

  def __iter__(self):
    return self

  def next(self):
    '''Return the next member, raising StopIteration once the captured length is reached'''
    self.index += 1
    if self.index == self.max: raise StopIteration()
    return self.set[self.index]
  # Python 3 spells the iterator protocol method __next__; alias it so the
  # iterator works under both interpreters (backward compatible under Python 2)
  __next__ = next
+
class RootedExtensionFileSet (RootedFileSet, ExtensionFileSet):
  '''An ExtensionFileSet whose root directories are interpreted relative to a project root'''
  def __init__(self, projectUrl, roots, exts, tag = None):
    if isinstance(exts, list):
      self.exts = exts
    else:
      self.exts = [exts]
    base.Base.__init__(self)
    self.projectUrl = projectUrl
    # Anchor every root directory under the project root before walking
    rootedDirs = [os.path.join(self.projectRoot, d) for d in roots]
    TreeFileSet.__init__(self, rootedDirs, self.extTest, tag = tag)
    return

config/BuildSystem/build/framework.py

+import user
+import importer
+import base
+import sourceDatabase