Commits

Enis Afgan committed 359ff16 Merge

Merged default branch into stable for release


Files changed (48)

 # Dev files
 .coverage
 htmlcov
-
+cm_webapp.pid
 
 # Eclipse+PyDev project files
 .project
+### CloudMan - January 7, 2014.
+
+##### Major updates
+* On AWS, updated *galaxy* (snap-69893175) and *galaxyIndices* (snap-4b20f451)
+  file system snapshots, which include the Nov 4, 2013 Galaxy release as well as
+  a significant number of Galaxy tools updates and additions. Tool installations
+  from the Tool Shed work as expected.
+
+* Updated the AWS AMI: *Galaxy CloudMan 2.3* (ami-a7dbf6ce)
+
+* Made multi-process Galaxy the default option for running Galaxy with CloudMan
+  (see the user-data sketch at the end of this entry).
+
+* Automatically toggle whether the master node acts as a job execution host,
+  depending on whether worker nodes are present.
+
+* Added support for attaching external Gluster-based file systems, plus the ability
+  to initialize clusters from Gluster/NFS volumes when so configured in *snaps.yaml*.
+
+* Added support for creating a volume based on an *archive_url*, allowing a file
+  system to be downloaded at cluster launch; also made it possible to use a
+  transient disk for this purpose.
+
+* Added tagging support for OpenStack instances (requires OpenStack Havana release).
+
+* The Galaxy Reports app now starts by default on Galaxy-type clusters; it is
+  accessible at the ``<instance IP>/reports/`` URL.
+
+##### Minor updates
+
+* Added support for the new AWS High I/O instance types.
+
+* Created a *proFTPd* service for managing the application; this fixes occasional
+  issues with FTP connectivity.
+
+* Updated the *admin_users* form (on the Admin page) data pre-fill and explanatory
+  text to correctly indicate that the form now sets the complete list of admin
+  users instead of appending to the existing list.
+
+* Included the cluster name in the page title.
+
+* Explicitly indicate that, upon cluster deletion, shared clusters get removed too.
+
+* Added a ``TMPDIR`` env var that points to a directory on the (Galaxy) data file system.
+
+* Generalized support for finding ``libc`` on newer Ubuntu platforms. This allows
+  CloudMan to run on Ubuntu 13.04 and aims to be forward compatible with new minor
+  releases of ``libc`` (thanks Brad Chapman).
+
+* Numerous fixes, see commit log for details.
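
For reference, a minimal sketch of the user-data keys behind the multi-process default noted above, as read by the code in this commit (``cm/boot/conf.py`` and ``cm/services/apps/galaxy.py``); the values shown are only illustrative and match the new defaults:

    # Illustrative user data only; key names come from this commit, values are examples.
    ud = {
        'configure_multiple_galaxy_processes': True,  # now defaults to True
        'web_thread_count': 3,  # one Galaxy web process per port, 8080-8082
    }
    # The nginx proxy is then rewritten to balance across those ports, e.g.
    # "upstream galaxy_app { server localhost:8080;server localhost:8081;server localhost:8082; }"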
+
+
 ### CloudMan - June 29, 2013.
 
 * Unification of ``galaxyTools`` and ``galaxyData`` file systems into a single

cm/base/controller.py

 """Contains functionality needed in every webapp interface"""
-import os
-import time
 import logging
-from cm import util
 
 log = logging.getLogger('cloudman')
 

cm/boot/__init__.py

     PyYAML http://pyyaml.org/wiki/PyYAMLDocumentation (easy_install pyyaml)
     boto http://code.google.com/p/boto/ (easy_install boto)
 """
-import base64
 import logging
 import os
-import re
 import shutil
 import sys
 import tarfile
 from boto.s3.connection import OrdinaryCallingFormat, S3Connection, SubdomainCallingFormat
 
 from .util import _run, _is_running, _make_dir
+from .conf import _install_authorized_keys, _install_conf_files, _configure_nginx
 from .object_store import _get_file_from_bucket
 
 logging.getLogger(
     # url = 'http://userwww.service.emory.edu/~eafgan/content/nginx.conf'
     # log.info("Getting nginx conf file (using wget) from '%s' and saving it to '%s'" % (url, local_nginx_conf_file))
     # _run('wget --output-document=%s %s' % (local_nginx_conf_file, url))
-    _configure_nginx(ud)
+    _configure_nginx(log, ud)
     _fix_nginx_upload(ud)
     rmdir = False  # Flag to indicate if a dir should be deleted
     upload_store_dir = '/mnt/galaxyData/upload_store'
     return nginx_dir
 
 
-def _write_conf_file(contents_descriptor, path):
-    destination_directory = os.path.dirname(path)
-    if not os.path.exists(destination_directory):
-        os.makedirs(destination_directory)
-    if contents_descriptor.startswith("http") or contents_descriptor.startswith("ftp"):
-        log.info("Fetching file from %s" % contents_descriptor)
-        _run(log, "wget --output-document='%s' '%s'" % (contents_descriptor, path))
-    else:
-        log.info("Writing out configuration file encoded in user-data:")
-        with open(path, "w") as output:
-            output.write(base64.b64decode(contents_descriptor))
-
-
-def _configure_nginx(ud):
-    # User specified nginx.conf file, can be specified as
-    # url or base64 encoded plain-text.
-    nginx_conf = ud.get("nginx_conf_contents", None)
-    nginx_conf_path = ud.get("nginx_conf_path", "/usr/nginx/conf/nginx.conf")
-    if nginx_conf:
-        _write_conf_file(nginx_conf, nginx_conf_path)
-    reconfigure_nginx = ud.get("reconfigure_nginx", True)
-    if reconfigure_nginx:
-        _reconfigure_nginx(ud, nginx_conf_path)
-
-
-def _reconfigure_nginx(ud, nginx_conf_path):
-    configure_multiple_galaxy_processes = ud.get(
-        "configure_multiple_galaxy_processes", False)
-    web_threads = ud.get("web_thread_count", 1)
-    if configure_multiple_galaxy_processes and web_threads > 1:
-        ports = [8080 + i for i in range(web_threads)]
-        servers = ["server localhost:%d;" % port for port in ports]
-        upstream_galaxy_app_conf = "upstream galaxy_app { %s } " % "".join(
-            servers)
-        nginx_conf = open(nginx_conf_path, "r").read()
-        new_nginx_conf = re.sub("upstream galaxy_app.*\\{([^\\}]*)}",
-                                upstream_galaxy_app_conf, nginx_conf)
-        open(nginx_conf_path, "w").write(new_nginx_conf)
-
-
 def _fix_nginx_upload(ud):
     """
     Set ``max_client_body_size`` in nginx config. This is necessary for the
             sys.exit(0)
         else:
             usage()
-    # Currently using this to configure nginx SSL, but it could be used
-    # to configure anything really.
-    conf_files = ud.get('conf_files', [])
-    for conf_file_obj in conf_files:
-        path = conf_file_obj.get('path')
-        content = conf_file_obj.get('content')
-        _write_conf_file(content, path)
+
+    _install_conf_files(log, ud)
+    _install_authorized_keys(log, ud)
 
     if 'no_start' not in ud:
         if ('nectar' in ud.get('cloud_name', '').lower()):
+import os
+from .util import _run
+import base64
+import re
+
+
+class AuthorizedKeysManager(object):
+
+    def __init__(self):
+        self.sudo_cmd = "sudo"  # Hack to allow overriding the sudo command during testing.
+
+    def _get_home_dir(self, user):
+        path = os.path.expanduser("~%s" % user)
+        return path if os.path.exists(path) else None
+
+    def add_authorized_key(self, log, user, authorized_key):
+        home_dir = self._get_home_dir(user)
+        sudo_cmd = self.sudo_cmd
+        if home_dir:
+            ssh_dir = os.path.join(home_dir, ".ssh")
+            if not os.path.exists(ssh_dir):
+                if not (_run(log, "%s mkdir -p '%s'" % (sudo_cmd, ssh_dir)) and \
+                        _run(log, "%s chown %s '%s'" % (sudo_cmd, user, ssh_dir)) and \
+                        _run(log, "%s chmod 700 '%s'" % (sudo_cmd, ssh_dir))):
+                    return False
+            authorized_keys_file = os.path.join(ssh_dir, "authorized_keys2")
+            if not os.path.exists(authorized_keys_file):
+                authorized_keys_file = os.path.join(ssh_dir, "authorized_keys")
+
+            cmd = "KEY=%s; %s grep $KEY '%s' || %s echo $KEY >> '%s'" % \
+                (_shellquote(authorized_key), sudo_cmd, authorized_keys_file, sudo_cmd, authorized_keys_file)
+            return _run(log, cmd) and \
+                   _run(log, "%s chown %s '%s'" % (sudo_cmd, user, authorized_keys_file)) and \
+                   _run(log, "%s chmod 600 '%s'" % (sudo_cmd, authorized_keys_file))
+        return True
+
+
+def _install_authorized_keys(log, ud, manager=AuthorizedKeysManager()):
+    authorized_keys = ud.get("authorized_keys", None) or []
+    authorized_key_users = ud.get("authorized_key_users", ["ubuntu", "galaxy"])
+    for authorized_key in authorized_keys:
+        for user in authorized_key_users:
+            if not manager.add_authorized_key(log, user, authorized_key):
+                log.warn("Failed to add authorized_key for user %s" % user)
+
+
+def _write_conf_file(log, contents_descriptor, path):
+    destination_directory = os.path.dirname(path)
+    if not os.path.exists(destination_directory):
+        os.makedirs(destination_directory)
+    if contents_descriptor.startswith("http") or contents_descriptor.startswith("ftp"):
+        log.info("Fetching file from %s" % contents_descriptor)
+        _run(log, "wget --output-document='%s' '%s'" % (path, contents_descriptor))
+    else:
+        log.info("Writing out configuration file encoded in user-data:")
+        with open(path, "w") as output:
+            output.write(base64.b64decode(contents_descriptor))
+
+
+def _install_conf_files(log, ud):
+    # Currently using this to configure nginx SSL, but it could be used
+    # to configure anything really.
+    conf_files = ud.get('conf_files', [])
+    for conf_file_obj in conf_files:
+        path = conf_file_obj.get('path', None)
+        content = conf_file_obj.get('content', None)
+        if path is None:
+            log.warn("Found conf file with no path, skipping.")
+            continue
+        if content is None:
+            log.warn("Found conf file with no content, skipping.")
+            continue
+        _write_conf_file(log, content, path)
+
+
+def _configure_nginx(log, ud):
+    # User specified nginx.conf file, can be specified as
+    # url or base64 encoded plain-text.
+    nginx_conf = ud.get("nginx_conf_contents", None)
+    nginx_conf_path = ud.get("nginx_conf_path", "/usr/nginx/conf/nginx.conf")
+    if nginx_conf:
+        _write_conf_file(log, nginx_conf, nginx_conf_path)
+    reconfigure_nginx = ud.get("reconfigure_nginx", True)
+    if reconfigure_nginx:
+        _reconfigure_nginx(ud, nginx_conf_path, log)
+
+
+def _reconfigure_nginx(ud, nginx_conf_path, log):
+    log.debug("Reconfiguring nginx conf")
+    configure_multiple_galaxy_processes = ud.get(
+        "configure_multiple_galaxy_processes", True)
+    web_threads = ud.get("web_thread_count", 3)
+    if configure_multiple_galaxy_processes and web_threads > 1:
+        log.debug("Reconfiguring nginx to support Galaxy running %s "
+            "web threads." % web_threads)
+        ports = [8080 + i for i in range(web_threads)]
+        servers = ["server localhost:%d;" % port for port in ports]
+        upstream_galaxy_app_conf = "upstream galaxy_app { %s } " % "".join(
+            servers)
+        with open(nginx_conf_path, "r") as old_conf:
+            nginx_conf = old_conf.read()
+        new_nginx_conf = re.sub("upstream galaxy_app.*\\{([^\\}]*)}",
+                                upstream_galaxy_app_conf, nginx_conf)
+        with open(nginx_conf_path, 'w') as new_conf:
+            new_conf.write(new_nginx_conf)
+
+def _shellquote(s):
+    """
+    http://stackoverflow.com/questions/35817/how-to-escape-os-system-calls-in-python
+    """
+    return "'" + s.replace("'", "'\\''") + "'"
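
For reference, a minimal sketch of the user-data fragment the new ``_install_conf_files`` and ``_install_authorized_keys`` helpers consume; the field names come from the code above, while the paths, key, and content are placeholder values:

    import base64

    # Hypothetical user-data fragment; 'content' may be an http/ftp URL (fetched
    # with wget) or base64-encoded text that gets written out verbatim.
    ud = {
        'conf_files': [
            {'path': '/usr/nginx/conf/nginx.conf',
             'content': base64.b64encode('worker_processes 2;\n')},
        ],
        'authorized_keys': ['ssh-rsa AAAA... user@example.org'],
        'authorized_key_users': ['ubuntu', 'galaxy'],  # the default users if omitted
    }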

cm/clouds/dummy.py

-import urllib
 import socket
 from cm.clouds import CloudInterface
-from cm.services.data.filesystem import Volume
-
-from oca import Client, VirtualMachine, VirtualMachinePool, CONNECTED
-from oca.exceptions import OpenNebulaException
-
-import new
 
 # Obtaining IP and MAC addresses
-import socket
 import fcntl
 import struct
 
-import subprocess
-
 import logging
 log = logging.getLogger('cloudman')
 
 from boto.exception import BotoServerError
 from boto.exception import EC2ResponseError
 from boto.s3.connection import S3Connection
-from boto.s3.connection import OrdinaryCallingFormat
-from boto.s3.connection import SubdomainCallingFormat
 from boto.ec2.connection import EC2Connection
 
 
 
     def get_fqdn(self):
         log.debug("Retrieving FQDN")
-        if self.fqdn == None:
+        if not self.fqdn:
             try:
                 self.fqdn = socket.getfqdn()
             except IOError:
         return self.region
 
     def get_ec2_connection(self):
-        if self.ec2_conn == None:
+        if not self.ec2_conn:
             try:
                 if self.app.TESTFLAG is True:
                     log.debug("Attempted to establish EC2 connection, but TESTFLAG is set. "
 
     def get_s3_connection(self):
         # log.debug( 'Getting boto S3 connection' )
-        if self.s3_conn == None:
+        if not self.s3_conn:
             log.debug("No S3 Connection, creating a new one.")
             try:
                 self.s3_conn = S3Connection(
             ['%s: %s' % (key, value) for key, value in worker_ud.iteritems()])
         log.debug("Starting instance(s) with the following command : ec2_conn.run_instances( "
                   "image_id='{iid}', min_count='{min_num}', max_count='{num}', key_name='{key}', "
-                  "security_groups=['{sgs}'], user_data=[{ud}], instance_type='{type}', placement='{zone}')"
+                  "security_groups=['{sgs}'], user_data (with password/secret_key filtered out)=[{ud}], instance_type='{type}', placement='{zone}')"
                   .format(iid=self.get_ami(), min_num=min_num, num=num,
                     key=self.get_key_pair_name(), sgs=", ".join(self.get_security_groups()),
-                    ud=worker_ud_str, type=instance_type, zone=self.get_zone()))
+                    ud="\n".join(['%s: %s' % (key, value) for key, value in worker_ud.iteritems() if key not in['password', 'secret_key']]),
+                    type=instance_type, zone=self.get_zone()))
         try:
             # log.debug( "Would be starting worker instance(s)..." )
             reservation = None
                            # 'solution')
             if reservation:
                 for instance in reservation.instances:
-                    self.add_tag(instance, 'clusterName', self.app.ud['cluster_name'])
-                    self.add_tag(instance, 'role', worker_ud['role'])
-                    self.add_tag(instance, 'Name', "Worker: {0}"
-                        .format(self.app.ud['cluster_name']))
+                    # At this point in the launch, tag only amazon instances
+                    if 'amazon' in self.app.ud.get('cloud_name', 'amazon').lower():
+                        self.add_tag(instance, 'clusterName', self.app.ud['cluster_name'])
+                        self.add_tag(instance, 'role', worker_ud['role'])
+                        self.add_tag(instance, 'Name', "Worker: {0}"
+                            .format(self.app.ud['cluster_name']))
                     i = Instance(app=self.app, inst=instance, m_state=instance.state)
                     log.debug("Adding Instance %s" % instance)
                     self.app.manager.worker_instances.append(i)

cm/clouds/eucalyptus.py

         return self.public_hostname
 
     def run_instances(self, num, instance_type, spot_price=None, **kwargs):
-        use_spot = False
         if spot_price is not None:
             log.warning(
                 'Eucalyptus does not support spot instances -- submitting normal request')

cm/clouds/opennebula.py

-import urllib
 import socket
 from cm.clouds import CloudInterface
 from cm.services.data.filesystem import Volume
 import new
 
 # Obtaining IP and MAC addresses
-import socket
 import fcntl
 import struct
 

cm/clouds/openstack.py

     def __init__(self, app=None):
         super(OSInterface, self).__init__()
         self.app = app
-        self.tags_supported = False
+        self.tags_supported = False  # Not used here, see _tags_supported method instead
         self.set_configuration()
 
+    def _tags_supported(self, resource):
+        """
+        ``True`` if tagging for a given resource is operational. ``False``
+        otherwise.
+        """
+        # The Havana release of OpenStack supports tags for Instance objects only
+        if type(resource) == boto.ec2.instance.Instance:
+            return True
+        log.debug("Not adding tag to resource %s because that resource "
+            "in OpenStack does not support tags." % resource)
+        return False
+
     def set_configuration(self):
         super(OSInterface, self).set_configuration()
         # Cloud details gotten from the user data
         return self.self_public_ip
 
     def add_tag(self, resource, key, value):
-        log.debug("Would add tag {key}:{value} to resource {resource} but OpenStack does not "
-                  "support tags via boto.".format(key=key, value=value, resource=resource))
-        pass
+        if self._tags_supported(resource):
+            try:
+                log.debug("Adding tag '%s:%s' to resource '%s'" % (
+                    key, value, resource.id if resource.id else resource))
+                resource.add_tag(key, value)
+            except EC2ResponseError, e:
+                log.error("Exception adding tag '%s:%s' to resource '%s': %s"
+                    % (key, value, resource, e))
 
     def get_tag(self, resource, key):
-        log.debug("Would get tag {key} from resource {resource} but OpenStack "
-            "does not support tags via boto.".format(key=key, resource=resource))
-        pass
+        value = None
+        if self._tags_supported(resource):
+            try:
+                log.debug("Getting tag '%s' on resource '%s'" % (key, resource.id))
+                value = resource.tags.get(key, None)
+            except EC2ResponseError, e:
+                log.error("Exception getting tag '%s' on resource '%s': %s" %
+                    (key, resource, e))
+        return value
 """Universe configuration builder."""
-import time
-import sys
-import os
+import ConfigParser
 import logging
 import logging.config
-import ConfigParser
-# from optparse import OptionParser
+import os
+import sys
 from cm.util import string_as_bool
 from cm.util import misc
 from cm.util import paths
     ("m2.2xlarge", "High-Memory Double Extra Large"),
     ("m2.4xlarge", "High-Memory Quadruple Extra Large"),
     ("c1.xlarge", "High-CPU Extra Large"),
+    ("i2.xlarge", "High I/O Extra Large"),
+    ("i2.2xlarge", "High I/O Double Extra Large"),
+    ("i2.4xlarge", "High I/O Quadruple Extra Large"),
+    ("i2.8xlarge", "High I/O 8x Extra Large")
 ]
 
 

cm/controllers/root.py

             if startup_opt == "Galaxy" or startup_opt == "Data":
                 # Initialize form on the main UI contains two fields named ``pss``,
                 # which arrive as a list so pull out the actual storage size value
-                if galaxy_data_option == "custom-size":
+                if galaxy_data_option == "transient":
+                    storage_type = "transient"
+                    pss = 0
+                elif galaxy_data_option == "custom-size":
+                    storage_type = "volume"
                     if isinstance(pss, list):
                         ss = None
                         for x in pss:
                                 ss = x
                         pss = ss
                 else:
+                    storage_type = "volume"
                     pss = str(self.app.manager.get_default_data_size())
-                if pss and pss.isdigit():
+                if storage_type == "transient" or (pss and pss.isdigit()):
                     pss_int = int(pss)
-                    self.app.manager.init_cluster(startup_opt, pss_int)
+                    self.app.manager.init_cluster(startup_opt, pss_int, storage_type=storage_type)
                     return self.instance_state_json(trans)
                 else:
                     msg = "Wrong or no value provided for the persistent "\
         changesets = self.app.manager.check_for_new_version_of_CM()
         if 'default_CM_rev' in changesets and 'user_CM_rev' in changesets:
             try:
-                CM_url = trans.app.config.get("CM_url", "http://bitbucket.org/galaxy/cloudman/changesets/tip/")
+                CM_url = trans.app.config.get("CM_url",
+                    "https://bitbucket.org/galaxy/cloudman/commits/all?page=tip&search=")
                 # num_changes = int(changesets['default_CM_rev']) - int(changesets['user_CM_rev'])
                 CM_url += changesets['user_CM_rev'] + '::' + changesets['default_CM_rev']
                 return CM_url
             log_file = "paster.log"
         elif service_name == 'GalaxyReports':
             log_file = os.path.join(self.app.path_resolver.galaxy_home, 'reports_webapp.log')
+        elif service_name == 'LWR':
+            log_file = os.path.join(self.app.path_resolver.lwr_home, 'paster.log')
         # Set log length
         if num_lines:
             if show == 'more':

cm/framework/base.py

 # tempfiles.  Necessary for externalizing the upload tool.  It's a little hacky
 # but for performance reasons it's way better to use Paste's tempfile than to
 # create a new one and copy.
-import cgi
 import tempfile
 
 

cm/framework/middleware/profile.py

 information at the bottom of each page.
 """
 
-import sys
-import os
 import threading
 import cgi
-import time
-from cStringIO import StringIO
 from paste import response
 
 try:

cm/framework/middleware/static.py

 import os
-import sys
-import imp
-import pkg_resources
-import mimetypes
 from paste import request
 from paste import fileapp
-from paste.util import import_string
-from paste.deploy import converters
-from paste import httpexceptions
 from paste.httpheaders import ETAG
 
 from paste.urlparser import StaticURLParser

cm/services/__init__.py

                      "Transient NFS FS"}
     HADOOP = {'type': ServiceType.APPLICATION, 'name': "Hadoop Service"}
     MIGRATION = {'type': ServiceType.APPLICATION, 'name': "Migration Service"}
-
     HTCONDOR = {'type': ServiceType.APPLICATION, 'name': "HTCondor Service"}
+    LWR = {'type': ServiceType.APPLICATION, 'name': "LWR Service"}
 
     PROFTPD = {'type': ServiceType.APPLICATION, 'name': "ProFTPd Service"}
 
             return ServiceRole.GALAXY_POSTGRES
         elif val == "GalaxyReports":
             return ServiceRole.GALAXY_REPORTS
+        elif val == "LWR":
+            return ServiceRole.LWR
         elif val == "Autoscale":
             return ServiceRole.AUTOSCALE
         elif val == "PSS":
             return "Postgres"
         elif svc_role == ServiceRole.GALAXY_REPORTS:
             return "GalaxyReports"
+        elif svc_role == ServiceRole.LWR:
+            return "LWR"
         elif svc_role == ServiceRole.AUTOSCALE:
             return "Autoscale"
         elif svc_role == ServiceRole.PSS:

cm/services/apps/__init__.py

 import commands
 import os
-from cm.util import paths
+from socket import socket, AF_INET, SOCK_STREAM
 
 
 import logging
             alive_daemon_pid = None
             system_service = service
             # Galaxy daemon is named 'paster' so handle this special case
-            if service in ['galaxy', 'galaxy_reports']:
-                system_service = 'python'
+            special_services = {"galaxy": "python", "galaxy_reports": "python", "lwr": "paster"}
+            system_service = special_services.get(service, service)  # Default back to just service
             alive_daemon_pid = commands.getoutput(
                 "ps -o comm,pid -p %s | grep %s | awk '{print $2}'" % (daemon_pid, system_service))
             if alive_daemon_pid == daemon_pid:
             pid_file = '%s/main.pid' % self.app.path_resolver.galaxy_home
         elif service == 'galaxy_reports':
             pid_file = '%s/reports_webapp.pid' % self.app.path_resolver.galaxy_home
+        elif service == 'lwr':
+            pid_file = '%s/paster.pid' % self.app.path_resolver.lwr_home
         else:
             return -1
         # log.debug("Checking pid file '%s' for service '%s'" % (pid_file,
             return commands.getoutput("head -n 1 %s" % pid_file)
         else:
             return -1
+
+    def _port_bound(self, port):
+        """
+        Determine if any process is listening on localhost on the specified port.
+        """
+        s = socket(AF_INET, SOCK_STREAM)
+        try:
+            result = s.connect_ex(('127.0.0.1', port))
+            return result == 0
+        finally:
+            s.close()

cm/services/apps/galaxy.py

         os.putenv("GALAXY_HOME", self.galaxy_home)
         os.putenv("TEMP", self.app.path_resolver.galaxy_temp)
         os.putenv("TMPDIR", self.app.path_resolver.galaxy_temp)
+        self.env_vars["GALAXY_HOME"] = self.galaxy_home
+        self.env_vars["TEMP"] = self.app.path_resolver.galaxy_temp
+        self.env_vars["TMPDIR"] = self.app.path_resolver.galaxy_temp
         conf_dir = self.option_manager.setup()
         if conf_dir:
             self.env_vars["GALAXY_UNIVERSE_CONFIG_DIR"] = conf_dir
                     % datetime.utcnow().strftime('%H_%M'), shell=True)
 
     def _multiple_processes(self):
-        return self.app.ud.get("configure_multiple_galaxy_processes", False)
+        return self.app.ud.get("configure_multiple_galaxy_processes", True)
 
     def galaxy_run_command(self, args):
         env_exports = "; ".join(["export %s='%s'" % (
         """
         nginx_dir = self.app.path_resolver.nginx_dir
         if nginx_dir:
+            galaxy_server = "server localhost:8080;"
+            if self._multiple_processes():
+                web_thread_count = int(self.app.ud.get("web_thread_count", 3))
+                galaxy_server = ''
+                if web_thread_count > 9:
+                    log.warning("Current code supports max 9 web threads. "
+                        "Setting the web thread count to 9.")
+                    web_thread_count = 9
+                for i in range(web_thread_count):
+                    galaxy_server += "server localhost:808%s;" % i
             # Customize the template
             nginx_conf_template = Template(templates.NGINX_CONF_TEMPLATE)
             params = {
                 'galaxy_home': self.galaxy_home,
-                'galaxy_data': self.app.path_resolver.galaxy_data
+                'galaxy_data': self.app.path_resolver.galaxy_data,
+                'galaxy_server': galaxy_server
             }
             templ = nginx_conf_template.substitute(params)
             # Write out the file
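
To make the substitution above concrete, a small standalone sketch of the ``galaxy_server`` value the code builds for the default ``web_thread_count`` of 3 (the string that ends up inside the nginx ``upstream galaxy_app`` block):

    web_thread_count = 3
    galaxy_server = ''
    for i in range(web_thread_count):
        galaxy_server += "server localhost:808%s;" % i
    # galaxy_server == "server localhost:8080;server localhost:8081;server localhost:8082;"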

cm/services/apps/galaxy_reports.py

 from datetime import datetime
 import os
 import subprocess
-import urllib2
 
 from cm.services.apps import ApplicationService
 
 
 import logging
 log = logging.getLogger('cloudman')
+DEFAULT_REPORTS_PORT = 9001
 
 
 class GalaxyReportsService(ApplicationService):
     def __init__(self, app):
         super(GalaxyReportsService, self).__init__(app)
         self.galaxy_home = self.app.path_resolver.galaxy_home
+        self.reports_port = DEFAULT_REPORTS_PORT
         self.name = ServiceRole.to_string(ServiceRole.GALAXY_REPORTS)
         self.svc_roles = [ServiceRole.GALAXY_REPORTS]
         self.dependencies = [ServiceDependency(
             self.app.path_resolver.galaxy_home, 'reports.conf.d')
 
     def _check_galaxy_reports_running(self):
-        dns = "http://127.0.0.1:9001"
-        try:
-            urllib2.urlopen(dns)
-            return True
-        except:
-            return False
+        return self._port_bound(self.reports_port)
 
     def start(self):
         self.state = service_states.STARTING
+        log.debug("Starting GalaxyReportsService")
         self.status()
         if not self.state == service_states.RUNNING:
             self._setup()
                 self.start = service_states.ERROR
 
     def _setup(self):
-        reports_option_manager = \
-            DirectoryGalaxyOptionManager(self.app, conf_dir=self.conf_dir, conf_file_name='reports_wsgi.ini')
+        log.debug("Running GalaxyReportsService _setup")
+        reports_option_manager = DirectoryGalaxyOptionManager(self.app,
+            conf_dir=self.conf_dir, conf_file_name='reports_wsgi.ini')
         reports_option_manager.setup()
         main_props = {
             # Place dummy database_connection for run_reports.sh's --sync-config option to replace
             'use': 'egg:PasteDeploy#prefix',
             'prefix': '/reports',
         }
-        reports_option_manager.set_properties(main_props, section='app:main', description='app_main_props')
-        reports_option_manager.set_properties(proxy_props, section='filter:proxy-prefix', description='proxy_prefix_props')
+        reports_option_manager.set_properties(main_props, section='app:main',
+            description='app_main_props')
+        reports_option_manager.set_properties(proxy_props, section='filter:proxy-prefix',
+            description='proxy_prefix_props')
 
     def remove(self, synchronous=False):
         log.info("Removing '%s' service" % self.name)
     def _run(self, args):
         command = '%s - galaxy -c "export GALAXY_REPORTS_CONFIG_DIR=\'%s\'; sh $GALAXY_HOME/run_reports.sh %s"' % (
             paths.P_SU, self.conf_dir, args)
-        return misc.run(command, "Error invoking Galaxy Reports", "Successfully invoked Galaxy Reports.")
+        return misc.run(command, "Error invoking Galaxy Reports",
+            "Successfully invoked Galaxy Reports.")
 
     def status(self):
         if self.state == service_states.SHUTTING_DOWN or \
-            self.state == service_states.SHUT_DOWN or \
-            self.state == service_states.UNSTARTED or \
-                self.state == service_states.WAITING_FOR_USER_ACTION:
+           self.state == service_states.SHUT_DOWN or \
+           self.state == service_states.UNSTARTED or \
+           self.state == service_states.WAITING_FOR_USER_ACTION:
             pass
         elif self._check_daemon('galaxy_reports'):
             if self._check_galaxy_reports_running():

cm/services/apps/htcondor.py

 
 
 class HTCondorService(ApplicationService):
-    def __init__(self, app, srv_type, host=""):
+    def __init__(self, app, srv_type="master", host=""):
         """
         the srv_type defines whether we are running a master node or a
         worker node. If we have run a worker the host IP should be passed
         self.svc_roles = [ServiceRole.HTCONDOR]
         self.name = ServiceRole.to_string(ServiceRole.HTCONDOR)
         self.srv_type = srv_type
-        if srv_type == "master":
+        if self.srv_type == "master":
             self.flock_to = ""
         else:
             self.host = host

cm/services/apps/lwr.py

+import os
+
+
+from cm.util import paths
+from cm.util import misc
+from cm.services import service_states
+from cm.services import ServiceRole
+from cm.services import ServiceDependency
+from cm.services.apps import ApplicationService
+
+import logging
+log = logging.getLogger('cloudman')
+
+INVOKE_SUCCESS = "Successfully invoked LWR."
+INVOKE_FAILURE = "Error invoking LWR."
+DEFAULT_LWR_PORT = 8913
+
+
+class LwrService(ApplicationService):
+
+    def __init__(self, app):
+        super(LwrService, self).__init__(app)
+        self.lwr_home = self.app.path_resolver.lwr_home
+        self.lwr_port = DEFAULT_LWR_PORT
+        self.name = ServiceRole.to_string(ServiceRole.LWR)
+        self.svc_roles = [ServiceRole.LWR]
+        self.dependencies = [
+            ServiceDependency(self, ServiceRole.SGE),  # Well someday anyway :)
+            ServiceDependency(self, ServiceRole.GALAXY_TOOLS)  # Any way to make this depend on where LWR is installed?
+        ]
+
+    def __rel_path(self, *args):
+        return os.path.join(self.lwr_home, *args)
+
+    def __ini_path(self):
+        return self.__rel_path("server.ini")
+
+    def _check_lwr_running(self):
+        return self._port_bound(self.lwr_port)
+
+    def start(self):
+        self.state = service_states.STARTING
+        self.status()
+        if not self.state == service_states.RUNNING:
+            self._setup()
+            started = self._run("--daemon")
+            if not started:
+                log.warn("Failed to set up or run the LWR server.")
+                self.state = service_states.ERROR
+
+    def _setup(self):
+        ini_path = self.__ini_path()
+        if not os.path.exists(ini_path):
+            misc.run("cp '%s.sample' '%s'" % (ini_path, ini_path))
+        # TODO: Configure LWR.
+
+    def remove(self, synchronous=False):
+        log.info("Removing '%s' service" % self.name)
+        super(LwrService, self).remove(synchronous)
+        self.state = service_states.SHUTTING_DOWN
+        log.info("Shutting down LWR service...")
+        if self._run("--stop-daemon"):
+            self.state = service_states.SHUT_DOWN
+            # TODO: Handle log files.
+        else:
+            log.info("Failed to shut down LWR service...")
+            self.state = service_states.ERROR
+
+    def _run(self, args):
+        command = '%s - galaxy -c "bash %s/run.sh %s"' % (
+            paths.P_SU, self.lwr_home, args)
+        return misc.run(command, INVOKE_FAILURE, INVOKE_SUCCESS)
+
+    def status(self):
+        if self.state == service_states.SHUTTING_DOWN or \
+           self.state == service_states.SHUT_DOWN or \
+           self.state == service_states.UNSTARTED or \
+           self.state == service_states.WAITING_FOR_USER_ACTION:
+            pass
+        elif self._check_daemon('lwr'):
+            if self._check_lwr_running():
+                self.state = service_states.RUNNING
+        elif self.state != service_states.STARTING:
+            log.error("LWR error; LWR not running")
+            self.state = service_states.ERROR

cm/services/apps/migration.py

+import os
+import pwd
+import grp
+import fnmatch
+from cm.services import ServiceRole
+from cm.services import service_states
+from cm.services import ServiceDependency
+from cm.services import ServiceType
+from cm.services.apps import ApplicationService
+from cm.util import misc, paths
+
+import logging
+log = logging.getLogger('cloudman')
+
+
+class Migrate1to2:
+    """Functionality for upgrading from version 1 to 2.
+    """
+    def __init__(self, app):
+        self.app = app
+
+    def _as_postgres(self, cmd, cwd=None):
+        return misc.run('%s - postgres -c "%s"' % (paths.P_SU, cmd), cwd=cwd)
+
+    def _upgrade_postgres_8_to_9(self):
+        old_data_dir = os.path.join(self.app.path_resolver.galaxy_data, "pgsql", "data")
+        new_data_dir = self.app.path_resolver.psql_dir
+        if old_data_dir == new_data_dir:
+            log.debug("Nothing to upgrade in database - paths are the same: %s" % old_data_dir)
+            return True
+
+        ok = True  # Nothing to upgrade when no Postgres 8.x data directory is present
+        if os.path.exists(old_data_dir):
+            with open(os.path.join(old_data_dir, "PG_VERSION")) as in_handle:
+                version = in_handle.read().strip()
+            if version.startswith("8"):
+                log.debug("Upgrading Postgres from version 8 to 9")
+                old_debs = ["http://launchpadlibrarian.net/102366400/postgresql-client-8.4_8.4.11-1_amd64.deb",
+                            "http://launchpadlibrarian.net/102366396/postgresql-8.4_8.4.11-1_amd64.deb"]
+                for deb in old_debs:
+                    misc.run("wget %s" % deb)
+                    misc.run("dpkg -i %s" % os.path.basename(deb))
+                    misc.run("rm -f %s" % os.path.basename(deb))
+                backup_dir = os.path.join(self.app.path_resolver.galaxy_data, "pgsql",
+                                          "data-backup-v%s" % version)
+                self._as_postgres("mv %s %s" % (old_data_dir, backup_dir))
+                misc.run("mkdir -p %s" % new_data_dir)
+                os.chown(new_data_dir, pwd.getpwnam("postgres")[2], grp.getgrnam("postgres")[2])
+                self._as_postgres("%s/initdb %s" % (self.app.path_resolver.pg_home,
+                    new_data_dir))
+                self._as_postgres("sed -i 's|#port = 5432|port = {0}|' {1}"
+                                  .format(paths.C_PSQL_PORT, os.path.join(new_data_dir, 'postgresql.conf')))
+                # Seems to require a start and stop before the upgrade can work!
+                log_loc = os.path.join(new_data_dir, 'cm_postgres_upgrade.log')
+                self._as_postgres("{0}/pg_ctl -w -l {1} -D {2} start".
+                                  format(self.app.path_resolver.pg_home, log_loc, new_data_dir),
+                                  cwd=new_data_dir)
+                self._as_postgres("{0}/pg_ctl -w -l {1} -D {2} stop".
+                                  format(self.app.path_resolver.pg_home, log_loc, new_data_dir),
+                                  cwd=new_data_dir)
+                # Assume that if this step works, all the previous ones worked
+                ok = self._as_postgres("{0}/pg_upgrade -d {1} -D {2} -p 5840 -P {3} -b /usr/lib/postgresql/8.4/bin"
+                                  " -B {0} -l {4}".
+                                  format(self.app.path_resolver.pg_home, backup_dir,
+                                         new_data_dir, paths.C_PSQL_PORT, log_loc),
+                                  cwd=new_data_dir)
+                misc.run("apt-get -y --force-yes remove postgresql-8.4 postgresql-client-8.4")
+        return ok
+
+    def _upgrade_database(self):
+        return self._upgrade_postgres_8_to_9()
+
+    def _as_galaxy(self, cmd):
+        return misc.run('%s - galaxy -c "%s"' % (paths.P_SU, cmd))
+
+    def _copy_to_new_fs(self):
+        fs_galaxy_data = self.app.manager.get_services(svc_role=ServiceRole.GALAXY_DATA)[0]
+        fs_galaxy_tools = self.app.manager.get_services(svc_role=ServiceRole.GALAXY_TOOLS)[0]
+        source_path = os.path.join(fs_galaxy_tools.mount_point, "tools")
+        target_path = os.path.join(fs_galaxy_data.mount_point, "tools")
+        ok_tools = self._as_galaxy("cp -R {0} {1}".format(source_path, target_path))
+
+        source_path = os.path.join(fs_galaxy_tools.mount_point, "galaxy-central")
+        target_path = os.path.join(fs_galaxy_data.mount_point, "galaxy-app")
+        if (os.path.exists(target_path)):
+            log.debug("Target path for galaxy-app ({0}) already exists! Skipping..."
+                .format(target_path))
+            ok_galaxy = True
+        else:
+            ok_galaxy = self._as_galaxy("cp -R {0} {1}".format(source_path, target_path))
+        if (ok_tools and ok_galaxy):
+            return True
+        else:
+            log.error("Problems copying data to the new file system; look at the log.")
+            return False
+
+    def _create_missing_dirs(self):
+        # TODO: handle missing/renaming dirs on the file system: galaxyData/'files'
+        # will need to be renamed to galaxy/'data'
+        return True
+
+    def _upgrade_fs_structure(self):
+        ok_fs = self._copy_to_new_fs()
+        ok_dirs = self._create_missing_dirs()
+        if (ok_fs and ok_dirs):
+            return True
+        else:
+            log.error("Problems upgrading file system structure; look at the log.")
+            return False
+
+    def _adjust_stored_paths(self):
+        # Adjust galaxy mercurial location for future updates
+        galaxy_data_loc = self.app.manager.get_services(svc_role=ServiceRole.GALAXY_DATA)[0].mount_point
+        galaxy_loc = os.path.join(galaxy_data_loc, "galaxy-app")
+        if os.path.isdir(galaxy_loc):
+            self._as_galaxy("sed -i 's/central/dist/' %s" % os.path.join(galaxy_loc, '.hg', 'hgrc'))
+        # Adjust tools location in galaxy
+        galaxy_ini_loc = os.path.join(galaxy_loc, 'universe_wsgi.ini')
+        ok_tools = self._as_galaxy("sed -i 's|tool_dependency_dir = /mnt/galaxyTools/tools|tool_dependency_dir = {0}|' {1}".
+                        format(os.path.join(galaxy_data_loc, 'tools'), galaxy_ini_loc))
+
+        ok_db = self._as_galaxy("sed -i 's|database_connection = postgres://galaxy@localhost:5840/galaxy|database_connection = postgres://galaxy@localhost:{0}/galaxy|' {1}".
+                        format(paths.C_PSQL_PORT, galaxy_ini_loc))
+        # Adjust content of tools' env.sh to reflect the new path
+        tools_dir = os.path.join(galaxy_data_loc, 'tools')
+        env_files = []
+        for root, dirnames, filenames in os.walk(tools_dir):
+            for filename in fnmatch.filter(filenames, 'env.sh'):
+                env_files.append(os.path.join(root, filename))
+        log.debug("Found the following env.sh files: {0}".format(env_files))
+        for f in env_files:
+            cmd = "sed --in-place=.orig 's/galaxyTools/galaxyData/g' {0}".format(f)
+            misc.run(cmd)
+        # Update broken symlinks for the tools' `default` dirs
+        default_symlinks = misc.detect_symlinks('/mnt/galaxyData/tools', 'default',
+            symlink_as_file=False)
+        if not default_symlinks:
+            log.debug("No 'default' symlinks found to update!")
+        for link in default_symlinks:
+            link_name = link[0]
+            # We'll make the symlinks relative so extract target's basename
+            target_name = os.path.basename(link[1])
+            os.unlink(link_name)
+            os.symlink(target_name, link_name)
+            log.debug("Updated symlink {0} to {1}.".format(link_name, target_name))
+        if (ok_tools and ok_db):
+            return True
+        else:
+            log.error("Problems updating Galaxy tools or database location; look at the log.")
+            return False
+
+    def _update_user_data(self):
+        if 'filesystems' in self.app.ud:
+            old_fs_list = self.app.ud.get('filesystems') or []
+            new_fs_list = []
+            # clear 'services' and replace with the new format
+            for fs in old_fs_list:
+                svc_roles = ServiceRole.from_string_array(fs['roles'])
+                if ServiceRole.GALAXY_TOOLS in svc_roles and ServiceRole.GALAXY_DATA in svc_roles:
+                    # This condition should only occur in new configs, but check
+                    # added so that things work properly even if run against a new config
+                    # Only works for default configs though...
+                    new_fs_list.append(fs)
+                elif ServiceRole.GALAXY_TOOLS in svc_roles:
+                    pass  # skip adding the galaxy tools file system, no longer needed.
+                else:
+                    if ServiceRole.GALAXY_DATA in ServiceRole.from_string_array(fs['roles']):
+                        fs['roles'] = ServiceRole.to_string_array([ServiceRole.GALAXY_TOOLS,
+                            ServiceRole.GALAXY_DATA])
+                        new_fs_list.append(fs)
+                    else:
+                        new_fs_list.append(fs)
+            self.app.ud['filesystems'] = new_fs_list
+        self.app.ud['deployment_version'] = 2
+        self.app.ud.pop('galaxy_home', None)  # TODO: Galaxy home is always reset
+                                              # to default. Discuss implications
+        return True
+
+    def _migrate1_prereqs_satisfied(self):
+        """
+        Make sure prerequisites for applying migration 1 to 2 are satisfied.
+        """
+        # 'Old' AMIs do not support this CloudMan version so guard against those
+        old_AMIs = ['ami-da58aab3', 'ami-9a7485f3', 'ami-46d4792f']
+        current_ami = self.app.cloud_interface.get_ami()
+        if current_ami in old_AMIs:
+            msg = ("The Machine Image you are running ({0}) does not support this version "
+                   "of CloudMan. You MUST terminate this instance and launch a new "
+                   "one using a more up-to-date Machine Image. See "
+                   "<a href='http://wiki.galaxyproject.org/CloudMan/' target='_blank'>"
+                   "this page</a> for more details.".format(current_ami))
+            log.critical(msg)
+            self.app.msgs.critical(msg)
+            return False
+        fs_galaxy_data_list = self.app.manager.get_services(svc_role=ServiceRole.GALAXY_DATA)
+        if not fs_galaxy_data_list:
+            log.warn("Required File system GALAXY_DATA missing. Aborting migrate1to2.")
+            return False
+        fs_galaxy_tools_list = self.app.manager.get_services(svc_role=ServiceRole.GALAXY_TOOLS)
+        if not fs_galaxy_tools_list:
+            log.warn("Required File system GALAXY_TOOLS missing. Aborting migrate1to2.")
+            return False
+        # Get a handle to the actual service objects
+        fs_galaxy_data = fs_galaxy_data_list[0]
+        fs_galaxy_tools = fs_galaxy_tools_list[0]
+        space_available = int(fs_galaxy_data.size) - int(fs_galaxy_data.size_used)
+        if space_available < int(fs_galaxy_tools.size_used):
+            log.debug("Cannot migrate from 1 to 2: Insufficient space available on "
+                      "Galaxy data volume. Available: {0}, Required: {1}"
+                      .format(space_available, fs_galaxy_tools.size_used))
+            return False
+        for svc in self.app.manager.get_services(svc_type=ServiceType.FILE_SYSTEM):
+            if ServiceRole.GALAXY_TOOLS in svc.svc_roles and ServiceRole.GALAXY_DATA in svc.svc_roles:
+                log.warn("File system appears to have been already migrated! Aborting.")
+                return False
+        return True
+
+    def migrate_1to2(self):
+        # First set the current ``deployment_version``, in case it's empty.
+        # This is to prevent it from being set to the latest version when empty
+        self.app.ud['deployment_version'] = 1
+
+        if not self._migrate1_prereqs_satisfied():
+            msg = "Cannot migrate from version 1 to 2. Pre-requisites not satisfied!"
+            log.warn(msg)
+            self.app.msgs.warning(msg)
+            return False
+
+        msg = ("Migrating this deployment from version 1 to 2. Note that this "
+               "may take a while. Please wait until the process completes before "
+               "starting to use any services or features.")
+        log.info(msg)
+        self.app.msgs.info(msg)
+        log.info("Migration: Step 1: Upgrading Postgres Database...")
+        ok = self._upgrade_database()
+        if ok:
+            log.info("Migration: Step 2: Upgrading to new file system structure...")
+            ok = self._upgrade_fs_structure()
+        if ok:
+            log.info("Migration: Step 3: Adjusting paths in configuration files...")
+            ok = self._adjust_stored_paths()
+        if ok:
+            log.info("Migration: Step 4: Updating user data...")
+            ok = self._update_user_data()
+        if ok:
+            log.info("Migration: Step 5: Shutting down all file system services...")
+            fs_svcs = self.app.manager.get_services(svc_type=ServiceType.FILE_SYSTEM)
+            # TODO: Is a clean necessary?
+            for svc in fs_svcs:
+                svc.remove(synchronous=True)
+            log.info("Migration: Step 6: Restarting all file system services...")
+            ok = self.app.manager.add_preconfigured_filesystems()
+        # Migration 1 to 2 complete
+        if ok:
+            msg = ("Migration from version 1 to 2 complete. Please continue to wait "
+                   "until all the services have completed initializing.")
+            log.info(msg)
+            self.app.msgs.info(msg)
+        return ok
+
+
+class MigrationService(ApplicationService, Migrate1to2):
+    def __init__(self, app):
+        super(MigrationService, self).__init__(app)
+
+        self.svc_roles = [ServiceRole.MIGRATION]
+        self.name = ServiceRole.to_string(ServiceRole.MIGRATION)
+
+        self.dependencies = []
+
+        if 'filesystems' in self.app.ud:
+            for fs in self.app.ud.get('filesystems') or []:
+                # Wait for galaxy data, indices and tools to come up before attempting migration
+                if ServiceRole.GALAXY_DATA in ServiceRole.from_string_array(fs['roles']):
+                    self.dependencies.append(ServiceDependency(self, ServiceRole.GALAXY_DATA))
+                if ServiceRole.GALAXY_TOOLS in ServiceRole.from_string_array(fs['roles']):
+                    self.dependencies.append(ServiceDependency(self, ServiceRole.GALAXY_TOOLS))
+                if ServiceRole.GALAXY_INDICES in ServiceRole.from_string_array(fs['roles']):
+                    self.dependencies.append(ServiceDependency(self, ServiceRole.GALAXY_INDICES))
+
+    def start(self):
+        """
+        Start the migration service
+        """
+        log.debug("Starting migration service...")
+        self.state = service_states.STARTING
+        if self._is_migration_needed():
+            self.state = service_states.RUNNING
+            try:
+                if not self._apply():
+                    msg = "Error running the migration service! Look at the log for clues."
+                    log.error(msg)
+                    self.app.msgs.error(msg)
+                    self.state = service_states.ERROR
+            except Exception, e:
+                log.error("Error starting migration service: {0}".format(e))
+                self.state = service_states.ERROR
+            else:
+                if self.state != service_states.ERROR:
+                    self.state = service_states.COMPLETED
+        else:
+            log.debug("No migration required. Service complete.")
+            self.state = service_states.COMPLETED
+
+    def _apply(self):
+        """
+        Apply the migration; do the actual work.
+        """
+        log.debug("Migration is required. Starting...")
+        return self._perform_migration()
+
+    def _perform_migration(self):
+        """
+        Based on the version number, carry out appropriate migration actions
+        """
+        if self._get_old_version() <= 1:
+            return self.migrate_1to2()
+        # No migration necessary so all is good
+        return True
+
+    def _is_migration_needed(self):
+        return self._get_old_version() < self._get_current_version()
+
+    def _get_current_version(self):
+        version = 2
+        log.debug("Current deployment version: {0}".format(version))
+        return version  # The latest version this upgrade script understands
+
+    def _get_old_version(self):
+        # TODO: Need old version discovery. Where do we get that from?
+        version = self.app.ud.get('deployment_version', None)
+        if not version:
+            version = 1  # A version prior to version number being introduced
+        log.debug("Old deployment version: {0}".format(version))
+        return version
+
+    def remove(self, synchronous=False):
+        """
+        Remove the migration service
+        """
+        log.info("Removing Migration service")
+        super(MigrationService, self).remove(synchronous)
+        self.state = service_states.SHUTTING_DOWN
+        self._clean()
+        self.state = service_states.SHUT_DOWN
+
+    def _clean(self):
+        """
+        Clean up the system
+        """
+        pass
+
+    def status(self):
+        """
+        Check and update the status of service
+        """
+        pass

cm/services/apps/migration_service.py

-import os
-import pwd
-import grp
-import fnmatch
-from cm.services import ServiceRole
-from cm.services import service_states
-from cm.services import ServiceDependency
-from cm.services import ServiceType
-from cm.services.apps import ApplicationService
-from cm.util import misc, paths
-
-import logging
-log = logging.getLogger('cloudman')
-
-
-class Migrate1to2:
-    """Functionality for upgrading from version 1 to 2.
-    """
-    def __init__(self, app):
-        self.app = app
-
-    def _as_postgres(self, cmd, cwd=None):
-        return misc.run('%s - postgres -c "%s"' % (paths.P_SU, cmd), cwd=cwd)
-
-    def _upgrade_postgres_8_to_9(self):
-        old_data_dir = os.path.join(self.app.path_resolver.galaxy_data, "pgsql", "data")
-        new_data_dir = self.app.path_resolver.psql_dir
-        if old_data_dir == new_data_dir:
-            log.debug("Nothing to upgrade in database - paths are the same: %s" % old_data_dir)
-            return True
-
-        if os.path.exists(old_data_dir):
-            with open(os.path.join(old_data_dir, "PG_VERSION")) as in_handle:
-                version = in_handle.read().strip()
-            if version.startswith("8"):
-                log.debug("Upgrading Postgres from version 8 to 9")
-                old_debs = ["http://launchpadlibrarian.net/102366400/postgresql-client-8.4_8.4.11-1_amd64.deb",
-                            "http://launchpadlibrarian.net/102366396/postgresql-8.4_8.4.11-1_amd64.deb"]
-                for deb in old_debs:
-                    misc.run("wget %s" % deb)
-                    misc.run("dpkg -i %s" % os.path.basename(deb))
-                    misc.run("rm -f %s" % os.path.basename(deb))
-                backup_dir = os.path.join(self.app.path_resolver.galaxy_data, "pgsql",
-                                          "data-backup-v%s" % version)
-                self._as_postgres("mv %s %s" % (old_data_dir, backup_dir))
-                misc.run("mkdir -p %s" % new_data_dir)
-                os.chown(new_data_dir, pwd.getpwnam("postgres")[2], grp.getgrnam("postgres")[2])
-                self._as_postgres("%s/initdb %s" % (self.app.path_resolver.pg_home,
-                    new_data_dir))
-                self._as_postgres("sed -i 's|#port = 5432|port = {0}|' {1}"
-                                  .format(paths.C_PSQL_PORT, os.path.join(new_data_dir, 'postgresql.conf')))
-                # Seems to require a start and stop before the upgrade can work!
-                log_loc = os.path.join(new_data_dir, 'cm_postgres_upgrade.log')
-                self._as_postgres("{0}/pg_ctl -w -l {1} -D {2} start".
-                                  format(self.app.path_resolver.pg_home, log_loc, new_data_dir),
-                                  cwd=new_data_dir)
-                self._as_postgres("{0}/pg_ctl -w -l {1} -D {2} stop".
-                                  format(self.app.path_resolver.pg_home, log_loc, new_data_dir),
-                                  cwd=new_data_dir)
-                # Assume that if this step works, all the previous ones worked
-                ok = self._as_postgres("{0}/pg_upgrade -d {1} -D {2} -p 5840 -P {3} -b /usr/lib/postgresql/8.4/bin"
-                                  " -B {0} -l {4}".
-                                  format(self.app.path_resolver.pg_home, backup_dir,
-                                         new_data_dir, paths.C_PSQL_PORT, log_loc),
-                                  cwd=new_data_dir)
-                misc.run("apt-get -y --force-yes remove postgresql-8.4 postgresql-client-8.4")
-        return ok
-
-    def _upgrade_database(self):
-        return self._upgrade_postgres_8_to_9()
-
-    def _as_galaxy(self, cmd):
-        return misc.run('%s - galaxy -c "%s"' % (paths.P_SU, cmd))
-
-    def _copy_to_new_fs(self):
-        fs_galaxy_data = self.app.manager.get_services(svc_role=ServiceRole.GALAXY_DATA)[0]
-        fs_galaxy_tools = self.app.manager.get_services(svc_role=ServiceRole.GALAXY_TOOLS)[0]
-        source_path = os.path.join(fs_galaxy_tools.mount_point, "tools")
-        target_path = os.path.join(fs_galaxy_data.mount_point, "tools")
-        ok_tools = self._as_galaxy("cp -R {0} {1}".format(source_path, target_path))
-
-        source_path = os.path.join(fs_galaxy_tools.mount_point, "galaxy-central")
-        target_path = os.path.join(fs_galaxy_data.mount_point, "galaxy-app")
-        if (os.path.exists(target_path)):
-            log.debug("Target path for galaxy-app ({0}) already exists! Skipping..."
-                .format(target_path))
-            ok_galaxy = True
-        else:
-            ok_galaxy = self._as_galaxy("cp -R {0} {1}".format(source_path, target_path))
-        if (ok_tools and ok_galaxy):
-            return True
-        else:
-            log.error("Problems copying data to the new file system; look at the log.")
-            return False
-
-    def _create_missing_dirs(self):
-        # TODO: on the file system missing/renamig dirs: galaxyData/'files' will
-        # need to be renamed to galaxy/data'
-        return True
-
-    def _upgrade_fs_structure(self):
-        ok_fs = self._copy_to_new_fs()
-        ok_dirs = self._create_missing_dirs()
-        if (ok_fs and ok_dirs):
-            return True
-        else:
-            log.error("Problems upgrading file system structure; look at the log.")
-            return False
-
-    def _adjust_stored_paths(self):
-        # Adjust galaxy mercurial location for future updates
-        galaxy_data_loc = self.app.manager.get_services(svc_role=ServiceRole.GALAXY_DATA)[0].mount_point
-        galaxy_loc = os.path.join(galaxy_data_loc, "galaxy-app")
-        if os.path.isdir(galaxy_loc):
-            self._as_galaxy("sed -i 's/central/dist/' %s" % os.path.join(galaxy_loc, '.hg', 'hgrc'))
-        # Adjust tools location in galaxy
-        galaxy_ini_loc = os.path.join(galaxy_loc, 'universe_wsgi.ini')
-        ok_tools = self._as_galaxy("sed -i 's|tool_dependency_dir = /mnt/galaxyTools/tools|tool_dependency_dir = {0}|' {1}".
-                        format(os.path.join(galaxy_data_loc, 'tools'), galaxy_ini_loc))
-
-        ok_db = self._as_galaxy("sed -i 's|database_connection = postgres://galaxy@localhost:5840/galaxy|database_connection = postgres://galaxy@localhost:{0}/galaxy|' {1}".
-                        format(paths.C_PSQL_PORT, galaxy_ini_loc))
-        # Adjust content of tools' env.sh to reflect the new path
-        tools_dir = os.path.join(galaxy_data_loc, 'tools')
-        env_files = []
-        for root, dirnames, filenames in os.walk(tools_dir):
-            for filename in fnmatch.filter(filenames, 'env.sh'):
-                env_files.append(os.path.join(root, filename))
-        log.debug("Found the following env.sh files: {0}".format(env_files))
-        for f in env_files:
-            cmd = "sed --in-place=.orig 's/galaxyTools/galaxyData/g' {0}".format(f)
-            misc.run(cmd)
-        # Update broken symlinks for the tools' `default` dirs
-        default_symlinks = misc.detect_symlinks('/mnt/galaxyData/tools', 'default',
-            symlink_as_file=False)
-        if not default_symlinks:
-            log.debug("No 'default' symlinks found to update!")
-        for link in default_symlinks:
-            link_name = link[0]
-            # We'll make the symlinks relative so extract target's basename
-            target_name = os.path.basename(link[1])
-            os.unlink(link_name)
-            os.symlink(target_name, link_name)
-            log.debug("Updated symlink {0} to {1}.".format(link_name, target_name))
-        if (ok_tools and ok_db):
-            return True
-        else:
-            log.error("Problems updating Galaxy tools or database location; look at the log.")
-            return False
-
-    def _update_user_data(self):
-        if 'filesystems' in self.app.ud:
-            old_fs_list = self.app.ud.get('filesystems') or []
-            new_fs_list = []
-            # clear 'services' and replace with the new format
-            for fs in old_fs_list:
-                svc_roles = ServiceRole.from_string_array(fs['roles'])
-                if ServiceRole.GALAXY_TOOLS in svc_roles and ServiceRole.GALAXY_DATA in svc_roles:
-                    # This condition should only occur in new configs, but check
-                    # added so that things work properly even if run against a new config
-                    # Only works for default configs though...
-                    new_fs_list.append(fs)
-                elif ServiceRole.GALAXY_TOOLS in svc_roles:
-                    pass  # skip adding the galaxy tools file system, no longer needed.
-                else:
-                    if ServiceRole.GALAXY_DATA in ServiceRole.from_string_array(fs['roles']):
-                        fs['roles'] = ServiceRole.to_string_array([ServiceRole.GALAXY_TOOLS,
-                            ServiceRole.GALAXY_DATA])
-                        new_fs_list.append(fs)
-                    else:
-                        new_fs_list.append(fs)
-            self.app.ud['filesystems'] = new_fs_list
-        self.app.ud['deployment_version'] = 2
-        self.app.ud.pop('galaxy_home', None)  # TODO: Galaxy home is always reset
-                                              # to default. Discuss implications
-        return True
-
-    def _migrate1_prereqs_satisfied(self):
-        """
-        Make sure prerequisites for applying migration 1 to 2 are satisfied.
-        """
-        # 'Old' AMIs do not support this CloudMan version so guard against those
-        old_AMIs = ['ami-da58aab3', 'ami-9a7485f3', 'ami-46d4792f']
-        current_ami = self.app.cloud_interface.get_ami()
-        if current_ami in old_AMIs:
-            msg = ("The Machine Image you are running ({0}) does not support this version "
-                   "of CloudMan. You MUST terminate this instance and launch a new "
-                   "one using a more up-to-date Machine Image. See "
-                   "<a href='http://wiki.galaxyproject.org/CloudMan/' target='_blank'>"
-                   "this page</a> for more details.".format(current_ami))
-            log.critical(msg)
-            self.app.msgs.critical(msg)
-            return False
-        fs_galaxy_data_list = self.app.manager.get_services(svc_role=ServiceRole.GALAXY_DATA)
-        if not fs_galaxy_data_list:
-            log.warn("Required File system GALAXY_DATA missing. Aborting migrate1to2.")
-            return False
-        fs_galaxy_tools_list = self.app.manager.get_services(svc_role=ServiceRole.GALAXY_TOOLS)
-        if not fs_galaxy_tools_list:
-            log.warn("Required File system GALAXY_TOOLS missing. Aborting migrate1to2.")
-            return False
-        # Get a handle to the actual service objects
-        fs_galaxy_data = fs_galaxy_data_list[0]
-        fs_galaxy_tools = fs_galaxy_tools_list[0]
-        space_available = int(fs_galaxy_data.size) - int(fs_galaxy_data.size_used)
-        if space_available < int(fs_galaxy_tools.size_used):
-            log.debug("Cannot migrate from 1 to 2: Insufficient space available on "
-                      "Galaxy data volume. Available: {0}, Required: {1}"
-                      .format(space_available, fs_galaxy_tools.size_used))
-            return False
-        for svc in self.app.manager.get_services(svc_type=ServiceType.FILE_SYSTEM):
-            if ServiceRole.GALAXY_TOOLS in svc.svc_roles and ServiceRole.GALAXY_DATA in svc.svc_roles:
-                log.warn("File system appears to have been already migrated! Aborting.")
-                return False
-        return True
-
-    def migrate_1to2(self):
-        # First set the current ``deployment_version``, in case it's empty.
-        # This is to prevent it from being set to the latest version when empty
-        self.app.ud['deployment_version'] = 1
-
-        if not self._migrate1_prereqs_satisfied():
-            msg = "Cannot migrate from version 1 to 2. Pre-requisites not satisfied!"
-            log.warn(msg)
-            self.app.msgs.warning(msg)
-            return False
-
-        msg = ("Migrating this deployment from version 1 to 2. Note that this "
-               "may take a while. Please wait until the process completes before "
-               "starting to use any services or features.")
-        log.info(msg)
-        self.app.msgs.info(msg)
-        log.info("Migration: Step 1: Upgrading Postgres Database...")
-        ok = self._upgrade_database()
-        if ok:
-            log.info("Migration: Step 2: Upgrading to new file system structure...")
-            ok = self._upgrade_fs_structure()
-        if ok:
-            log.info("Migration: Step 3: Adjusting paths in configuration files...")
-            ok = self._adjust_stored_paths()
-        if ok:
-            log.info("Migration: Step 4: Updating user data...")
-            ok = self._update_user_data()
-        if ok:
-            log.info("Migration: Step 5: Shutting down all file system services...")
-            fs_svcs = self.app.manager.get_services(svc_type=ServiceType.FILE_SYSTEM)
-            # TODO: Is a clean necessary?
-            for svc in fs_svcs:
-                svc.remove(synchronous=True)
-            log.info("Migration: Step 6: Restarting all file system services...")
-            ok = self.app.manager.add_preconfigured_filesystems()
-        # Migration 1 to 2 complete
-        if ok:
-            msg = ("Migration from version 1 to 2 complete. Please continue to wait "
-                   "until all the services have completed initializing.")
-            log.info(msg)
-            self.app.msgs.info(msg)
-        return ok
-
-
-class MigrationService(ApplicationService, Migrate1to2):
-    def __init__(self, app):
-        super(MigrationService, self).__init__(app)
-
-        self.svc_roles = [ServiceRole.MIGRATION]
-        self.name = ServiceRole.to_string(ServiceRole.MIGRATION)
-
-        self.dependencies = []
-
-        if 'filesystems' in self.app.ud:
-            for fs in self.app.ud.get('filesystems') or []:
-                # Wait for galaxy data, indices and tools to come up before attempting migration
-                if ServiceRole.GALAXY_DATA in ServiceRole.from_string_array(fs['roles']):
-                    self.dependencies.append(ServiceDependency(self, ServiceRole.GALAXY_DATA))
-                if ServiceRole.GALAXY_TOOLS in ServiceRole.from_string_array(fs['roles']):
-                    self.dependencies.append(ServiceDependency(self, ServiceRole.GALAXY_TOOLS))
-                if ServiceRole.GALAXY_INDICES in ServiceRole.from_string_array(fs['roles']):
-                    self.dependencies.append(ServiceDependency(self, ServiceRole.GALAXY_INDICES))
-
-    def start(self):
-        """
-        Start the migration service
-        """
-        log.debug("Starting migration service...")
-        self.state = service_states.STARTING
-        if self._is_migration_needed():
-            self.state = service_states.RUNNING
-            try:
-                if not self._apply():
-                    msg = "Error running the migration service! Look at the log for clues."
-                    log.error(msg)
-                    self.app.msgs.error(msg)
-                    self.state = service_states.ERROR
-            except Exception, e:
-                log.error("Error starting migration service: {0}".format(e))
-                self.state = service_states.ERROR
-            else:
-                self.state = service_states.COMPLETED
-        else:
-            log.debug("No migration required. Service complete.")
-            self.state = service_states.COMPLETED
-
-    def _apply(self):
-        """
-        Apply the migration; do the actual work.
-        """
-        log.debug("Migration is required. Starting...")
-        return self._perform_migration()
-
-    def _perform_migration(self):
-        """
-        Based on the version number, carry out appropriate migration actions
-        """
-        if self._get_old_version() <= 1:
-            return self.migrate_1to2()
-        # No migration necessary so all is good
-        return True
-
-    def _is_migration_needed(self):
-        return self._get_old_version() < self._get_current_version()
-
-    def _get_current_version(self):
-        version = 2
-        log.debug("Current deployment version: {0}".format(version))
-        return version  # Whichever version that this upgrade script last understands
-
-    def _get_old_version(self):
-        # TODO: Need old version discovery. Where do we get that from?
-        version = self.app.ud.get('deployment_version', None)
-        if not version:
-            version = 1  # A version prior to version number being introduced
-        log.debug("Old deployment version: {0}".format(version))
-        return version
-
-    def remove(self, synchronous=False):
-        """
-        Remove the migration service
-        """
-        log.info("Removing Migration service")
-        super(MigrationService, self).remove(synchronous)
-        self.state = service_states.SHUTTING_DOWN
-        self._clean()
-        self.state = service_states.SHUT_DOWN
-
-    def _clean(self):
-        """
-        Clean up the system
-        """
-        pass
-
-    def status(self):
-        """
-        Check and update the status of service
-        """
-        pass

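The ``Migrate1to2``/``MigrationService`` code removed above orchestrated the 1-to-2 upgrade as a chain of steps gated by a single ``ok`` flag, so the first failing step short-circuits everything after it. A minimal sketch of that control-flow pattern (the names below are illustrative, not the removed CloudMan helpers):

```python
import logging

log = logging.getLogger('cloudman')


def run_migration_steps(steps):
    """Run (description, callable) steps in order; the first failure aborts the rest."""
    ok = True
    for description, step in steps:
        if not ok:
            break
        log.info("Migration: %s...", description)
        ok = step()
    return ok

# e.g. run_migration_steps([("Step 1: Upgrading Postgres Database", upgrade_database),
#                           ("Step 2: Upgrading to new file system structure", upgrade_fs_structure)])
```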
cm/services/apps/pss.py

 log = logging.getLogger('cloudman')
 
 
-class PSS(ApplicationService):
+class PSSService(ApplicationService):
     """ post_start_script service - this service runs once at the end of the
         configuration of all services defined in CloudMan. It runs a predefined
         script.
         It could be extended to run an arbitrary script when a condition is met."""
 
     def __init__(self, app, instance_role='master'):
-        super(PSS, self).__init__(app)
+        super(PSSService, self).__init__(app)
         self.svc_roles = [ServiceRole.PSS]
         self.name = ServiceRole.to_string(ServiceRole.PSS)
         self.instance_role = instance_role
                       .format(self.pss_filename, self.app.ud['bucket_cluster']))
 
     def remove(self, synchronous=False):
-        super(PSS, self).remove(synchronous)
+        super(PSSService, self).remove(synchronous)
         if self.state == service_states.UNSTARTED:
             self.state = service_states.SHUT_DOWN
         if self.state == service_states.SHUT_DOWN:

cm/services/apps/sge.py

             os.mkdir(self.app.path_resolver.sge_root)
         # Ensure SGE_ROOT directory is empty (useful for restarts)
         if len(os.listdir(self.app.path_resolver.sge_root)) > 0:
+            log.debug("SGE_ROOT directory %s is not empty" % self.app.path_resolver.sge_root)
             # Check if qmaster is running in that case
             self.status()
             if self.state == service_states.RUNNING:
                 log.info("Found SGE already running; will reconfigure it.")
                 self.stop_sge()
-            log.debug(
-                "Cleaning '%s' directory." % self.app.path_resolver.sge_root)
+            log.debug("Cleaning '%s' directory." % self.app.path_resolver.sge_root)
             for base, dirs, files in os.walk(self.app.path_resolver.sge_root):
                 for f in files:
                     os.unlink(os.path.join(base, f))
                 for d in dirs:
                     shutil.rmtree(os.path.join(base, d))
         log.debug("Unpacking SGE to '%s'." % self.app.path_resolver.sge_root)
-        tar = tarfile.open(
-            '%s/ge-6.2u5-common.tar.gz' % self.app.path_resolver.sge_tars)
+        tar = tarfile.open('%s/ge-6.2u5-common.tar.gz' % self.app.path_resolver.sge_tars)
         tar.extractall(path=self.app.path_resolver.sge_root)
         tar.close()
         tar = tarfile.open('%s/ge-6.2u5-bin-lx24-amd64.tar.gz' %
                     self.app.path_resolver.sge_root, SGE_allq_file),
                 "Error modifying all.q", "Successfully modified all.q")
             log.debug("Configuring users' SGE profiles")
-            with open(paths.LOGIN_SHELL_SCRIPT, 'a') as f:
-                f.write("\nexport SGE_ROOT=%s" % self.app.path_resolver.sge_root)
-                f.write("\n. $SGE_ROOT/default/common/settings.sh\n")
+            misc.append_to_file(paths.LOGIN_SHELL_SCRIPT,
+                "\nexport SGE_ROOT=%s" % self.app.path_resolver.sge_root)
+            misc.append_to_file(paths.LOGIN_SHELL_SCRIPT,
+                "\n. $SGE_ROOT/default/common/settings.sh\n")
             # Write out the .sge_request file for individual users
             sge_request_template = Template(templates.SGE_REQUEST_TEMPLATE)
             sge_request_params = {
         for inst in self.app.manager.worker_instances:
             self.remove_sge_host(inst.get_id(), inst.get_private_ip())
         misc.run('export SGE_ROOT=%s; . $SGE_ROOT/default/common/settings.sh; %s/bin/lx24-amd64/qconf -km'
-                 % (self.app.path_resolver.sge_root, self.app.path_resolver.sge_root), "Problems stopping SGE master",
-                 "Successfully stopped SGE master.")
+            % (self.app.path_resolver.sge_root, self.app.path_resolver.sge_root),
+            "Problems stopping SGE master", "Successfully stopped SGE master.")
 
     def write_allhosts_file(self, filename='/tmp/ah', to_add=None, to_remove=None):
         ahl = []

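The SGE profile setup above now goes through ``misc.append_to_file`` instead of writing to ``paths.LOGIN_SHELL_SCRIPT`` directly. The helper itself is not part of this changeset; the following is only a minimal sketch of what such an append helper presumably does (an assumption, not the actual ``cm.util.misc`` code):

```python
def append_to_file(file_path, text):
    """Append ``text`` to ``file_path``; the real helper may also
    de-duplicate entries or log failures."""
    with open(file_path, 'a') as f:
        f.write(text)

# Equivalent effect to the calls in the diff above (path and value are placeholders):
# append_to_file('/path/to/login_script', "\nexport SGE_ROOT=/opt/sge")
```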
cm/services/data/filesystem.py

 from cm.services import service_states
 from cm.services import ServiceRole
 from cm.services.data import DataService
-from cm.services.data.nfs import NfsFS
+from cm.services.data.mountablefs import MountableFS
 from cm.services.data.volume import Volume
 from cm.services.data.bucket import Bucket
 from cm.services.data.transient_storage import TransientStorage
         self.buckets = []  # A list of cm.services.data.bucket.Bucket objects
         self.transient_storage = []  # Instance's transient storage
         self.nfs_fs = None  # NFS file system object implementing this file system's device
+        self.gluster_fs = None  # GlusterFS-based file system object implementing this file system's device
         self.name = name  # File system name
         self.persistent = persistent  # Whether it should be part of the cluster config
         self.size = None  # Total size of this file system
             details = ts._get_details(details)
         if self.kind == 'nfs':
             details = self.nfs_fs._get_details(details)
+        if self.kind == 'gluster':
+            details = self.gluster_fs._get_details(details)
         return details
 
     def _get_details(self, details):
                         b.bucket_name))
                 for ts in self.transient_storage:
                     self.kind = 'transient'
-                    self.persistent = False
                     ts.add()
                 if self.kind == 'nfs':
                     self.nfs_fs.start()
+                elif self.kind == 'gluster':
+                    self.gluster_fs.start()
             except Exception, e:
                 log.error("Error adding file system service {0}: {1}".format(
                     self.get_full_name(), e))
                 return False
             self.status()
-            log.debug("Done adding devices to {0} (devices: {1}, {2}, {3}, {4})"
+            log.debug("Done adding devices to {0} (devices: {1}, {2}, {3}, {4}, {5})"
                       .format(self.get_full_name(), self.volumes, self.buckets,
-                      self.transient_storage, self.nfs_fs.nfs_server if self.nfs_fs else '-'))
+                      self.transient_storage, self.nfs_fs.device if self.nfs_fs else '-',
+                      self.gluster_fs.device if self.gluster_fs else '-'))
             return True
         else:
             log.debug("Data service {0} in {2} state instead of {1} state; cannot add it"
 
         """
         log.info("Initiating removal of '{0}' data service with: volumes {1}, buckets {2}, "
-                 "transient storage {3}, and nfs server {4}".format(self.get_full_name(),
-                 self.volumes, self.buckets, self.transient_storage, self.nfs_fs))
+                 "transient storage {3}, nfs server {4} and gluster fs {5}".format(self.get_full_name(),
+                 self.volumes, self.buckets, self.transient_storage, self.nfs_fs, self.gluster_fs))
         self.state = service_states.SHUTTING_DOWN
         r_thread = threading.Thread(target=self.__remove, kwargs={'delete_devices':
             delete_devices})
             t.remove()
         if self.nfs_fs:
             self.nfs_fs.stop()
+        elif self.gluster_fs:
+            self.gluster_fs.stop()
         log.debug("Setting state of %s to '%s'" % (
             self.get_full_name(), service_states.SHUT_DOWN))
         self.state = service_states.SHUT_DOWN
                 for i, sp in enumerate(shared_paths):
                     if mount_point in sp:
                         in_ee = i
-                    if hadoo_mnt_point == sp:
+                    if hadoo_mnt_point in sp:
                         hadoop_set = True
 
                 # TODO: change the following line and make hadoop a file
             log.debug("Did not check status of filesystem '%s' with mount point '%s' in state '%s'"
                       % (self.name, self.mount_point, self.state))
 
-    def add_volume(self, vol_id=None, size=0, from_snapshot_id=None, dot=False):
+    def add_volume(self, vol_id=None, size=0, from_snapshot_id=None, dot=False, from_archive_url=None):
         """
         Add a volume device to this file system.
 
         log.debug("Adding Volume (id={id}, size={size}, snap={snap}) into Filesystem {fs}"
                   .format(id=vol_id, size=size, snap=from_snapshot_id, fs=self.get_full_name()))
         self.volumes.append(Volume(self, vol_id=vol_id, size=size,
-                            from_snapshot_id=from_snapshot_id, static=dot))
+                            from_snapshot_id=from_snapshot_id, static=dot, from_archive_url=from_archive_url))
 
     def add_bucket(self, bucket_name, bucket_a_key=None, bucket_s_key=None):
         """
         self.buckets.append(
             Bucket(self, bucket_name, bucket_a_key, bucket_s_key))
 
-    def add_transient_storage(self):
+    def add_transient_storage(self, from_archive_url=None, persistent=False):
         """
         Add instance's transient storage and make it available over NFS to the
         cluster. All this really does is make a directory under ``/mnt`` and
         """
         log.debug("Configuring instance transient storage at {0} with NFS.".format(
             self.mount_point))
-        self.transient_storage.append(TransientStorage(self))
+        self.kind = 'transient'
+        self.persistent = True if from_archive_url else persistent
+        self.transient_storage.append(TransientStorage(self, from_archive_url=from_archive_url))
+
+    def add_glusterfs(self, gluster_server):
+        """
+        Add a Gluster server (e.g., ``172.22.169.17:/gluster_dir``) from which to mount the file system.
+        """
+        log.debug("Adding Gluster server {0} to file system {1}".format(gluster_server, self.name))
+        self.kind = 'gluster'
+        self.gluster_fs = MountableFS(self, 'glusterfs', gluster_server)
 
     def add_nfs(self, nfs_server, username=None, pwd=None):
         """
         """
         log.debug("Adding NFS server {0} to file system {1}".format(nfs_server, self.name))
         self.kind = 'nfs'
-        self.nfs_fs = NfsFS(self, nfs_server, username, pwd)
+        self.nfs_fs = MountableFS(self, 'nfs', nfs_server)

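Taken together, the ``Filesystem`` changes above add Gluster devices, archive-seeded volumes, and archive-seeded transient storage alongside the existing NFS and bucket backings. A rough usage sketch, assuming an already constructed ``fs`` service object and placeholder addresses/URLs (a given file system would use only one of these backings):

```python
# External Gluster volume (requires the glusterfs client on the instance):
fs.add_glusterfs('172.22.169.17:/gluster_dir')

# External NFS export:
fs.add_nfs('172.22.169.20:/export/data')

# New volume pre-populated at cluster launch from a downloadable archive:
fs.add_volume(size=20, from_archive_url='http://example.org/galaxy-fs.tar.gz')

# The instance's transient disk, likewise seeded from an archive:
fs.add_transient_storage(from_archive_url='http://example.org/galaxy-fs.tar.gz')
```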
cm/services/data/mountablefs.py

+"""
+An implementation of a file system service capable of managing
+interactions with any file system that can be mounted via the *nix ``mount`` command.
+"""
+import os
+import time
+import subprocess
+
+from cm.util.misc import run
+from cm.services import service_states
+
+import logging
+log = logging.getLogger('cloudman')
+
+
+class MountableFS(object):
+
+    def __init__(self, filesystem, fs_type, device):
+        self.fs = filesystem  # File system encapsulating this implementation
+        self.fs_type = fs_type
+        self.app = self.fs.app  # A local reference to app (used by @TestFlag)
+        self.device = device
+
+    def __str__(self):
+        return str(self.device)
+
+    def __repr__(self):
+        return str(self.device)
+
+    def _get_details(self, details):
+        details['device'] = self.device
+        details['DoT'] = "No"
+        details['kind'] = self.fs_type
+        return details
+
+    def start(self):
+        """
+        Start the service.
+        Satisfy any preconditions and try to mount ``self.device`` locally at
+        ``self.fs.mount_point``.
+        """
+        if not os.path.exists(self.fs.mount_point):
+            os.makedirs(self.fs.mount_point)
+        self.mount()
+
+    def stop(self):
+        """
+        Stop the service and clean up the ``self.fs.mount_point`` directory.
+        """
+        if self.unmount():
+            # Clean up the system path now that the file system is unmounted
+            try:
+                os.rmdir(self.fs.mount_point)
+            except OSError, e:
+                log.error("Error removing unmounted path {0}: {1}".format(
+                    self.fs.mount_point, e))
+
+    def mount(self):
+        """
+        Do the actual mounting of the device locally.
+        """
+        log.debug("Mounting device of type {0} from location {1} to mount point {2}".format(
+            self.fs_type, self.device, self.fs.mount_point))
+        cmd = '/bin/mount -t {0} {1} {2}'.format(self.fs_type, self.device, self.fs.mount_point)
+        process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        _, _ = process.communicate()
+        if process.returncode != 0:
+            log.error("Trouble mounting file system at {0} from server {1} of type {2}"
+                .format(self.fs.mount_point, self.device, self.fs_type))
+        else:
+            log.info("Successfully mounted file system at {0} from {1} of type {2}"
+                .format(self.fs.mount_point, self.device, self.fs_type))
+
+    def unmount(self):
+        """
+        Do the actual unmounting of this file system.
+        """
+        log.debug("Unmounting FS of type {0} from {1}".format(self.fs_type, self.fs.mount_point))
+        for counter in range(10):
+            if (self.fs.state == service_states.RUNNING and
+                run('/bin/umount %s' % self.fs.mount_point)):
+                break
+            if counter == 9:
+                log.warning("Could not unmount file system of type %s at '%s'" % (self.fs_type, self.fs.mount_point))
+                return False
+            time.sleep(3)
+        return True

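The new ``MountableFS`` generalizes the old ``NfsFS`` by taking the file system type as a parameter and passing it straight to ``mount -t``, so the same class now backs both the ``nfs`` and ``glusterfs`` cases. A hedged sketch of how a ``Filesystem`` service wires it up (the wrapper function and values below are illustrative, not CloudMan code):

```python
from cm.services.data.mountablefs import MountableFS


def attach_external_device(fs, fs_type, device):
    """Attach and mount an external NFS or Gluster device on the Filesystem
    service ``fs``; mirrors what Filesystem.add_nfs/add_glusterfs set up."""
    device_fs = MountableFS(fs, fs_type, device)
    # start() creates fs.mount_point if needed, then runs
    # /bin/mount -t <fs_type> <device> <fs.mount_point>
    device_fs.start()
    return device_fs

# e.g. attach_external_device(fs, 'glusterfs', '172.22.169.17:/gluster_dir')
# e.g. attach_external_device(fs, 'nfs', '172.22.169.20:/export/data')
```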
cm/services/data/nfs.py

-"""
-An implementation of the NFS-based file system service capable of managing
-interactions with a remote NFS server and making it available locally as a
-file system.
-"""
-import os
-import time
-import subprocess
-
-from cm.util.misc import run
-from cm.services import service_states
-
-import logging
-log = logging.getLogger('cloudman')
-
-
-class NfsFS(object):
-    def __init__(self, filesystem, nfs_server, username=None, pwd=None):
-        self.fs = filesystem  # File system encapsulating this implementation
-        self.app = self.fs.app  # A local reference to app (used by @TestFlag)
-        self.nfs_server = nfs_server
-        self.server_username = username
-        self.server_pwd = pwd
-
-    def __str__(self):
-        return str(self.nfs_server)
-
-    def __repr__(self):
-        return str(self.nfs_server)
-
-    def _get_details(self, details):
-        details['nfs_server'] = self.nfs_server
-        details['DoT'] = "No"
-        details['kind'] = 'External NFS'
-        return details
-
-    def start(self):
-        """
-        Start NfsFS service.
-        Satisfy any preconditions and try to mount ``self.nfs_server`` locally at
-        ``self.fs.mount_point``
-        """
-        if not os.path.exists(self.fs.mount_point):
-            os.makedirs(self.fs.mount_point)
-        self.mount()
-
-    def stop(self):
-        """
-        Stop NfsFS service and cleanup the ``self.fs.mount_point`` directory.
-        """
-        if self.unmount():
-            # Clean up the system path now that the file system is unmounted
-            try:
-                os.rmdir(self.fs.mount_point)
-            except OSError, e:
-                log.error("Error removing unmounted path {0}: {1}".format(
-                    self.fs.mount_point, e))
-
-    def mount(self):
-        """
-        Do the actual mounting of the remote NFS file system locally.
-        """
-        log.debug("Mounting NFS FS {0} from {1}".format(self.fs.mount_point, self.nfs_server))
-        cmd = '/bin/mount -t nfs {0} {1}'.format(self.nfs_server, self.fs.mount_point)
-        process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-        _, _ = process.communicate()
-        if process.returncode != 0:
-            log.error("Trouble mounting file system at {0} from NFS server {1}"
-                .format(self.fs.mount_point, self.nfs_server))
-        else:
-            log.info("Successfully mounted file system at {0} from {1}"
-                .format(self.fs.mount_point, self.nfs_server))
-