Commits

Enis Afgan committed 1212299

Merge

Comments (0)

Files changed (1)

 # os.environ['AWS_SECRET_ACCESS_KEY'] = "your secret key"
 
 
-# -- Provide methods for easy switching between specific environment setups for 
+# -- Provide methods for easy switching between specific environment setups for
 # different deployment scenarios (an environment must be loaded as the first line
 # in any invokable function)
 def _amazon_ec2_environment(galaxy=False):
     """ Environment setup for Galaxy on Ubuntu on EC2 """
     env.user = 'ubuntu'
     env.use_sudo = True
-    if env.use_sudo: 
+    if env.use_sudo:
         env.safe_sudo = sudo
-    else: 
+    else:
         env.safe_sudo = run
     # install_dir is used to install custom packages used primarily by CloudMan or integrated apps
     env.install_dir = '/opt/cloudman/pkg'
     _setup_users()
     _required_programs()
     _required_libraries()
-    _configure_environment() 
+    _configure_environment()
     time_end = dt.datetime.utcnow()
     print(yellow("Duration of machine configuration: %s" % str(time_end-time_start)))
     if do_rebundle == 'do_rebundle':
 
 # == users
 def _setup_users():
-    # These users are required regardless of type of install because some 
-    # CloudMan code uses those. 'galaxy' user can be considered a generic 
+    # These users are required regardless of type of install because some
+    # CloudMan code uses those. 'galaxy' user can be considered a generic
     # end user account and used for such a purpose.
     _add_user('galaxy', '1001') # Must specify uid for 'galaxy' user because of the configuration for proFTPd
     _add_user('postgres')
     if not exists(env.install_dir):
         sudo("mkdir -p %s" % env.install_dir)
         sudo("chown %s %s" % (env.user, env.install_dir))
-    
+
     # Setup global environment for all users
     install_dir = os.path.split(env.install_dir)[0]
     exports = [ "export PATH=%s/bin:%s/sbin:$PATH" % (install_dir, install_dir),
     upload_url = "http://www.grid.net.ru/nginx/download/" \
                  "nginx_upload_module-%s.tar.gz" % upload_module_version
     url = "http://nginx.org/download/nginx-%s.tar.gz" % version
-    
+
     install_dir = os.path.join(env.install_dir, "nginx")
     remote_conf_dir = os.path.join(install_dir, "conf")
-    
+
     # skip install if already present
     if exists(remote_conf_dir) and contains(os.path.join(remote_conf_dir, "nginx.conf"), "/cloud"):
         return
-    
+
     with _make_tmp_dir() as work_dir:
         with contextlib.nested(cd(work_dir), settings(hide('stdout'))):
             run("wget %s" % upload_url)
                 sudo("make install")
                 with settings(warn_only=True):
                     sudo("cd %s; stow nginx" % env.install_dir)
-    
+
     nginx_conf_file = 'nginx.conf'
     url = os.path.join(REPO_ROOT_URL, nginx_conf_file)
     with cd(remote_conf_dir):
         sudo("wget --output-document=%s/%s %s" % (remote_conf_dir, nginx_conf_file, url))
-    
+
     nginx_errdoc_file = 'nginx_errdoc.tar.gz'
     url = os.path.join(REPO_ROOT_URL, nginx_errdoc_file)
     remote_errdoc_dir = os.path.join(install_dir, "html")
     with cd(remote_errdoc_dir):
         sudo("wget --output-document=%s/%s %s" % (remote_errdoc_dir, nginx_errdoc_file, url))
         sudo('tar xvzf %s' % nginx_errdoc_file)
-    
+
     cloudman_default_dir = "/opt/cloudman/sbin"
     sudo("mkdir -p %s" % cloudman_default_dir)
     if not exists("%s/nginx" % cloudman_default_dir):
 
 def _configure_postgresql(delete_main_dbcluster=False):
     """ This method is intended for cleaning up the installation when
-    PostgreSQL is installed from a package. Basically, when PostgreSQL 
-    is installed from a package, it creates a default database cluster 
-    and splits the config file away from the data. 
+    PostgreSQL is installed from a package. Basically, when PostgreSQL
+    is installed from a package, it creates a default database cluster
+    and splits the config file away from the data.
     This method can delete the default database cluster that was automatically
-    created when the package is installed. Deleting the main database cluster 
-    also has the effect of stopping the auto-start of the postmaster server at 
+    created when the package is installed. Deleting the main database cluster
+    also has the effect of stopping the auto-start of the postmaster server at
     machine boot. The method adds all of the PostgreSQL commands to the PATH.
     """
     pg_ver = sudo("dpkg -s postgresql | grep Version | cut -f2 -d':'")
     print(green("----- R packages installed -----"))
 
 # == libraries
- 
+
 def _required_libraries():
    """Install python libraries"""
     # Libraries to be be installed using easy_install
 def _configure_bash():
     """Some convenience/preference settings"""
     append('/etc/bash.bashrc', ['alias lt=\"ls -ltr\"', 'alias mroe=more'], use_sudo=True)
-    
+
     # Create a custom vimrc for the system
     vimrc_url = os.path.join(REPO_ROOT_URL, 'conf_files', 'vimrc')
     remote_file = '/etc/vim/vimrc'
         the above mentioned method).
         Also note that lately this has been a more reliable method for creating
         Images than the rebundle method.
-        
+
         :rtype: bool
         :return: If instance was successfully rebundled and an Image ID was received,
                  return True. False, otherwise.
     print "Rebundling instance '%s'. Start time: %s" % (env.hosts[0], time_start)
     _amazon_ec2_environment()
     instance_id = run("curl --silent http://169.254.169.254/latest/meta-data/instance-id")
-    
+
     # Handle reboot if required
     if not _reboot(instance_id, reboot_if_needed):
         return False # Indicates that rebundling was not completed and should be restarted
-    
+
     if boto:
         _clean() # Clean up the environment before rebundling
         # Select appropriate region
             print(yellow('galaxy-cloudman-%s' % time_start.strftime("%Y-%m-%d")))
             name, desc = _get_image_name()
             image_id = ec2_conn.create_image(instance_id, name=name, description=desc)
-            
+
             print(green("--------------------------"))
             print(green("Creating the new machine image now. Image ID (AMI) will be: '%s'" % (image_id)))
             print(yellow("Before this image can be used, the background process still needs to be completed."))
     """
     Rebundles the EC2 instance that is passed as the -H parameter
     This script handles all aspects of the rebundling process and is (almost) fully automated.
-    Two things should be edited and provided before invoking it: AWS account information 
-    and the desired size of the root volume for the new instance.  
-     
+    Two things should be edited and provided before invoking it: AWS account information
+    and the desired size of the root volume for the new instance.
+
     :rtype: bool
     :return: If instance was successfully rebundled and an AMI ID was received,
              return True.
         availability_zone = run("curl --silent http://169.254.169.254/latest/meta-data/placement/availability-zone")
         instance_region = availability_zone[:-1] # Truncate zone letter to get region name
         ec2_conn = _get_ec2_conn(instance_region)
-        
+
         # hostname = env.hosts[0] # -H flag to fab command sets this variable so get only 1st hostname
         instance_id = run("curl --silent http://169.254.169.254/latest/meta-data/instance-id")
-        
+
         # Get the size (in GB) of the root partition for the new image
         vol_size = _get_root_vol_size(ec2_conn, instance_id)
-        
+
         # Handle reboot if required
         if not _reboot(instance_id, reboot_if_needed):
             return False # Indicates that rebundling was not completed and should be restarted
-        
+
         _clean() # Clean up the environment before rebundling
         image_id = None
         kernel_id = run("curl --silent http://169.254.169.254/latest/meta-data/kernel-id")
             except EC2ResponseError, e:
                 print(red("Error creating volume: %s" % e))
                 return False
-            
+
             if vol:
                 try:
                     # Attach newly created volumes to the instance
                 return False
         else:
             print(red("Error retrieving instance availability zone"))
-            return False            
+            return False
     else:
         print(red("Python boto library not available. Aborting."))
         return False
 
 def _reboot(instance_id, force=False):
     """
-    Reboot current instance if required. Reboot can be forced by setting the 
+    Reboot current instance if required. Reboot can be forced by setting the
     method's 'force' parameter to True.
-    
+
     :rtype: bool
-    :return: If instance was rebooted, return True. Note that this primarily 
-             indicates if the instance was rebooted and does not guarantee that 
+    :return: If instance was rebooted, return True. Note that this primarily
+             indicates if the instance was rebooted and does not guarantee that
              the instance is accessible.
              False, otherwise.
     """
 def _get_image_name():
     """ Prompt a user for a name for the new Image while ensuring the name is not
         empty and that the user is happy with the input.
-    
+
     :rtype: string
     :return: Name of the Image as provided and confirmed by the user.
     """
     except EC2ResponseError, e:
         print "Attaching volume '%s' to instance '%s' as device '%s' failed. Exception: %s" % ( volume_id, instance_id, device, e )
         return False
-    
+
     for counter in range( 30 ):
         print "Attach attempt %s, volume status: %s" % ( counter, volumestatus )
         if volumestatus == 'attached':
         if counter == 29:
             print(red("Volume '%s' FAILED to attach to instance '%s' as device '%s'. Aborting." % ( volume_id, instance_id, device )))
             return False
-        
+
         volumes = ec2_conn.get_all_volumes( [volume_id] )
         volumestatus = volumes[0].attachment_state()
         time.sleep( 3 )
     except EC2ResponseError, ( e ):
         print(red("Detaching volume '%s' from instance '%s' failed. Exception: %s" % ( volume_id, instance_id, e )))
         return False
-    
+
     for counter in range( 30 ):
         print "Volume '%s' status '%s'" % ( volume_id, volumestatus )
         if volumestatus == 'available':
 
 def _create_snapshot(ec2_conn, volume_id, description=None):
     """
-    Create a snapshot of the EBS volume with the provided volume_id. 
+    Create a snapshot of the EBS volume with the provided volume_id.
     Wait until the snapshot process is complete (note that this may take quite a while)
     """
     snap_start_time = dt.datetime.utcnow()
     print "Initiating snapshot of EBS volume '%s' in region '%s' at '%s'" % (volume_id, ec2_conn.region.name, snap_start_time)
     snapshot = ec2_conn.create_snapshot(volume_id, description=description)
-    if snapshot: 
+    if snapshot:
         while snapshot.status != 'completed':
             print "Snapshot '%s' progress: '%s'; status: '%s'; duration: %s" % (snapshot.id, snapshot.progress, snapshot.status, (dt.datetime.utcnow()-snap_start_time))
             time.sleep(10)