Commits

Enis Afgan committed aa8f9ba

Add a cluster name tag to any attached volumes, as well as to any snapshots created while persisting a file system

  • Parent commits 704d18c
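
As background (illustration only, not part of the commit): the tagging in this change goes through CloudMan's cloud_interface.add_tag() helper. A minimal sketch of the equivalent direct calls, assuming boto 2 and placeholder region/resource IDs, would look roughly like this:

    # Illustrative sketch only: assumes boto 2; region and IDs are placeholders.
    import boto.ec2

    conn = boto.ec2.connect_to_region('us-east-1')        # assumed region
    vol = conn.get_all_volumes(['vol-12345678'])[0]        # hypothetical volume id
    snap = conn.get_all_snapshots(['snap-12345678'])[0]    # hypothetical snapshot id

    # boto 2 Volume and Snapshot objects expose add_tag(key, value), which is
    # presumably what cloud_interface.add_tag() wraps for each resource.
    for resource in (vol, snap):
        resource.add_tag('clusterName', 'my-cluster')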


Files changed (3)

File cm/controllers/root.py

             fs_name = self.app.manager.get_services(svc_role=ServiceRole.GALAXY_DATA)[0]
         if delete_snap:
             delete_snap = True
-        log.debug("Initating expansion of {0} file system to size {1} w/ snap desc '{2}', which "\
+        log.debug("Initiating expansion of {0} file system to size {1} w/ snap desc '{2}', which "\
                 "{3} be deleted".format(fs_name, new_vol_size, vol_expand_desc,
                 "will" if delete_snap else "will not"))
         try:
         except Exception, g_ex:
             log.error("Unknown Exception: %s" % g_ex)
             return "Unknown exception. Check the log for details."
-        return "Initated '{0}' file system expansion".format(fs_name)
+        return "Initiated '{0}' file system expansion".format(fs_name)
 
     @expose
     def update_file_system(self, trans, fs_name):
         self.app.manager.update_file_system(fs_name)
-        return "Initiated persisiting of '{0}' file system".format(fs_name)
+        return "Initiated persisting of '{0}' file system".format(fs_name)
 
     @expose
     def add_file_system(self, trans, fs_kind, dot=False, persist=False,

File cm/services/data/volume.py

                 snapshot.update()
             log.info("Completed creation of a snapshot for the volume '%s', snap id: '%s'" \
                 % (self.volume_id, snapshot.id))
+            self.app.cloud_interface.add_tag(snapshot, 'clusterName',
+                self.app.ud['cluster_name'])
+            self.app.cloud_interface.add_tag(
+                self.volume, 'bucketName', self.app.ud['bucket_cluster'])
+            self.app.cloud_interface.add_tag(self.volume, 'filesystem', self.fs.name)
             self.snapshot_progress = None  # Reset because of the UI
             self.snapshot_status = None  # Reset because of the UI
             return str(snapshot.id)
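
A presumable payoff of the clusterName tag added to the snapshot above is that a cluster's storage resources can later be found by filtering on that tag. A minimal sketch, assuming boto 2 and a placeholder region and cluster name:

    # Illustrative sketch only: list a cluster's volumes and snapshots by tag.
    import boto.ec2

    conn = boto.ec2.connect_to_region('us-east-1')   # assumed region
    # EC2 supports server-side filtering on tags.
    cluster_volumes = conn.get_all_volumes(filters={'tag:clusterName': 'my-cluster'})
    cluster_snapshots = conn.get_all_snapshots(filters={'tag:clusterName': 'my-cluster'})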

File cm/util/master.py

                 if ServiceRole.GALAXY_DATA in roles:
                     self.snapshot = self.app.cloud_interface.get_ec2_connection().get_all_snapshots([snap['snap_id']])[0]
                     self.default_galaxy_data_size = self.snapshot.volume_size
-        return self.default_galaxy_data_size
+        return str(self.default_galaxy_data_size)
 
     @TestFlag(False)
     def start(self):
 
         self._handle_prestart_commands()
         # Generating public key before any worker has been initialized
-        # This is required for cnfiguring Hadoop the main hadoop worker still needs to be
-        # bale to ssh into itself!!!
+        # This is required for configuring Hadoop; the main Hadoop worker still needs
+        # to be able to ssh into itself!
         # this should happen before SGE is added
         self.get_root_public_key()
         except EC2ResponseError, e:
             log.debug("Error checking for attached volumes: %s" % e)
         log.debug("Attached volumes: %s" % attached_volumes)
+        # Add ``clusterName`` tag to any attached volumes
+        for att_vol in attached_volumes:
+            self.app.cloud_interface.add_tag(att_vol, 'clusterName', self.app.ud['cluster_name'])
         return attached_volumes
 
     @TestFlag(None)
                     # If the state has changed, do a deeper update
                     if self.spot_state != old_state:
                         if self.spot_state == spot_states.CANCELLED:
-                            # The request was cancelled so remove this Instance
+                            # The request was canceled so remove this Instance
                             # object
-                            log.info("Spot request {0} was cancelled; removing Instance object {1}"
+                            log.info("Spot request {0} was canceled; removing Instance object {1}"
                                 .format(self.spot_request_id, self.id))
                             self._remove_instance()
                         elif self.spot_state == spot_states.ACTIVE:
                             # We should have an instance now
                             self.id = req.instance_id
-                            instance = self.get_cloud_instance_object()
                             log.info("Spot request {0} filled with instance {1}"
                                 .format(self.spot_request_id, self.id))
-                            self.app.cloud_interface.add_tag(instance,
-                                'clusterName', self.app.ud['cluster_name'])
-                            self.app.cloud_interface.add_tag(instance, 'role', 'worker')
+                            # Potentially give it a few seconds so everything gets registered
+                            for i in range(3):
+                                instance = self.get_cloud_instance_object()
+                                if instance:
+                                    self.app.cloud_interface.add_tag(instance,
+                                        'clusterName', self.app.ud['cluster_name'])
+                                    self.app.cloud_interface.add_tag(instance, 'role', 'worker')
+                                    break
+                                time.sleep(5)
             except EC2ResponseError, e:
                 log.error("Trouble retrieving spot request {0}: {1}".format(
                     self.spot_request_id, e))
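
The master.py change above tags each attached volume individually via cloud_interface.add_tag. boto 2 also allows tagging several resources in one call through EC2Connection.create_tags; the sketch below shows that batch form, with the connection setup, instance id, and cluster name assumed rather than taken from the commit.

    # Illustrative sketch only: batch-tag attached volumes with boto 2.
    import boto.ec2

    conn = boto.ec2.connect_to_region('us-east-1')               # assumed region
    attached_volumes = conn.get_all_volumes(
        filters={'attachment.instance-id': 'i-0123abcd'})        # hypothetical instance id
    if attached_volumes:
        # A single create_tags call covers every attached volume.
        conn.create_tags([v.id for v in attached_volumes],
                         {'clusterName': 'my-cluster'})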