Commits

Anonymous committed 1d0bb56

add actual user changes to galaxy-central

  • Parent commits 070513d


Files changed (14)

lib/galaxy/jobs/__init__.py

 from galaxy.util.json import from_json_string
 from galaxy.util.expressions import ExpressionContext
 from galaxy.jobs.actions.post import ActionBox
-
+import subprocess, pwd
 from sqlalchemy.sql.expression import and_, or_
 
 import pkg_resources
         self.sa_session.expunge_all() #this prevents the metadata reverting that has been seen in conjunction with the PBS job runner
         if not os.path.exists( self.working_directory ):
             os.mkdir( self.working_directory )
+        if self.app.config.drmaa_external_runjob_script:
+            os.chmod( self.working_directory, 0777 )
+
         # Restore parameters from the database
         job = self.get_job()
         if job.user is None and job.galaxy_session is None:
 
         # fix permissions
         for path in [ dp.real_path for dp in self.get_output_fnames() ]:
+            #change the ownership of the files in file_path directory back to galaxy user
+            if self.app.config.drmaa_external_runjob_script and self.app.config.external_chown_script:
+                galaxy_user_name = pwd.getpwuid(os.getuid())[0]
+                galaxy_group_id = str(pwd.getpwuid(os.getuid())[3])
+                p = subprocess.Popen( [ '/usr/bin/sudo', '-E', self.app.config.external_chown_script, path, galaxy_user_name, galaxy_group_id ], shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE )
+                (stdoutdata, stderrdata) = p.communicate() 
+                exitcode = p.returncode
+                if exitcode != 0:
+                    ## There was an error in the child process
+                    raise RuntimeError("External_chown_script failed (exit code %s) with error %s" % (str(exitcode), stderrdata))
             util.umask_fix_perms( path, self.app.config.umask, 0666, self.app.config.gid )
         self.sa_session.flush()
         log.debug( 'job %d ended' % self.job_id )
         jeha_false_path = None
         if self.app.config.outputs_to_working_directory:
             self.output_paths = []
-            self.output_dataset_paths = {}
+            output_dataset_paths = {}
             for name, data in [ ( da.name, da.dataset.dataset ) for da in job.output_datasets + job.output_library_datasets ]:
                 false_path = os.path.abspath( os.path.join( self.working_directory, "galaxy_dataset_%d.dat" % data.id ) )
                 dsp = DatasetPath( data.id, data.file_name, false_path )
         else:
             self.prepare_input_files_cmds = None
         self.status = task.states.NEW
-
+       
     def get_job( self ):
         if self.job_id:
             return self.sa_session.query( model.Job ).get( self.job_id )
 
     def __get_runner_name( self, job_wrapper ):
         if self.app.config.use_tasked_jobs and job_wrapper.tool.parallelism is not None and not isinstance(job_wrapper, TaskWrapper):
-            runnner_name = "tasks"
+            runner_name = "tasks"
         else:
             runner_name = ( job_wrapper.get_job_runner().split(":", 1) )[0]
         return runner_name

lib/galaxy/jobs/runners/drmaa.py

 import os, sys, logging, threading, time
+import pprint, pwd
+from pwd import getpwnam
+import subprocess
+import inspect
+import simplejson as json
+
 from Queue import Queue, Empty
 
 from galaxy import model
 
 import pkg_resources
 
+
 if sys.version_info[:2] == ( 2, 4 ):
     pkg_resources.require( "ctypes" )
 pkg_resources.require( "drmaa" )
     fi
     export PYTHONPATH
 fi
+%s
 cd %s
 %s
+%s
+%s
+%s
 """
+def __lineno__():
+    """Returns the current line number in our program."""
+    return inspect.currentframe().f_back.f_lineno
+
+def __filename__():
+    """Returns the current filename in our program."""
+    return inspect.currentframe().f_back.f_code.co_filename
+
+DRMAA_jobTemplate_attributes = [ 'args', 'remoteCommand', 'outputPath', 'errorPath', 'nativeSpecification',
+                    'name','email','project' ]
 
 class DRMAAJobState( object ):
     def __init__( self ):
             worker.start()
             self.work_threads.append( worker )
         log.debug( "%d workers ready" % nworkers )
+        # external_runJob_script can be None, in which case it's not used.
+        self.external_runJob_script = app.config.drmaa_external_runjob_script
+        self.external_killJob_script = app.config.drmaa_external_killjob_script
+        self.TMPDIR = app.config.TMPDIR
 
     def get_native_spec( self, url ):
         """Get any native DRM arguments specified by the site configuration"""
 
     def queue_job( self, job_wrapper ):
         """Create job script and submit it to the DRM"""
-
         try:
             job_wrapper.prepare()
             command_line = self.build_command_line( job_wrapper, include_metadata=True )
             return
 
         runner_url = job_wrapper.get_job_runner()
-        
+
         # This is silly, why would we queue a job with no command line?
         if not command_line:
             job_wrapper.finish( '', '' )
         job_wrapper.change_state( model.Job.states.QUEUED )
 
         # define job attributes
-        ofile = "%s/%s.o" % (self.app.config.cluster_files_directory, job_wrapper.job_id)
-        efile = "%s/%s.e" % (self.app.config.cluster_files_directory, job_wrapper.job_id)
+        ofile = "%s/%s.o" % (self.app.config.cluster_files_directory, job_wrapper.get_id_tag())
+        efile = "%s/%s.e" % (self.app.config.cluster_files_directory, job_wrapper.get_id_tag())
         jt = self.ds.createJobTemplate()
         jt.remoteCommand = "%s/database/pbs/galaxy_%s.sh" % (os.getcwd(), job_wrapper.get_id_tag())
         jt.outputPath = ":%s" % ofile
         native_spec = self.get_native_spec( runner_url )
         if native_spec is not None:
             jt.nativeSpecification = native_spec
+        # set and export the galaxy user's PATH environment to the actual user if submitting jobs as the actual user
+        try:
+            if self.external_runJob_script:
+               export_path = 'export PATH=%s:$PATH' %(os.environ['PATH'])
+            else:
+               export_path = ''
+        except:
+            export_path = ''
+        
+        if self.TMPDIR:
+            export_tmp = 'export TMPDIR=%s' %self.TMPDIR
+        else:
+            export_tmp = ''	   
 
-        script = drm_template % (job_wrapper.galaxy_lib_dir, os.path.abspath( job_wrapper.working_directory ), command_line)
-        try:
-            fh = file( jt.remoteCommand, "w" )
-            fh.write( script )
-            fh.close()
-            os.chmod( jt.remoteCommand, 0750 )
-        except:
-            job_wrapper.fail( "failure preparing job script", exception=True )
-            log.exception("failure running job %s" % job_wrapper.get_id_tag())
-            return                          
+        if self.external_runJob_script == None: 
+            script = drm_template % (job_wrapper.galaxy_lib_dir, export_path, os.path.abspath( job_wrapper.working_directory ),export_tmp, command_line,'','')
+        else:
+            touchcmd = 'touch ' + os.path.abspath( job_wrapper.working_directory ) + '/just_in_cases.txt'
+            chmodcmd = 'chmod -Rf  a+rwx ' + os.path.abspath( job_wrapper.working_directory ) + '/*'
+            script = drm_template % (job_wrapper.galaxy_lib_dir, export_path, os.path.abspath( job_wrapper.working_directory ), export_tmp, command_line, touchcmd,chmodcmd)
+
+        fh = file( jt.remoteCommand, "w" )
+        fh.write( script )
+        fh.close()
+        os.chmod( jt.remoteCommand, 0755 )
 
         # job was deleted while we were preparing it
         if job_wrapper.get_state() == model.Job.states.DELETED:
         log.debug("(%s) submitting file %s" % ( galaxy_id_tag, jt.remoteCommand ) )
         log.debug("(%s) command is: %s" % ( galaxy_id_tag, command_line ) )
         # runJob will raise if there's a submit problem
-        job_id = self.ds.runJob(jt)
+        if self.external_runJob_script is None:
+            job_id = self.ds.runJob(jt)
+        else:
+            userid = self.get_qsub_user(job_wrapper)
+            filename = self.store_jobtemplate(job_wrapper, jt)
+            job_id = self.external_runjob(filename, userid)
         log.info("(%s) queued as %s" % ( galaxy_id_tag, job_id ) )
 
         # store runner information for tracking if Galaxy restarts
         efile = drm_job_state.efile
         job_file = drm_job_state.job_file
         # collect the output
-        try:
-            ofh = file(ofile, "r")
-            efh = file(efile, "r")
-            stdout = ofh.read( 32768 )
-            stderr = efh.read( 32768 )
-        except:
-            stdout = ''
-            stderr = 'Job output not returned from cluster'
-            log.debug(stderr)
+        # JED - HACK to wait for the files to appear
+        which_try = 0
+        while which_try < 60:
+            try:
+                ofh = file(ofile, "r")
+                efh = file(efile, "r")
+                stdout = ofh.read( 32768 )
+                stderr = efh.read( 32768 )
+                which_try = 60
+            except:
+                which_try += 1
+                if which_try == 60:
+                    stdout = ''
+                    stderr = 'Job output not returned from cluster'
+                    log.debug(stderr)
+                else:
+                    time.sleep(1)
 
         try:
             drm_job_state.job_wrapper.finish( stdout, stderr )
 
     def stop_job( self, job ):
         """Attempts to delete a job from the DRM queue"""
-        try:
-            self.ds.control( job.job_runner_external_id, drmaa.JobControlAction.TERMINATE )
-            log.debug( "(%s/%s) Removed from DRM queue at user's request" % ( job.id, job.job_runner_external_id ) )
-        except drmaa.InvalidJobException:
-            log.debug( "(%s/%s) User killed running job, but it was already dead" % ( job.id, job.job_runner_external_id ) )
-        except Exception, e:
-            log.debug( "(%s/%s) User killed running job, but error encountered removing from DRM queue: %s" % ( job.id, job.job_runner_external_id, e ) )
+        if self.external_killJob_script is None:
+            try:
+                self.ds.control( job.job_runner_external_id, drmaa.JobControlAction.TERMINATE )
+                log.debug( "(%s/%s) Removed from DRM queue at user's request" % ( job.id, job.job_runner_external_id ) )
+            except drmaa.InvalidJobException:
+                log.debug( "(%s/%s) User killed running job, but it was already dead" % ( job.id, job.job_runner_external_id ) )
+            except Exception, e:
+                log.debug( "(%s/%s) User killed running job, but error encountered removing from DRM queue: %s" % ( job.id, job.job_runner_external_id, e ) )
+        else:
+            try:
+                subprocess.Popen( [ '/usr/bin/sudo', '-E', self.external_killJob_script, str( job.job_runner_external_id ), str( self.job_user_uid[2] ) ], shell=False )
+                log.debug( "(%s/%s) Removed from DRM queue at user's request" % ( job.id, job.job_runner_external_id ) )
+            except Exception, e:
+                log.debug( "(%s/%s) User killed running job, but error encountered removing from DRM queue: %s" % ( job.id, job.job_runner_external_id, e ) )
 
     def recover( self, job, job_wrapper ):
         """Recovers jobs stuck in the queued/running state when Galaxy started"""
             drm_job_state.old_state = drmaa.JobState.QUEUED_ACTIVE
             drm_job_state.running = False
             self.monitor_queue.put( drm_job_state )
+
+    def get_qsub_user(self, job_wrapper):
+        """ Returns the UserID (or Username) that should be used to execute the job. """
+        #TODO:
+        #add some logic to decide on an SGE user for the given job.
+        job_user_name = job_wrapper.user.split('@')
+        self.job_user_uid = getpwnam(job_user_name[0])
+        log.debug( "(%s) is the uid being passed to the DRM queue\n" % ( self.job_user_uid[2] ) )
+        return self.job_user_uid[2]
+
+    def store_jobtemplate(self, job_wrapper, jt):
+        """ Stores the content of a DRMAA JobTemplate object in a file as a JSON string.
+        Path is hard-coded, but it's no worse than other paths in this module.
+        Uses Galaxy's JobID, so file is expected to be unique."""
+        filename = "%s/database/pbs/%s.jt_json" % (os.getcwd(), job_wrapper.get_id_tag())
+        data = {}
+        for attr in DRMAA_jobTemplate_attributes:
+            try:
+                data[attr] = getattr(jt, attr)
+            except:
+                pass
+        s = json.dumps(data)
+        f = open(filename,'w')
+        f.write(s)
+        f.close()
+        return filename
+
+    def external_runjob(self, jobtemplate_filename, username):
+        """ runs an external script the will QSUB a new job.
+        The external script will be run with sudo, and will setuid() to the specified user.
+        Effectively, will QSUB as a different user (then the one used by Galaxy).
+        """
+        p = subprocess.Popen([ '/usr/bin/sudo', '-E', self.external_runJob_script, str(username), jobtemplate_filename ],
+                shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        (stdoutdata, stderrdata) = p.communicate()
+        exitcode = p.returncode
+        #os.unlink(jobtemplate_filename)
+        if exitcode != 0:
+            # There was an error in the child process
+            raise RuntimeError("External_runjob failed (exit code %s)\nCalled from %s:%d\nChild process reported error:\n%s" % (str(exitcode), __filename__(), __lineno__(), stderrdata))
+        if not stdoutdata.strip():
+            raise RuntimeError("External_runjob did return the job id: %s" % (stdoutdata))
+        
+        # The expected output is a single line containing a single numeric value:
+        # the DRMAA job-ID. If not the case, will throw an error.
+        jobId = stdoutdata
+        return jobId
+
+

lib/galaxy/jobs/splitters/multi.py

         subdir_index[0] = subdir_index[0] + 1
         if not os.path.exists(dir):
             os.makedirs(dir)
+            os.chmod(dir,0777)
         task_dirs.append(dir)
         return dir
     

lib/galaxy/model/__init__.py

             rval = galaxy.datatypes.data.nice_size( rval )
         return rval
 
+    def get_api_value( self, view='collection', value_mapper = None ):
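+        """ Return a dict of the attributes named in this class's
+        'api_<view>_visible_keys' list, passing values through value_mapper
+        when a mapper is supplied for the key; missing attributes map to None. """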
+        if value_mapper is None:
+            value_mapper = {}
+        rval = {}
+        try:
+            visible_keys = self.__getattribute__( 'api_' + view + '_visible_keys' )
+        except AttributeError:
+            raise Exception( 'Unknown API view: %s' % view )
+        for key in visible_keys:
+            try:
+                rval[key] = self.__getattribute__( key )
+                if key in value_mapper:
+                    rval[key] = value_mapper.get( key )( rval[key] ) 
+            except AttributeError:
+                rval[key] = None
+        return rval
+
 class HistoryUserShareAssociation( object ):
     def __init__( self ):
         self.history = None
     permitted_actions = get_permitted_actions( filter='DATASET' )
     file_path = "/tmp/"
     engine = None
-    def __init__( self, id=None, state=None, external_filename=None, extra_files_path=None, file_size=None, purgable=True ):
+    def __init__( self, id=None, state=None, external_filename=None, extra_files_path=None, file_size=None, purgable=True):
         self.id = id
         self.state = state
         self.deleted = False
                 # Create directory if it does not exist
                 if not os.path.exists( dir ):
                     os.makedirs( dir )
+                os.chmod( dir, 0777 )
                 # Return filename inside hashed directory
                 return os.path.abspath( os.path.join( dir, "dataset_%d.dat" % self.id ) )
         else:
         return hda_name
     def get_access_roles( self, trans ):
         return self.dataset.get_access_roles( trans )
+    def get_api_value( self, view='collection' ):
+        # Since this class is a proxy to rather complex attributes we want to
+        # display in other objects, we can't use the simpler method used by
+        # other model classes.
+        hda = self          
+        rval = dict( name = hda.name,
+                     extension = hda.extension,
+                     deleted = hda.deleted,
+                     visible = hda.visible,
+                     state = hda.state,
+                     file_size = int( hda.get_size() ),
+                     genome_build = hda.dbkey,
+                     misc_info = hda.info,
+                     misc_blurb = hda.blurb )
+        for name, spec in hda.metadata.spec.items():
+            val = hda.metadata.get( name )
+            if isinstance( val, MetadataFile ):
+                val = val.file_name
+            elif isinstance( val, list ):
+                val = ', '.join( [str(v) for v in val] )
+            rval['metadata_' + name] = val
+        return rval
     def quota_amount( self, user ):
         """
         If the user has multiple instances of this dataset, it will not affect their disk usage statistic.
             # File Exists is okay, otherwise reraise
             if e.errno != errno.EEXIST:
                 raise
+
+        os.chmod(path, 0777)
         # Return filename inside hashed directory
         return os.path.abspath( os.path.join( path, "metadata_%d.dat" % self.id ) )
 

lib/galaxy/tools/__init__.py

 from cgi import FieldStorage
 from galaxy.util.hash_util import *
 from galaxy.util import listify
+from galaxy.web import security 
+import socket
+
 
 log = logging.getLogger( __name__ )
 
         try:
             path = elem.get( "file" )
             tool = self.load_tool( os.path.join( tool_path, path ), guid=guid )
-            if guid is not None:
-                # Tool was installed from a Galaxy tool shed.
-                tool.tool_shed = elem.find( "tool_shed" ).text
-                tool.repository_name = elem.find( "repository_name" ).text
-                tool.repository_owner = elem.find( "repository_owner" ).text
-                tool.changeset_revision = elem.find( "changeset_revision" ).text
-                tool.old_id = elem.find( "id" ).text
-                tool.version = elem.find( "version" ).text
             if self.app.config.get_bool( 'enable_tool_tags', False ):
                 tag_names = elem.get( "tags", "" ).split( "," )
                 for tag_name in tag_names:
             # legacy basic mode - provide compatible defaults
             self.attributes['split_size'] = 20
             self.attributes['split_mode'] = 'number_of_parts'
-
+            
+        
+    
 class Tool:
     """
     Represents a computational tool that can be executed through Galaxy. 
         # easily ensure that parameter dependencies like index files or
         # tool_data_table_conf.xml entries exist.
         self.input_params = []
-        # Attributes of tools installed from Galaxy tool sheds.
-        self.tool_shed = None
-        self.repository_name = None
-        self.repository_owner = None
-        self.changeset_revision = None
-        self.old_id = None
-        self.version = None
         # Parse XML element containing configuration
         self.parse( root, guid=guid )
+        self.external_runJob_script = app.config.drmaa_external_runjob_script
     
     @property
     def sa_session( self ):
             raise Exception, "Missing tool 'name'"
         # Get the UNIQUE id for the tool 
         # TODO: can this be generated automatically?
-        if guid is None:
+        if guid is not None:
+            self.id = guid
+        else:
             self.id = root.get( "id" )
-            self.version = root.get( "version" )
-        else:
-            self.id = guid
         if not self.id: 
-            raise Exception, "Missing tool 'id'"
-        if not self.version:
+            raise Exception, "Missing tool 'id'" 
+        self.version = root.get( "version" )
+        if not self.version: 
             # For backward compatibility, some tools may not have versions yet.
             self.version = "1.0.0"
         # Support multi-byte tools
             if elem.tag == "repeat":
                 group = Repeat()
                 group.name = elem.get( "name" )
-                group.title = elem.get( "title" )
-                group.help = elem.get( "help", None )
+                group.title = elem.get( "title" ) 
                 group.inputs = self.parse_input_elem( elem, enctypes, context )
                 group.default = int( elem.get( "default", 0 ) )
                 group.min = int( elem.get( "min", 0 ) )
                                 DatasetFilenameWrapper( converted_dataset,
                                                         datatypes_registry = self.app.datatypes_registry,
                                                         tool = Bunch( conversion_name = Bunch( extensions = conv_ext ) ), 
-                                                        name = conversion_name )
+                                                        name = conversion_name, config_info = self.app.config )
                     # Wrap actual input dataset
                     input_values[ input.name ] = \
-                        DatasetFilenameWrapper( input_values[ input.name ],
+                        DatasetFilenameWrapper( input_values[ input.name ], 
                                                 datatypes_registry = self.app.datatypes_registry,
                                                 tool = self,
-                                                name = input.name )
+                                                name = input.name, config_info = self.app.config )
                 elif isinstance( input, SelectToolParameter ):
                     input_values[ input.name ] = SelectToolParameterWrapper( 
                         input, input_values[ input.name ], self.app, other_values = param_dict )
             param_dict[name] = DatasetFilenameWrapper( data, 
                                                        datatypes_registry = self.app.datatypes_registry, 
                                                        tool = self, 
-                                                       name = name )
+                                                       name = name, config_info = self.app.config )
             if data:
                 for child in data.children:
-                    param_dict[ "_CHILD___%s___%s" % ( name, child.designation ) ] = DatasetFilenameWrapper( child )
+                    param_dict[ "_CHILD___%s___%s" % ( name, child.designation ) ] = DatasetFilenameWrapper( child,config_info = self.app.config )
         for name, hda in output_datasets.items():
             # Write outputs to the working directory (for security purposes) 
             # if desired.
             if self.app.config.outputs_to_working_directory:
                 try:
                     false_path = [ dp.false_path for dp in output_paths if dp.real_path == hda.file_name ][0]
-                    param_dict[name] = DatasetFilenameWrapper( hda, false_path = false_path )
+                    param_dict[name] = DatasetFilenameWrapper( hda, false_path = false_path, config_info = self.app.config )
                     open( false_path, 'w' ).close()
                 except IndexError:
                     log.warning( "Unable to determine alternate path for writing job outputs, outputs will be written to their real paths" )
-                    param_dict[name] = DatasetFilenameWrapper( hda )
+                    param_dict[name] = DatasetFilenameWrapper( hda, config_info = self.app.config )
             else:
-                param_dict[name] = DatasetFilenameWrapper( hda )
+                param_dict[name] = DatasetFilenameWrapper( hda, config_info = self.app.config )
             # Provide access to a path to store additional files
             # TODO: path munging for cluster/dataset server relocatability
             param_dict[name].files_path = os.path.abspath(os.path.join( job_working_directory, "dataset_%s_files" % (hda.dataset.id) ))
             for child in hda.children:
-                param_dict[ "_CHILD___%s___%s" % ( name, child.designation ) ] = DatasetFilenameWrapper( child )
+                param_dict[ "_CHILD___%s___%s" % ( name, child.designation ) ] = DatasetFilenameWrapper( child, config_info = self.app.config )
         for out_name, output in self.outputs.iteritems():
             if out_name not in param_dict and output.filters:
                 # Assume the reason we lack this output is because a filter 
                 fd, config_filename = tempfile.mkstemp( dir=directory )
                 os.close( fd )
             f = open( config_filename, "wt" )
+            os.chmod(config_filename, 0777)
             f.write( fill_template( template_text, context=param_dict ) )
             f.close()
+            os.chmod(config_filename, 0777)
             param_dict[name] = config_filename
             config_filenames.append( config_filename )
         return config_filenames
         """
         for name, hda in output.items():
             temp_file_path = os.path.join( job_working_directory, "dataset_%s_files" % ( hda.dataset.id ) )
-            try:
-                if len( os.listdir( temp_file_path ) ) > 0:
-                    store_file_path = os.path.join( 
-                        os.path.join( self.app.config.file_path, *directory_hash_id( hda.dataset.id ) ), 
+            #try:
+            if os.path.exists(temp_file_path) and len( os.listdir( temp_file_path ) ) > 0:
+                store_file_path = os.path.join( 
+                    os.path.join( self.app.config.file_path, *directory_hash_id( hda.dataset.id ) ), 
                         "dataset_%d_files" % hda.dataset.id )
-                    shutil.move( temp_file_path, store_file_path )
-                    # Fix permissions
+                os.mkdir(store_file_path)
+                os.system('mv %s/* %s/' % (temp_file_path, store_file_path))
+                # Fix permissions
+                if self.external_runJob_script == None:
                     for basedir, dirs, files in os.walk( store_file_path ):
                         util.umask_fix_perms( basedir, self.app.config.umask, 0777, self.app.config.gid )
                         for file in files:
                             if os.path.islink( path ):
                                 continue 
                             util.umask_fix_perms( path, self.app.config.umask, 0666, self.app.config.gid )
-            except:
-                continue
+            #except:
+                #continue
     
     def collect_child_datasets( self, output):
         """
         def items( self ):
             return iter( [ ( k, self.get( k ) ) for k, v in self.metadata.items() ] )
     
-    def __init__( self, dataset, datatypes_registry = None, tool = None, name = None, false_path = None ):
+    def __init__( self, dataset, datatypes_registry = None, tool = None, name = None, false_path = None, config_info = None ):
         if not dataset:
             try:
                 # TODO: allow this to work when working with grouping
             self.dataset = dataset
             self.metadata = self.MetadataWrapper( dataset.metadata )
         self.false_path = false_path
+        
+        # create web_display_url attribute
+        sec = security.SecurityHelper( id_secret=config_info.id_secret )
+        try:
+            url = 'http://' + socket.getfqdn() + config_info.cookie_path + '/datasets/' + sec.encode_id(dataset.id) + '/display/?preview=True'
+            self.web_display_url = url 
+        except:
+            self.web_display_url = None      
 
     def __str__( self ):
         if self.false_path is not None:

lib/galaxy/tools/actions/__init__.py

                         galaxy.tools.DatasetFilenameWrapper( input_values[ input.name ],
                                                              datatypes_registry = trans.app.datatypes_registry,
                                                              tool = tool,
-                                                             name = input.name )
+                                                             name = input.name, config_info = trans.app.config)
                 elif isinstance( input, SelectToolParameter ):
                     input_values[ input.name ] = galaxy.tools.SelectToolParameterWrapper( input, input_values[ input.name ], tool.app, other_values = incoming )
                 else:
                     trans.sa_session.flush()
                     trans.app.security_agent.set_all_dataset_permissions( data.dataset, output_permissions )
                 # Create an empty file immediately
-                open( data.file_name, "w" ).close()
-                # Fix permissions
-                util.umask_fix_perms( data.file_name, trans.app.config.umask, 0666 )
+                self.external_runJob_script = trans.app.config.drmaa_external_runjob_script
+                if self.external_runJob_script == None:
+                    open( data.file_name, "w" ).close()
+                    # Fix permissions
+                    util.umask_fix_perms( data.file_name, trans.app.config.umask, 0666 )
+                log.debug('.DAT file name = %s\n' %(data.file_name))
                 # This may not be neccesary with the new parent/child associations
                 data.designation = name
                 # Copy metadata from one of the inputs if requested. 

lib/galaxy/tools/actions/upload_common.py

                 is_binary = None
             try:
                 link_data_only = uploaded_dataset.link_data_only
+                chmod_flag = 1
             except:
                 link_data_only = 'copy_files'
+                chmod_flag = 0
             json = dict( file_type = uploaded_dataset.file_type,
                          ext = uploaded_dataset.ext,
                          name = uploaded_dataset.name,
                          link_data_only = link_data_only,
                          space_to_tab = uploaded_dataset.space_to_tab,
                          path = uploaded_dataset.path )
+            if chmod_flag == 0 and trans.app.config.drmaa_external_runjob_script:	
+                 os.chmod(uploaded_dataset.path, 0777)
         json_file.write( to_json_string( json ) + '\n' )
     json_file.close()
+    if trans.app.config.drmaa_external_runjob_script:
+        os.chmod(json_file_path, 0777)
     return json_file_path
 def create_job( trans, params, tool, json_file_path, data_list, folder=None, return_job=False ):
     """
             # Create an empty file immediately
             if not dataset.dataset.external_filename:
                 open( dataset.file_name, "w" ).close()
+                if trans.app.config.drmaa_external_runjob_script:
+                    os.chmod(dataset.file_name, 0777)
     else:
         for i, dataset in enumerate( data_list ):
             job.add_output_dataset( 'output%i' % i, dataset )
             # Create an empty file immediately
             if not dataset.dataset.external_filename:
                 open( dataset.file_name, "w" ).close()
+                if trans.app.config.drmaa_external_runjob_script:
+                    os.chmod(dataset.file_name, 0777)
+
     job.state = job.states.NEW
     trans.sa_session.add( job )
     trans.sa_session.flush()

scripts/drmaa_external_killer.py

+#!/usr/bin/env python
+import os
+import sys
+import errno
+import pwd
+#import drmaa
+new_path = [ os.path.join( os.getcwd(), "lib" ) ]
+new_path.extend( sys.path[1:] ) # remove scripts/ from the path
+sys.path = new_path
+
+from galaxy import eggs
+import pkg_resources
+pkg_resources.require("simplejson")
+import simplejson as json
+pkg_resources.require("drmaa")
+import drmaa
+
+
+
+def validate_paramters():
+    if len(sys.argv)<3:
+        sys.stderr.write("usage: %s [job ID] [user uid]\n" % sys.argv[0])
+        exit(1)
+
+    jobID  = sys.argv[1]
+    uid = int(sys.argv[2])
+
+
+
+    return jobID, uid
+
+def set_user(uid):
+    try:
+        gid = pwd.getpwuid(uid).pw_gid
+        os.setgid(gid)
+        os.setuid(uid)
+    except OSError, e:
+        if e.errno == errno.EPERM:
+            sys.stderr.write("error: setuid(%d) failed: permission denied. Did you setup 'sudo' correctly for this script?\n" % uid )
+            exit(1)
+        else:
+            pass
+    if os.getuid()==0:
+        sys.stderr.write("error: UID is 0 (root) after changing user. This script should not be run as root. aborting.\n" )
+        exit(1)
+    if os.geteuid()==0:
+        sys.stderr.write("error: EUID is 0 (root) after changing user. This script should not be run as root. aborting.\n" )
+        exit(1)
+
+def main():
+    jobID, uid  = validate_paramters()
+    set_user(uid)
+    s=drmaa.Session()
+    s.initialize()
+    s.control(jobID,drmaa.JobControlAction.TERMINATE)
+    s.exit()
+
+
+
+if __name__ == "__main__":
+    main()
+
+

scripts/drmaa_external_runner.py

+#!/usr/bin/env python
+import os
+import sys
+import errno
+import pwd
+
+#import simplejson as json
+#import drmaa
+new_path = [ os.path.join( os.getcwd(), "lib" ) ]
+new_path.extend( sys.path[1:] ) # remove scripts/ from the path
+sys.path = new_path
+
+from galaxy import eggs
+import pkg_resources
+pkg_resources.require("simplejson")
+import simplejson as json
+pkg_resources.require("drmaa")
+import drmaa
+
+DRMAA_jobTemplate_attributes = [ 'args', 'remoteCommand', 'outputPath', 'errorPath', 'nativeSpecification',
+                    'name','email','project' ]
+
+def load_job_template_from_file(jt, filename):
+    f = open(filename,'r')
+    data = json.load(f)
+    for attr in DRMAA_jobTemplate_attributes:
+        if attr in data:
+            setattr(jt, attr, data[attr])
+
+def valid_numeric_userid(userid):
+    try:
+        uid = int(userid)
+    except:
+        return False
+    try:
+        pw = pwd.getpwuid(uid)
+    except KeyError:
+        sys.stderr.write("error: User-ID (%d) is not valid.\n" % uid)
+        exit(1)
+    return True
+
+def get_user_id_by_name(username):
+    try:
+        pw = pwd.getpwnam(username)
+    except KeyError:
+        sys.stderr.write("error: User name (%s) is not valid.\n" % username)
+        exit(1)
+    return pw.pw_uid
+def validate_paramters():
+    if len(sys.argv)<3:
+        sys.stderr.write("usage: %s [USER-ID] [JSON-JOB-TEMPLATE-FILE]\n" % sys.argv[0])
+        exit(1)
+
+    userid = sys.argv[1]
+    json_filename = sys.argv[2]
+
+    if valid_numeric_userid(userid):
+        uid = int(userid)
+    else:
+        uid = get_user_id_by_name(userid)
+
+    if uid == 0:
+        sys.stderr.write("error: userid must not be 0 (root)\n")
+        exit(1)
+
+    if not os.path.exists(json_filename):
+        sys.stderr.write("error: JobTemplate file (%s) doesn't exist\n" % ( json_filename ) )
+        exit(1)
+
+    return uid, json_filename
+
+def set_user(uid):
+    try:
+        # Get user's default group and set it to current process to make sure file permissions are inherited correctly
+        # Solves issue with permission denied for JSON files
+        gid = pwd.getpwuid(uid).pw_gid
+        os.setgid(gid)
+        os.setuid(uid)
+    except OSError, e:
+        if e.errno == errno.EPERM:
+            sys.stderr.write("error: setuid(%d) failed: permission denied. Did you setup 'sudo' correctly for this script?\n" % uid )
+            exit(1)
+        else:
+            pass
+    if os.getuid()==0:
+        sys.stderr.write("error: UID is 0 (root) after changing user. This script should not be run as root. aborting.\n" )
+        exit(1)
+    if os.geteuid()==0:
+        sys.stderr.write("error: EUID is 0 (root) after changing user. This script should not be run as root. aborting.\n" )
+        exit(1)
+def main():
+    userid, json_filename = validate_paramters()
+    set_user(userid)        
+    s = drmaa.Session()
+    s.initialize()
+    jt = s.createJobTemplate()
+    load_job_template_from_file(jt, json_filename)
+    # runJob will raise if there's a submission error
+    jobId = s.runJob(jt)
+    s.deleteJobTemplate(jt)
+    s.exit()
+
+    # Print the Job-ID and exit. Galaxy will pick it up from there.
+    print jobId
+
+if __name__ == "__main__":
+    main()
+

scripts/external_chown_script.py

+#!/usr/bin/env python
+import os
+import sys
+import errno
+import pwd
+#import drmaa
+new_path = [ os.path.join( os.getcwd(), "lib" ) ]
+new_path.extend( sys.path[1:] ) # remove scripts/ from the path
+sys.path = new_path
+
+from galaxy import eggs
+import pkg_resources
+pkg_resources.require("simplejson")
+import simplejson as json
+pkg_resources.require("drmaa")
+import drmaa
+
+
+
+def validate_paramters():
+    if len(sys.argv)<4:
+        sys.stderr.write("usage: %s path user_name gid\n" % sys.argv[0])
+        exit(1)
+
+    path  = sys.argv[1]
+    galaxy_user_name  = sys.argv[2]
+    gid  = sys.argv[3]
+
+
+
+    return path, galaxy_user_name, gid 
+
+def main():
+    path, galaxy_user_name, gid  = validate_paramters()
+    os.system('chown  %s %s' %(galaxy_user_name, path))
+    os.system('chgrp  %s %s' %(gid, path))
+
+
+
+if __name__ == "__main__":
+    main()
+
+

tools/data_source/upload.py

                     dataset.path = uncompressed
                 else:
                     shutil.move( uncompressed, dataset.path )
+                os.chmod(dataset.path, 0644)
             dataset.name = dataset.name.rstrip( '.gz' )
             data_type = 'gzip'
         if not data_type and bz2 is not None:
                         dataset.path = uncompressed
                     else:
                         shutil.move( uncompressed, dataset.path )
+                    os.chmod(dataset.path, 0644)
                 dataset.name = dataset.name.rstrip( '.bz2' )
                 data_type = 'bz2'
         if not data_type:
                             dataset.path = uncompressed
                         else:
                             shutil.move( uncompressed, dataset.path )
+                        os.chmod(dataset.path, 0644)
                         dataset.name = uncompressed_name
                 data_type = 'zip'
         if not data_type:
                 pass
         else:
             # This should not happen, but it's here just in case
-            shutil.copy( dataset.path, output_path )
+            shutil.move( dataset.path, output_path )
+            try:
+               os.chmod(output_path,0644)
+            except:
+               pass
     elif link_data_only == 'copy_files':
         shutil.move( dataset.path, output_path )
+        try:
+           os.chmod(output_path,0644)
+        except:
+           pass
+
     # Write the job info
     stdout = stdout or 'uploaded %s file' % data_type
     info = dict( type = 'dataset',

tools/ngs_rna/tophat_wrapper.xml

-<tool id="tophat" name="Tophat for Illumina" version="1.5.0">
+<tool id="tophat" name="Tophat" version="1.2.1">
     <description>Find splice junctions using RNA-seq data</description>
-    <version_command>tophat --version</version_command>
     <requirements>
+        <requirement type="package">samtools</requirement>
+        <requirement type="package">bowtie</requirement>
         <requirement type="package">tophat</requirement>
     </requirements>
     <command interpreter="python">
             #if $refGenomeSource.genomeSource == "history":
                 --own-file=$refGenomeSource.ownFile
             #else:
-                --indexes-path="${ filter( lambda x: str( x[0] ) == str( $refGenomeSource.index ), $__app__.tool_data_tables[ 'tophat_indexes' ].get_fields() )[0][-1] }"
+                #if $refGenomeSource.genomeSource == "indexed":
+                    --indexes-path="${ filter( lambda x: str( x[0] ) == str( $refGenomeSource.index ), $__app__.tool_data_tables[ 'tophat_indexes' ].get_fields() )[0][-1] }"
+                #else:
+                    --indexes-path="${ filter( lambda x: str( x[0] ) == str( $singlePaired.input1.metadata.dbkey ), $__app__.tool_data_tables[ 'tophat_indexes' ].get_fields() )[0][-1] }"
+                #end if
             #end if
 
             ## Are reads single-end or paired?
             --single-paired=$singlePaired.sPaired
 
             ## First input file always required.
-            --input1=$input1
+            --input1=$singlePaired.input1
 
             ## Set params based on whether reads are single-end or paired.
             #if $singlePaired.sPaired == "single":
                     ## Supplying junctions parameters.
                     #if $singlePaired.sParams.own_junctions.use_junctions == "Yes":
                         #if $singlePaired.sParams.own_junctions.gene_model_ann.use_annotations == "Yes":
-                            -G $singlePaired.sParams.own_junctions.gene_model_ann.gene_annotation_model
+                                #if $singlePaired.sParams.own_junctions.gene_model_ann.annotationSource.reference_annotation_file == "indexed":
+                                     -G "${ filter( lambda x: str( x[0] ) == str( $refGenomeSource.index ), $__app__.tool_data_tables[ 'gtf_index' ].get_fields() )[0][-1] }"
+                                #else:
+                                     #if $singlePaired.sParams.own_junctions.gene_model_ann.annotationSource.reference_annotation_file == "attribute":
+                                           -G "${ filter( lambda x: str( x[0] ) == str( $singlePaired.input1.metadata.dbkey ), $__app__.tool_data_tables[ 'gtf_index' ].get_fields() )[0][-1] }"
+                                     #else
+                                           -G "${singlePaired.sParams.own_junctions.gene_model_ann.annotationSource.owngtfFile}"
+                                     #end if
+                                #end if
                         #end if
                         #if $singlePaired.sParams.own_junctions.raw_juncs.use_juncs == "Yes":
                             -j $singlePaired.sParams.own_junctions.raw_juncs.raw_juncs
                     ## Supplying junctions parameters.
                     #if $singlePaired.pParams.own_junctions.use_junctions == "Yes":
                         #if $singlePaired.pParams.own_junctions.gene_model_ann.use_annotations == "Yes":
-                            -G $singlePaired.pParams.own_junctions.gene_model_ann.gene_annotation_model
+                                #if $singlePaired.pParams.own_junctions.gene_model_ann.annotationSource.reference_annotation_file == "indexed":
+                                     -G "${ filter( lambda x: str( x[0] ) == str( $refGenomeSource.index ), $__app__.tool_data_tables[ 'gtf_index' ].get_fields() )[0][-1] }"
+                                #else:
+                                     #if $singlePaired.pParams.own_junctions.gene_model_ann.annotationSource.reference_annotation_file == "attribute":
+                                           -G "${ filter( lambda x: str( x[0] ) == str( $singlePaired.input1.metadata.dbkey ), $__app__.tool_data_tables[ 'gtf_index' ].get_fields() )[0][-1] }"
+                                     #else
+                                           -G "${singlePaired.sParams.own_junctions.gene_model_ann.annotationSource.owngtfFile}"
+                                     #end if
+                                #end if
                         #end if
                         #if $singlePaired.pParams.own_junctions.raw_juncs.use_juncs == "Yes":
                             -j $singlePaired.pParams.own_junctions.raw_juncs.raw_juncs
             #end if
     </command>
     <inputs>
-        <param format="fastqsanger" name="input1" type="data" label="RNA-Seq FASTQ file" help="Nucleotide-space: Must have Sanger-scaled quality values with ASCII offset 33" />
         <conditional name="refGenomeSource">
           <param name="genomeSource" type="select" label="Will you select a reference genome from your history or use a built-in index?" help="Built-ins were indexed using default options">
             <option value="indexed">Use a built-in index</option>
             <option value="history">Use one from the history</option>
+            <option value="attribute">Use input fastq metadata.dbkey attribute</option>
           </param>
           <when value="indexed">
             <param name="index" type="select" label="Select a reference genome" help="If your genome of interest is not listed, contact the Galaxy team">
-              <options from_data_table="tophat_indexes">
-                <filter type="sort_by" column="2"/>
-                <validator type="no_options" message="No indexes are available for the selected input dataset"/>
-              </options>
+              <options from_data_table="tophat_indexes" />
             </param>
           </when>
           <when value="history">
               <option value="paired">Paired-end</option>
             </param>
             <when value="single">
+              <param format="fastqsanger" name="input1" type="data" label="RNA-Seq FASTQ file" help="Must have Sanger-scaled quality values with ASCII offset 33"/>
               <conditional name="sParams">
                 <param name="sSettingsType" type="select" label="TopHat settings to use" help="You can use the default settings or set custom values for any of Tophat's parameters.">
                   <option value="preSet">Use Defaults</option>
                              </param>
                              <when value="No" />
                              <when value="Yes">
-                               <param format="gtf" name="gene_annotation_model" type="data" label="Gene Model Annotations" help="TopHat will use the exon records in this file to build a set of known splice junctions for each gene, and will attempt to align reads to these junctions even if they would not normally be covered by the initial mapping."/>
+                               <conditional name="annotationSource">
+                                 <param name="reference_annotation_file" type="select" label="Please select a reference Aonnotation">
+                                   <option value="indexed">Use a built-in index</option>
+                                   <option value="history">Use one from the history</option>
+                                   <option value="attribute">Use metadata.bkey attribute from input fastq file</option>
+                                 </param>
+                                 <when value="history">
+                                   <param name="owngtfFile" type="data" format="gff3, gtf" label="Select a reference annotation file" />
+                                 </when>
+                               </conditional>
                              </when>
                           </conditional>
                           <conditional name="raw_juncs">
                         <param name="max_coverage_intron" type="integer" value="20000" label="Maximum intron length that may be found during coverage search" />
                     </when>
                     <when value="No" />
-                  </conditional>
+                  </conditional>     
                   <param name="microexon_search" type="select" label="Use Microexon Search" help="With this option, the pipeline will attempt to find alignments incident to microexons. Works only for reads 50bp or longer.">
                     <option value="No">No</option>
                     <option value="Yes">Yes</option>
               </conditional>  <!-- sParams -->
             </when>  <!--  single -->
             <when value="paired">
-              <param format="fastqsanger" name="input2" type="data" label="RNA-Seq FASTQ file" help="Nucleotide-space: Must have Sanger-scaled quality values with ASCII offset 33" />
+              <param format="fastqsanger" name="input1" type="data" label="RNA-Seq FASTQ file" help="Must have Sanger-scaled quality values with ASCII offset 33"/>
+              <param format="fastqsanger" name="input2" type="data" label="RNA-Seq FASTQ file" help="Must have Sanger-scaled quality values with ASCII offset 33"/>
               <param name="mate_inner_distance" type="integer" value="20" label="Mean Inner Distance between Mate Pairs" />
               <conditional name="pParams">
                 <param name="pSettingsType" type="select" label="TopHat settings to use" help="For most mapping needs use Commonly used settings. If you want full control use Full parameter list">
                              </param>
                              <when value="No" />
                              <when value="Yes">
-                               <param format="gtf" name="gene_annotation_model" type="data" label="Gene Model Annotations" help="TopHat will use the exon records in this file to build a set of known splice junctions for each gene, and will attempt to align reads to these junctions even if they would not normally be covered by the initial mapping."/>
+                               <conditional name="annotationSource">
+                                 <param name="reference_annotation_file" type="select" label="Please select a reference Aonnotation">
+                                   <option value="indexed">Use a built-in index</option>
+                                   <option value="history">Use one from the history</option>
+                                   <option value="attribute">Use metadata.bkey attribute from input fastq files</option>
+                                 </param>
+                                 <when value="history">
+                                   <param name="owngtfFile" type="data" format="gff3, gtf" label="Select a reference annotation file" />
+                                 </when>
+                               </conditional>
                              </when>
                           </conditional>
                           <conditional name="raw_juncs">
                       ( singlePaired['sParams']['indel_search']['allow_indel_search'] == 'Yes' ) ) or 
                     ( ( 'pParams' in singlePaired ) and ( 'indel_search' in singlePaired['pParams'] ) and 
                       ( singlePaired['pParams']['indel_search']['allow_indel_search'] == 'Yes' ) )
-                )
+                ) 
             </filter>
             <actions>
               <conditional name="refGenomeSource.genomeSource">
     </outputs>
 
     <tests>
-        <!-- Test base-space single-end reads with pre-built index and preset parameters -->
+        <!-- Test single-end reads with pre-built index and preset parameters -->
         <test>
             <!-- TopHat commands:
-            tophat -o tmp_dir -p 1 tophat_in1 test-data/tophat_in2.fastqsanger
-            Rename the files in tmp_dir appropriately
+            tophat -o tmp_dir -p 1 /afs/bx.psu.edu/depot/data/genome/test/tophat/tophat_in1 test-data/tophat_in2.fastqsanger
             -->
-            <param name="input1" ftype="fastqsanger" value="tophat_in2.fastqsanger" />
             <param name="genomeSource" value="indexed" />
             <param name="index" value="tophat_test" />
             <param name="sPaired" value="single" />
+            <param name="input1" ftype="fastqsanger" value="tophat_in2.fastqsanger" />
             <param name="sSettingsType" value="preSet" />
             <output name="junctions" file="tophat_out1j.bed" />
             <output name="accepted_hits" file="tophat_out1h.bam" compare="sim_size" />
         </test>
-        <!-- Test using base-space test data: paired-end reads, index from history. -->
+        <!-- Test using test data: paired-end reads, index from history. -->
         <test>
             <!-- TopHat commands:
             bowtie-build -f test-data/tophat_in1.fasta tophat_in1
             tophat -o tmp_dir -p 1 -r 20 tophat_in1 test-data/tophat_in2.fastqsanger test-data/tophat_in3.fastqsanger
-            Rename the files in tmp_dir appropriately
             -->
-            <param name="input1" ftype="fastqsanger" value="tophat_in2.fastqsanger" />
             <param name="genomeSource" value="history" />
             <param name="ownFile" ftype="fasta" value="tophat_in1.fasta" />
             <param name="sPaired" value="paired" />
+            <param name="input1" ftype="fastqsanger" value="tophat_in2.fastqsanger" />
             <param name="input2" ftype="fastqsanger" value="tophat_in3.fastqsanger" />
             <param name="mate_inner_distance" value="20" />
             <param name="pSettingsType" value="preSet" />
             <output name="junctions" file="tophat_out2j.bed" />
             <output name="accepted_hits" file="tophat_out2h.bam" compare="sim_size" />
         </test>
-        <!-- Test base-space single-end reads with user-supplied reference fasta and full parameters -->
+        <!-- Test single-end reads with user-supplied reference fasta and full parameters -->
         <test>
             <!-- Tophat commands:
             bowtie-build -f test-data/tophat_in1.fasta tophat_in1
             tophat -o tmp_dir -p 1 -a 8 -m 0 -i 70 -I 500000 -F 0.15 -g 40 +allow-indels +coverage-search +min-coverage-intron 50 +max-coverage-intro 20000 +segment-mismatches 2 +segment-length 25 +closure-search +min-closure-exon 50 +min-closure-intron 50 +max-closure-intro 5000 +microexon-search tophat_in1 test-data/tophat_in2.fastqsanger
             Replace the + with double-dash
-            Rename the files in tmp_dir appropriately
             -->
-            <param name="input1" ftype="fastqsanger" value="tophat_in2.fastqsanger"/>
             <param name="genomeSource" value="history"/>
             <param name="ownFile" value="tophat_in1.fasta"/>
             <param name="sPaired" value="single"/>
+            <param name="input1" ftype="fastqsanger" value="tophat_in2.fastqsanger"/>
             <param name="sSettingsType" value="full"/>
             <param name="library_type" value="FR Unstranded"/>
             <param name="anchor_length" value="8"/>
             <output name="junctions" file="tophat_out3j.bed" />
             <output name="accepted_hits" file="tophat_out3h.bam" compare="sim_size" />
         </test>
-        <!-- Test base-space paired-end reads with user-supplied reference fasta and full parameters -->
+        <!-- Test paired-end reads with user-supplied reference fasta and full parameters -->
         <test>
             <!-- TopHat commands:
-            tophat -o tmp_dir -r 20 -p 1 -a 8 -m 0 -i 70 -I 500000 -F 0.15 -g 40 +coverage-search +min-coverage-intron 50 +max-coverage-intro 20000 +segment-mismatches 2 +segment-length 25 +closure-search +min-closure-exon 50 +min-closure-intron 50 +max-closure-intron 5000 +microexon-search tophat_in1 test-data/tophat_in2.fastqsanger test-data/tophat_in3.fastqsanger
+            tophat -o tmp_dir -r 20 -p 1 -a 8 -m 0 -i 70 -I 500000 -F 0.15 -g 40 +coverage-search +min-coverage-intron 50 +max-coverage-intro 20000 +segment-mismatches 2 +segment-length 25 +closure-search +min-closure-exon 50 +min-closure-intron 50 +max-closure-intron 5000 +microexon-search /afs/bx.psu.edu/depot/data/genome/test/tophat/tophat_in1 test-data/tophat_in2.fastqsanger test-data/tophat_in3.fastqsanger
             Replace the + with double-dash
-            Rename the files in tmp_dir appropriately
             -->
-            <param name="input1" ftype="fastqsanger" value="tophat_in2.fastqsanger"/>
             <param name="genomeSource" value="indexed"/>
             <param name="index" value="tophat_test"/>
             <param name="sPaired" value="paired"/>
+            <param name="input1" ftype="fastqsanger" value="tophat_in2.fastqsanger"/>
             <param name="input2" ftype="fastqsanger" value="tophat_in3.fastqsanger"/>
             <param name="mate_inner_distance" value="20"/>
             <param name="pSettingsType" value="full"/>
 
 .. _BED: http://genome.ucsc.edu/FAQ/FAQformat.html#format1
 .. _BAM: http://samtools.sourceforge.net/
-
-Two other possible outputs, depending on the options you choose, are insertions and deletions, both of which are in BED format.
-
+    
 -------
 
 **Tophat settings**
   --mate-std-dev INT                The standard deviation for the distribution on inner distances between mate pairs. The default is 20bp.
   -a/--min-anchor-length INT        The "anchor length". TopHat will report junctions spanned by reads with at least this many bases on each side of the junction. Note that individual spliced     
                                     alignments may span a junction with fewer than this many bases on one side. However, every junction involved in spliced alignments is supported by at least one 
-                                    read with this many bases on each side. This must be at least 3 and the default is 8.
+                                    read with this many bases on each side.  This must be at least 3 and the default is 8.
   -m/--splice-mismatches INT        The maximum number of mismatches that may appear in the "anchor" region of a spliced alignment. The default is 0.
   -i/--min-intron-length INT        The minimum intron length. TopHat will ignore donor/acceptor pairs closer than this many bases apart. The default is 70.
   -I/--max-intron-length INT        The maximum intron length. When searching for junctions ab initio, TopHat will ignore donor/acceptor pairs farther than this many bases apart, except when such a pair is supported by a split segment alignment of a long read. The default is 500000.

universe_wsgi.ini.sample

 # large servers.
 #enable_tool_tags = False
 
+# Enable a feature when running workflows. When enabled, default datasets
+# are selected for "Set at Runtime" inputs from the history such that the
+# same input will not be selected twice, unless there are more inputs than
+# compatible datasets in the history.
+# When False, the most recently added compatible item in the history will
+# be used for each "Set at Runtime" input, independent of others in the Workflow
+#enable_unique_workflow_defaults = False
+
 # Enable Galaxy's "Upload via FTP" interface.  You'll need to install and
 # configure an FTP server (we've used ProFTPd since it can use Galaxy's
 # database for authentication) and set the following two options.
 # currently available are 'pbs' and 'drmaa'.
 #start_job_runners = None
 
+# Uncomment the drmaa_external_runjob_script, drmaa_external_killjob_script, and external_chown_script
+# parameters and point them at the absolute paths of scripts/drmaa_external_runner.py,
+# scripts/drmaa_external_killer.py, and scripts/external_chown_script.py. The scripts directory is
+# located in the top-level galaxy directory. When uncommented, these parameters allow jobs to be
+# submitted to the drmaa queue under the user name of the user submitting the job rather than the
+# galaxy user. For this to work the actual user must log into galaxy, and galaxy authentication must
+# be consistent with the authentication on the server on which the drmaa queue is running (i.e. the
+# username must have an account on that server and be allowed to submit jobs to the queue). The
+# galaxy user must also be given sudo permission in /etc/sudoers to execute
+# scripts/drmaa_external_runner.py and scripts/drmaa_external_killer.py.
+# Example:
+# galaxy  ALL = (root) NOPASSWD: SETENV: /opt/galaxy/scripts/drmaa_external_runner.py
+# galaxy  ALL = (root) NOPASSWD: SETENV: /opt/galaxy/scripts/drmaa_external_killer.py
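+# Since lib/galaxy/jobs/__init__.py in this changeset also invokes external_chown_script via sudo,
+# a matching entry is presumably needed for it as well, e.g.:
+# galaxy  ALL = (root) NOPASSWD: SETENV: /opt/galaxy/scripts/external_chown_script.py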
+# Also, the line
+# Defaults    requiretty
+# in /etc/sudoers must be commented out
+#drmaa_external_runjob_script = /opt/galaxy/scripts/drmaa_external_runner.py
+#drmaa_external_killjob_script = /opt/galaxy/scripts/drmaa_external_killer.py
+#external_chown_script = /opt/galaxy/scripts/external_chown_script.py
+
+# Important if running jobs as the actual user, since environment variables are not passed.
+# This will supersede any other definition of TMPDIR when using drmaa.
+#TMPDIR = /opt/galaxy/database/tmp
+
+
 # The URL for the default runner to use when a tool doesn't explicitly define a
 # runner below.
 #default_cluster_job_runner = local:///
 # run with the runner defined with default_cluster_job_runner.
 
 [galaxy:tool_runners]
-
+binsort = drmaa://-cwd -V -pe threaded 4/
+bwa_wrapper = drmaa://-cwd -V -pe threaded 4/
+unified_genotyper = drmaa://-cwd -V -pe threaded 2/
 biomart = local:///
 encode_db1 = local:///
 hbvar = local:///