Commits

mruffalo committed bf8ddf9

Fix more usage of old cmp function

Conflicts:

lib/bx/gene_reader.py
lib/bx_extras/lrucache.py

Comments (0)

Files changed (3)

distribute_setup.py

 
     # Reverse sort directories.
     if sys.version_info < (2, 4):
-        def sorter(dir1, dir2):
-            return cmp(dir1.name, dir2.name)
-        directories.sort(sorter)
+        directories.sort(key=lambda dir: dir.name)
         directories.reverse()
     else:
         directories.sort(key=operator.attrgetter('name'), reverse=True)

lib/bx/align/tools/tile.py

     """
     Tile maf blocks onto an interval. The resulting block will span the interval
     exactly and contain the column from the highest scoring alignment at each
-    position. 
-    
+    position.
+
     `sources`: list of sequence source names to include in final block
     `index`: an instance that can return maf blocks overlapping intervals
     `ref_src`: source name of the interval (i.e., hg17.chr7)
     base_len = end - start
     blocks = index.get( ref_src, start, end )
     # From low to high score
-    blocks.sort( lambda a, b: cmp( a.score, b.score ) )
+    blocks.sort(key=lambda t: t.score)
     mask = [ -1 ] * base_len
     ref_src_size = None
     for i, block in enumerate( blocks ):
         for j in range( slice_start, slice_end ):
             mask[j-start] = i
     tiled = []
-    for i in range( len( sources ) ): 
+    for i in range( len( sources ) ):
         tiled.append( [] )
     for ss, ee, index in intervals_from_mask( mask ):
         # Interval with no covering alignments
             slice_end = start + ee
             block = blocks[index]
             ref = block.get_component_by_src_start( ref_src )
-            sliced = block.slice_by_component( ref, slice_start, slice_end ) 
+            sliced = block.slice_by_component( ref, slice_start, slice_end )
             sliced = sliced.limit_to_species( sources )
             sliced.remove_all_gap_columns()
             for i, src in enumerate( sources ):
                 if comp:
                     tiled[i].append( comp.text )
                 else:
-                    tiled[i].append( "-" * sliced.text_size )        
+                    tiled[i].append( "-" * sliced.text_size )
     return [ "".join( t ) for t in tiled ]
 
 def intervals_from_mask( mask ):

lib/bx_extras/lrucache.py

 """
 
 from __future__ import generators
+
+# TODO: Remove this in favor of functools.lru_cache
+
 import time
 from heapq import heappush, heappop, heapify
+from functools import total_ordering
 
 __version__ = "0.2"
 __all__ = ['CacheKeyError', 'LRUCache', 'DEFAULT_SIZE']
 
 class CacheKeyError(KeyError):
     """Error raised when cache requests fail
-    
+
     When a cache record is accessed which no longer exists (or never did),
     this error is raised. To avoid it, you may want to check for the existence
     of a cache record before reading or deleting it."""
 
 class LRUCache(object):
     """Least-Recently-Used (LRU) cache.
-    
+
     Instances of this class provide a least-recently-used (LRU) cache. They
     emulate a Python mapping type. You can use an LRU cache more or less like
     a Python dictionary, with the exception that objects you put into the
     cache may be discarded before you take them out.
-    
+
     Some example usage::
-	
+
     cache = LRUCache(32) # new cache
     cache['foo'] = get_file_contents('foo') # or whatever
-	
+
     if 'foo' in cache: # if it's still in cache...
 	    # use cached version
         contents = cache['foo']
         cache['foo'] = contents
 
     print cache.size # Maximum size
-	
+
     print len(cache) # 0 <= len(cache) <= cache.size
-	
+
     cache.size = 10 # Auto-shrink on size assignment
-	
+
     for i in range(50): # note: larger than cache size
         cache[i] = i
-	    
+
     if 0 not in cache: print 'Zero was discarded.'
 
     if 42 in cache:
     for j in cache:   # iterate (in LRU order)
         print j, cache[j] # iterator produces keys, not values
     """
-    
+
+    @total_ordering
     class __Node(object):
         """Record of a cached value. Not for public consumption."""
-        
+
         def __init__(self, key, obj, timestamp):
             object.__init__(self)
             self.key = key
             self.obj = obj
             self.atime = timestamp
             self.mtime = self.atime
-	    
-        def __cmp__(self, other):
-            return cmp(self.atime, other.atime)
+
+        def __lt__(self, other):
+            return self.atime < other.atime
+
+        def __eq__(self, other):
+            return self.atime == other.atime
 
         def __repr__(self):
             return "<%s %s => %s (%s)>" % \
-                   (self.__class__, self.key, self.obj, \
+                   (self.__class__, self.key, self.obj,
                     time.asctime(time.localtime(self.atime)))
 
     def __init__(self, size=DEFAULT_SIZE):
             raise ValueError, size
         elif type(size) is not type(0):
             raise TypeError, size
-        object.__init__(self)	
+        object.__init__(self)
         self.__heap = []
         self.__dict = {}
         self.size = size
         """Maximum size of the cache.
         If more than 'size' elements are added to the cache,
         the least-recently-used ones will be discarded."""
-	
+
     def __len__(self):
         return len(self.__heap)
-    
+
     def __contains__(self, key):
         return self.__dict.has_key(key)
-    
+
     def __setitem__(self, key, obj):
         if self.__dict.has_key(key):
             node = self.__dict[key]
             node = self.__Node(key, obj, time.time())
             self.__dict[key] = node
             heappush(self.__heap, node)
-	
+
     def __getitem__(self, key):
         if not self.__dict.has_key(key):
             raise CacheKeyError(key)
             node.atime = time.time()
             heapify(self.__heap)
             return node.obj
-	
+
     def __delitem__(self, key):
         if not self.__dict.has_key(key):
             raise CacheKeyError(key)
             while len(self.__heap) > value:
                 lru = heappop(self.__heap)
                 del self.__dict[lru.key]
-	    
+
     def __repr__(self):
         return "<%s (%d elements)>" % (str(self.__class__), len(self.__heap))