Commits

Mike Bayer committed 17f2860

- this is 0.5.0
- convert get_multi return value to a list
- temporarily replace Redis lock with the old one, need to test more

  • Parent commits 4f4a5be
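The second bullet is the backwards-incompatible part of this commit: CacheRegion.get_multi() and the backend get_multi() methods now return a list of values ordered to match the keys passed in, rather than a dictionary keyed by those keys. A minimal sketch of adapting calling code, assuming ``region`` is an already-configured CacheRegion and the key names are illustrative:

    keys = ["one", "two", "three"]

    # 0.4.x returned a dict keyed by the original keys:
    #     values = region.get_multi(keys)
    #     first = values["one"]

    # 0.5.0 returns a list positionally matched to ``keys``; rebuild the
    # old dict shape with zip() if calling code still expects it
    values = region.get_multi(keys)
    as_dict = dict(zip(keys, values))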

Files changed (6)

File dogpile/cache/backends/file.py

             return value
 
     def get_multi(self, keys):
-        values = {}
-        for key in keys:
-            values[key] = self.get(key)
-        return values
+        return [self.get(key) for key in keys]
 
     def set(self, key, value):
         with self._dbm_file(True) as dbm:

File dogpile/cache/backends/memcached.py

 
     def get_multi(self, keys):
         values = self.client.get_multi(keys)
-        for key in keys:
-            if not key in values or values[key] is None:
-                values[key] = NO_VALUE
-        return values
+        return [
+            NO_VALUE if key not in values
+            else values[key] for key in keys
+        ]
 
     def set(self, key, value):
         self.client.set(key,

File dogpile/cache/backends/memory.py

         return self._cache.get(key, NO_VALUE)
 
     def get_multi(self, keys):
-        values = {}
-        for key in keys:
-            values[key] = self._cache.get(key, NO_VALUE)
-        return values
+        return [
+            self._cache.get(key, NO_VALUE)
+            for key in keys
+        ]
 
     def set(self, key, value):
         self._cache[key] = value

File dogpile/cache/backends/redis.py

 from __future__ import absolute_import
 from dogpile.cache.api import CacheBackend, NO_VALUE
 from dogpile.cache.compat import pickle
+import random
+import time
 
 redis = None
 
      When left at False, dogpile will coordinate on a regular
      threading mutex.
 
-    :param lock_timeout: integer, number of seconds after acquiring a lock that
-     Redis should expire it.
+    """
 
-    :param lock_sleep: integer, number of seconds to sleep when failed to
-     acquire a lock.
+    # TODO: when lock works
+    #:param lock_timeout: integer, number of seconds after acquiring a lock that
+    # Redis should expire it.
 
-    """
+    #:param lock_sleep: integer, number of seconds to sleep when failed to
+    # acquire a lock.
 
     def __init__(self, arguments):
         self._imports()
         self.port = arguments.pop('port', 6379)
         self.db = arguments.pop('db', 0)
         self.distributed_lock = arguments.get('distributed_lock', False)
-        self.lock_timeout = arguments.get('lock_timeout', None)
-        self.lock_sleep = arguments.get('lock_sleep', 0.1)
+
+        #self.lock_timeout = arguments.get('lock_timeout', None)
+        #self.lock_sleep = arguments.get('lock_sleep', 0.1)
+
         self.redis_expiration_time = arguments.pop('redis_expiration_time', 0)
         self.client = self._create_client()
 
 
     def get_mutex(self, key):
         if self.distributed_lock:
-            return self.client.lock(u"_lock{}".format(key), self.lock_timeout,
-                                    self.lock_sleep)
+            return RedisLock(lambda: self.client, key)
+
+            # TODO: see if we can use this lock, however it is
+            # deadlocking in unit tests right now
+            # return self.client.lock(u"_lock{}".format(key), self.lock_timeout,
+            #                        self.lock_sleep)
         else:
             return None
 
 
     def get_multi(self, keys):
         values = self.client.mget(keys)
-        values = [pickle.loads(v) if v is not None else NO_VALUE
+        return [pickle.loads(v) if v is not None else NO_VALUE
                   for v in values]
-        return dict(zip(keys, values))
 
     def set(self, key, value):
         if self.redis_expiration_time:
 
     def delete_multi(self, keys):
         self.client.delete(*keys)
+
+
+class RedisLock(object):
+    """Simple distributed lock using Redis.
+
+    This is an adaptation of the memcached lock featured at
+    http://amix.dk/blog/post/19386
+
+    """
+    def __init__(self, client_fn, key):
+        self.client_fn = client_fn
+        self.key = "_lock" + key
+
+    def acquire(self, wait=True):
+        client = self.client_fn()
+        i = 0
+        while True:
+            if client.setnx(self.key, 1):
+                return True
+            elif not wait:
+                return False
+            else:
+                sleep_time = (((i + 1) * random.random()) + 2 ** i) / 2.5
+                time.sleep(sleep_time)
+            if i < 15:
+                i += 1
+
+    def release(self):
+        client = self.client_fn()
+        client.delete(self.key)
+
+
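The interim RedisLock above is what get_mutex() now returns when distributed_lock is enabled; dogpile drives it through acquire()/release(), and acquire() retries SETNX with a randomized, exponentially growing sleep between attempts. A minimal sketch of exercising it directly, assuming a Redis server reachable at the host/port shown (those values and the StrictRedis client are illustrative, not part of this commit):

    import redis
    from dogpile.cache.backends.redis import RedisLock

    client = redis.StrictRedis(host="127.0.0.1", port=6379, db=0)
    lock = RedisLock(lambda: client, "some_key")

    # non-blocking attempt, the same pattern the mutex tests below use
    if lock.acquire(wait=False):
        try:
            pass  # do the work protected by the lock
        finally:
            lock.release()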

File dogpile/cache/region.py

          to the dogpile system.  May be passed as an integer number
          of seconds, or as a ``datetime.timedelta`` value.
 
-         .. versionadded 0.4.4
+         .. versionadded 0.5.0
             ``expiration_time`` may be optionally passed as a
             ``datetime.timedelta`` value.
 
          in a chain to ultimately wrap the original backend,
          so that custom functionality augmentation can be applied.
 
-         .. versionadded:: 0.4.4
+         .. versionadded:: 0.5.0
 
          .. seealso::
 
     def get_multi(self, keys, expiration_time=None, ignore_expiration=False):
         """Return multiple values from the cache, based on the given keys.
 
-        If the value is not present, the method returns the token
-        ``NO_VALUE``. ``NO_VALUE`` evaluates to False, but is separate from
+        Returns values as a list matching the keys given.
+
+        E.g.::
+
+            values = region.get_multi(["one", "two", "three"])
+
+        To convert values to a dictionary, use ``zip()``::
+
+            keys = ["one", "two", "three"]
+            values = region.get_multi(keys)
+            dictionary = dict(zip(keys, values))
+
+        Keys which aren't present in the cache are returned as
+        the ``NO_VALUE`` token.  ``NO_VALUE`` evaluates to False,
+        but is separate from
         ``None`` to distinguish between a cached value of ``None``.
 
         By default, the configured expiration time of the
         token is returned.  Passing the flag ``ignore_expiration=True``
         bypasses the expiration time check.
 
-        .. versionadded:: 0.4.4
+        .. versionadded:: 0.5.0
 
         """
         if self.key_mangler:
             keys = map(lambda key: self.key_mangler(key), keys)
-        values = {}
+
         backend_values = self.backend.get_multi(keys)
 
         _unexpired_value_fn = self._unexpired_value_fn(
                             expiration_time, ignore_expiration)
-        values = dict(
-                    (key, value.payload if value is not NO_VALUE else value)
-                    for key, value in
+        return [
+                    value.payload if value is not NO_VALUE else value
+                    for value in
                     (
-                        (key, _unexpired_value_fn(value))
-                        for key, value in backend_values.items()
+                        _unexpired_value_fn(value) for value in
+                        backend_values
                     )
-                )
-        return values
+                ]
 
     def get_or_create(self, key, creator, expiration_time=None,
                                 should_cache_fn=None):
 
         orig_to_mangled = dict(zip(sorted_unique_keys, mangled_keys))
 
-        values = self.backend.get_multi(mangled_keys)
+        values = dict(zip(mangled_keys, self.backend.get_multi(mangled_keys)))
 
         for orig_key, mangled_key in orig_to_mangled.items():
             with Lock(
     def set_multi(self, mapping):
         """Place new values in the cache under the given keys.
 
-        .. versionadded:: 0.4.4
+        .. versionadded:: 0.5.0
 
         """
 
         This operation is idempotent (can be called multiple times, or on a
         non-existent key, safely)
 
-        .. versionadded:: 0.4.4
+        .. versionadded:: 0.5.0
 
         """
 
          end of the day, week or time period" and "cache until a certain date
          or time passes".
 
-         .. versionchanged:: 0.4.4
+         .. versionchanged:: 0.5.0
             ``expiration_time`` may be passed as a callable to
             :meth:`.CacheRegion.cache_on_arguments`.
 
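All of the versionadded/versionchanged markers in this file move from 0.4.4 to 0.5.0, the release that also accepts ``expiration_time`` as a ``datetime.timedelta`` and documents the new list-returning get_multi(). A minimal sketch against this API, using the memory backend with illustrative keys:

    import datetime
    from dogpile.cache import make_region

    region = make_region().configure(
        "dogpile.cache.memory",
        expiration_time=datetime.timedelta(hours=1),  # timedelta accepted as of 0.5.0
    )

    region.set_multi({"one": 1, "two": 2})

    # a list ordered like the keys; the missing "three" comes back as NO_VALUE,
    # and ignore_expiration=True skips the expiration check
    values = region.get_multi(["one", "two", "three"], ignore_expiration=True)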

File tests/cache/_fixtures.py

         reg.set('key2', key2)
         reg.set('key3', key3)
         values = reg.get_multi(['key1', 'key2', 'key3'])
-        eq_(key1, values['key1'])
-        eq_(key2, values['key2'])
-        eq_(key3, values['key3'])
+        eq_(
+            [key1, key2, key3], values
+        )
 
     def test_region_get_nothing_multiple(self):
         reg = self._region()
         values = {'key1': 'value1', 'key3': 'value3', 'key5': 'value5'}
         reg.set_multi(values)
         reg_values = reg.get_multi(['key1', 'key2', 'key3', 'key4', 'key5', 'key6'])
-        eq_(reg_values['key1'], values['key1'])
-        eq_(reg_values['key2'], NO_VALUE)
-        eq_(reg_values['key3'], values['key3'])
-        eq_(reg_values['key4'], NO_VALUE)
-        eq_(reg_values['key5'], values['key5'])
-        eq_(reg_values['key6'], NO_VALUE)
+        eq_(
+            reg_values,
+            ["value1", NO_VALUE, "value3", NO_VALUE,
+                "value5", NO_VALUE
+            ]
+        )
 
     def test_region_delete_multiple(self):
         reg = self._region()
 
         ac = mutex.acquire()
         assert ac
-        ac2 = mutex.acquire(wait=False)
+        ac2 = mutex.acquire(False)
         assert not ac2
         mutex.release()
         ac3 = mutex.acquire()
             m2 = backend.get_mutex("bar")
             try:
                 m1.acquire()
-                assert m2.acquire(wait=False)
-                assert not m2.acquire(wait=False)
+                assert m2.acquire(False)
+                assert not m2.acquire(False)
                 m2.release()
 
-                assert m2.acquire(wait=False)
-                assert not m2.acquire(wait=False)
+                assert m2.acquire(False)
+                assert not m2.acquire(False)
                 m2.release()
             finally:
                 m1.release()