Commits

Antoine Bertin committed 9954516 Merge

Merged zzzeek/dogpile.cache into master

Comments (0)

Files changed (13)

docs/build/changelog.rst

 Changelog
 ==============
 .. changelog::
+    :version: 0.5.3
+
+    .. change::
+        :tags: bug
+        :pullreq: 9
+
+      Fixed a format call in the redis backend which would otherwise fail
+      on Python 2.6; courtesy Jeff Dairiki.
+
+.. changelog::
+    :version: 0.5.2
+    :released: Fri Nov 15 2013
+
+    .. change::
+        :tags: bug
+
+      Fixes to routines on Windows, including that the default unit tests
+      now pass, and an adjustment to the "soft expiration" feature to ensure
+      the expiration works given Windows' time.time() behavior.
+
+    .. change::
+        :tags: bug
+
+      Added py2.6 compatibility for unsupported ``total_seconds()`` call
+      in region.py
+
+    .. change::
+        :tags: feature
+        :tickets: 44
+
+      Added a new argument ``lock_factory`` to the :class:`.DBMBackend`
+      implementation.  This allows for drop-in replacement of the default
+      :class:`.FileLock` backend, which builds on ``os.flock()`` and only
+      supports Unix platforms.  A new abstract base :class:`.AbstractFileLock`
+      has been added to provide a common base for custom lock implementations.
+      The documentation points to an example thread-based rw lock which is
+      now tested on Windows.
+
+.. changelog::
     :version: 0.5.1
     :released: Thu Oct 10 2013
 

docs/build/usage.rst

 
     from dogpile.cache import make_region
 
-    region = make_region("dictionary")
+    region = make_region("myregion")
+
+    region.configure("dictionary")
 
     data = region.set("somekey", "somevalue")
 

dogpile/cache/__init__.py

-__version__ = '0.5.1'
+__version__ = '0.5.3'
 
 from .region import CacheRegion, register_backend, make_region

dogpile/cache/backends/file.py

 from dogpile.cache import compat
 from dogpile.cache import util
 import os
-import fcntl
 
-__all__ = 'DBMBackend', 'FileLock'
+__all__ = 'DBMBackend', 'FileLock', 'AbstractFileLock'
 
 class DBMBackend(CacheBackend):
     """A file-backend using a dbm file to store keys.
     concurrent writes, the other is to coordinate
     value creation (i.e. the dogpile lock).  By default,
     these lockfiles use the ``flock()`` system call
-    for locking; this is only available on Unix
-    platforms.
+    for locking; this is **only available on Unix
+    platforms**.   An alternative lock implementation, such as one
+    which is based on threads or uses a third-party system
+    such as `portalocker <https://pypi.python.org/pypi/portalocker>`_,
+    can be dropped in using the ``lock_factory`` argument
+    in conjunction with the :class:`.AbstractFileLock` base class.
 
     Currently, the dogpile lock is against the entire
     DBM file, not per key.   This means there can
      suffix ".dogpile.lock" to the DBM filename. If
      False, then dogpile.cache uses the default dogpile
      lock, a plain thread-based mutex.
+    :param lock_factory: a function or class which provides
+     for a read/write lock.  Defaults to :class:`.FileLock`.
+     Custom implementations need to implement context-manager
+     based ``read()`` and ``write()`` functions - the
+     :class:`.AbstractFileLock` class is provided as a base class
+     which provides these methods based on individual read/write lock
+     functions.  E.g. to replace the lock with the dogpile.core
+     :class:`.ReadWriteMutex`::
+
+        from dogpile.core.readwrite_lock import ReadWriteMutex
+        from dogpile.cache.backends.file import AbstractFileLock
+
+        class MutexLock(AbstractFileLock):
+            def __init__(self, filename):
+                self.mutex = ReadWriteMutex()
+
+            def acquire_read_lock(self, wait):
+                ret = self.mutex.acquire_read_lock(wait)
+                return wait or ret
+
+            def acquire_write_lock(self, wait):
+                ret = self.mutex.acquire_write_lock(wait)
+                return wait or ret
+
+            def release_read_lock(self):
+                return self.mutex.release_read_lock()
+
+            def release_write_lock(self):
+                return self.mutex.release_write_lock()
+
+        from dogpile.cache import make_region
+
+        region = make_region().configure(
+            "dogpile.cache.dbm",
+            expiration_time=300,
+            arguments={
+                "filename": "file.dbm",
+                "lock_factory": MutexLock
+            }
+        )
+
+     While the included :class:`.FileLock` uses ``os.flock()``, a
+     windows-compatible implementation can be built using a library
+     such as `portalocker <https://pypi.python.org/pypi/portalocker>`_.
+
+     .. versionadded:: 0.5.2
+
 
 
     """
                         )
         dir_, filename = os.path.split(self.filename)
 
+        self.lock_factory = arguments.get("lock_factory", FileLock)
         self._rw_lock = self._init_lock(
                                 arguments.get('rw_lockfile'),
                                 ".rw.lock", dir_, filename)
 
     def _init_lock(self, argument, suffix, basedir, basefile, wrapper=None):
         if argument is None:
-            lock = FileLock(os.path.join(basedir, basefile + suffix))
+            lock = self.lock_factory(os.path.join(basedir, basefile + suffix))
         elif argument is not False:
-            lock = FileLock(
+            lock = self.lock_factory(
                         os.path.abspath(
                             os.path.normpath(argument)
                         ))
                 except KeyError:
                     pass
 
-class FileLock(object):
-    """Use lockfiles to coordinate read/write access to a file.
+class AbstractFileLock(object):
+    """Coordinate read/write access to a file.
+
+    This is typically a file-based lock, but it doesn't necessarily have to be.
+
+    The default implementation here is :class:`.FileLock`.
+
+    Implementations should provide the following methods:
+
+        * __init__()
+        * acquire_read_lock()
+        * acquire_write_lock()
+        * release_read_lock()
+        * release_write_lock()
+
+    The ``__init__()`` method accepts a single argument "filename", which
+    may be used as the "lock file", for those implementations that use a lock
+    file.
+
+    Note that multithreaded environments must provide a thread-safe
+    version of this lock.  The recommended approach for file-descriptor-based
+    locks is to use a Python ``threading.local()`` so that a unique file descriptor
+    is held per thread.  See the source code of :class:`.FileLock` for an
+    implementation example.
 
-    Only works on Unix systems, using
-    `fcntl.flock() <http://docs.python.org/library/fcntl.html>`_.
 
     """
 
     def __init__(self, filename):
-        self._filedescriptor = compat.threading.local()
-        self.filename = filename
+        """Constructor, is given the filename of a potential lockfile.
+
+        The usage of this filename is optional and no file is
+        created by default.
+
+        Raises ``NotImplementedError`` by default, must be
+        implemented by subclasses.
+        """
+        raise NotImplementedError()
 
     def acquire(self, wait=True):
+        """Acquire the "write" lock.
+
+        This is a direct call to :meth:`.AbstractFileLock.acquire_write_lock`.
+
+        """
         return self.acquire_write_lock(wait)
 
     def release(self):
-        self.release_write_lock()
+        """Release the "write" lock.
 
-    @property
-    def is_open(self):
-        return hasattr(self._filedescriptor, 'fileno')
+        This is a direct call to :meth:`.AbstractFileLock.release_write_lock`.
+
+        """
+        self.release_write_lock()
 
     @contextmanager
     def read(self):
+        """Provide a context manager for the "read" lock.
+
+        This method makes use of :meth:`.AbstractFileLock.acquire_read_lock`
+        and :meth:`.AbstractFileLock.release_read_lock`
+
+        """
+
         self.acquire_read_lock(True)
         try:
             yield
 
     @contextmanager
     def write(self):
+        """Provide a context manager for the "write" lock.
+
+        This method makes use of :meth:`.AbstractFileLock.acquire_write_lock`
+        and :meth:`.AbstractFileLock.release_write_lock`
+
+        """
+
         self.acquire_write_lock(True)
         try:
             yield
         finally:
             self.release_write_lock()
 
+    @property
+    def is_open(self):
+        """optional method."""
+        raise NotImplementedError()
+
+    def acquire_read_lock(self, wait):
+        """Acquire a 'reader' lock.
+
+        Raises ``NotImplementedError`` by default, must be
+        implemented by subclasses.
+        """
+        raise NotImplementedError()
+
+    def acquire_write_lock(self, wait):
+        """Acquire a 'write' lock.
+
+        Raises ``NotImplementedError`` by default, must be
+        implemented by subclasses.
+        """
+        raise NotImplementedError()
+
+    def release_read_lock(self):
+        """Release a 'reader' lock.
+
+        Raises ``NotImplementedError`` by default, must be
+        implemented by subclasses.
+        """
+        raise NotImplementedError()
+
+    def release_write_lock(self):
+        """Release a 'writer' lock.
+
+        Raises ``NotImplementedError`` by default, must be
+        implemented by subclasses.
+        """
+        raise NotImplementedError()
+
+class FileLock(AbstractFileLock):
+    """Use lockfiles to coordinate read/write access to a file.
+
+    Only works on Unix systems, using
+    `fcntl.flock() <http://docs.python.org/library/fcntl.html>`_.
+
+    """
+
+    def __init__(self, filename):
+        self._filedescriptor = compat.threading.local()
+        self.filename = filename
+
+    @util.memoized_property
+    def _module(self):
+        import fcntl
+        return fcntl
+
+    @property
+    def is_open(self):
+        return hasattr(self._filedescriptor, 'fileno')
+
     def acquire_read_lock(self, wait):
-        return self._acquire(wait, os.O_RDONLY, fcntl.LOCK_SH)
+        return self._acquire(wait, os.O_RDONLY, self._module.LOCK_SH)
 
     def acquire_write_lock(self, wait):
-        return self._acquire(wait, os.O_WRONLY, fcntl.LOCK_EX)
+        return self._acquire(wait, os.O_WRONLY, self._module.LOCK_EX)
 
     def release_read_lock(self):
         self._release()
         fileno = os.open(self.filename, wrflag)
         try:
             if not wait:
-                lockflag |= fcntl.LOCK_NB
-            fcntl.flock(fileno, lockflag)
+                lockflag |= self._module.LOCK_NB
+            self._module.flock(fileno, lockflag)
         except IOError:
             os.close(fileno)
             if not wait:
         except AttributeError:
             return
         else:
-            fcntl.flock(fileno, fcntl.LOCK_UN)
+            self._module.flock(fileno, self._module.LOCK_UN)
             os.close(fileno)
             del self._filedescriptor.fileno

dogpile/cache/backends/redis.py

 
     def get_mutex(self, key):
         if self.distributed_lock:
-            return self.client.lock(u('_lock{}').format(key), self.lock_timeout,
-                                    self.lock_sleep)
+            return self.client.lock(u('_lock{0}').format(key),
+                                    self.lock_timeout, self.lock_sleep)
         else:
             return None
 

dogpile/cache/compat.py

 py2k = sys.version_info < (3, 0)
 py3k = sys.version_info >= (3, 0)
 py32 = sys.version_info >= (3, 2)
+py27 = sys.version_info >= (2, 7)
 jython = sys.platform.startswith('java')
-
+win32 = sys.platform.startswith('win')
 
 try:
     import threading
 if py3k or jython:
     import pickle
 else:
-    import cPickle as pickle
+    import cPickle as pickle
+
+
+def timedelta_total_seconds(td):
+    if py27:
+        return td.total_seconds()
+    else:
+        return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 1e6) / 1e6
+
+

dogpile/cache/region.py

         if not expiration_time or isinstance(expiration_time, Number):
             self.expiration_time = expiration_time
         elif isinstance(expiration_time, datetime.timedelta):
-            self.expiration_time = int(expiration_time.total_seconds())
+            self.expiration_time = int(compat.timedelta_total_seconds(expiration_time))
         else:
             raise exception.ValidationError(
                 'expiration_time is not a number or timedelta.')
             ct = value.metadata["ct"]
             if self._soft_invalidated:
                 if ct < self._soft_invalidated:
-                    ct = time.time() - expiration_time
+                    ct = time.time() - expiration_time - .0001
 
             return value.payload, ct
 
                 ct = value.metadata["ct"]
                 if self._soft_invalidated:
                     if ct < self._soft_invalidated:
-                        ct = time.time() - expiration_time
+                        ct = time.time() - expiration_time - .0001
 
                 return value.payload, ct
 
 [upload_docs]
 upload-dir = docs/build/output/html
 
+[wheel]
+universal = 1
+
 [upload]
 sign = 1
 identity = C4DAFEE1

tests/cache/__init__.py

 from nose import SkipTest
 from functools import wraps
 from dogpile.cache import compat
+import time
+
 
 def eq_(a, b, msg=None):
     """Assert a == b, with repr messaging on failure."""
 
 from dogpile.cache.compat import configparser, io
 
+def winsleep():
+    # sleep for an amount of time
+    # sufficient for windows time.time()
+    # to change
+    if compat.win32:
+        time.sleep(.001)
 
 def requires_py3k(fn):
     @wraps(fn)

tests/cache/test_dbm_backend.py

 import time
 import os
 from nose import SkipTest
+from dogpile.core.readwrite_lock import ReadWriteMutex
+from dogpile.cache.backends.file import AbstractFileLock
 
 try:
     import fcntl
+    has_fcntl = True
 except ImportError:
-    raise SkipTest("fcntl not available")
+    has_fcntl = False
 
-class DBMBackendTest(_GenericBackendTest):
-    backend = "dogpile.cache.dbm"
+class MutexLock(AbstractFileLock):
+    def __init__(self, filename):
+        self.mutex = ReadWriteMutex()
 
-    config_args = {
-        "arguments":{
-            "filename":"test.dbm"
+    def acquire_read_lock(self, wait):
+        ret = self.mutex.acquire_read_lock(wait)
+        return wait or ret
+
+    def acquire_write_lock(self, wait):
+        ret = self.mutex.acquire_write_lock(wait)
+        return wait or ret
+
+    def release_read_lock(self):
+        return self.mutex.release_read_lock()
+
+    def release_write_lock(self):
+        return self.mutex.release_write_lock()
+
+if has_fcntl:
+    class DBMBackendTest(_GenericBackendTest):
+        backend = "dogpile.cache.dbm"
+
+        config_args = {
+            "arguments": {
+                "filename": "test.dbm"
+            }
         }
-    }
 
-class DBMBackendNoLockTest(_GenericBackendTest):
+class DBMBackendConditionTest(_GenericBackendTest):
     backend = "dogpile.cache.dbm"
 
     config_args = {
-        "arguments":{
-            "filename":"test.dbm",
-            "rw_lockfile":False,
-            "dogpile_lockfile":False,
+        "arguments": {
+            "filename": "test.dbm",
+            "lock_factory": MutexLock
         }
     }
 
 
-class DBMMutexTest(_GenericMutexTest):
+class DBMBackendNoLockTest(_GenericBackendTest):
     backend = "dogpile.cache.dbm"
 
     config_args = {
-        "arguments":{
-            "filename":"test.dbm"
+        "arguments": {
+            "filename": "test.dbm",
+            "rw_lockfile": False,
+            "dogpile_lockfile": False,
         }
     }
 
+
+class _DBMMutexTest(_GenericMutexTest):
+    backend = "dogpile.cache.dbm"
+
     def test_release_assertion_thread(self):
         backend = self._backend()
         m1 = backend.get_mutex("foo")
         finally:
             m1.release()
 
+if has_fcntl:
+    class DBMMutexFileTest(_DBMMutexTest):
+        config_args = {
+            "arguments": {
+                "filename": "test.dbm"
+            }
+        }
+
+
+class DBMMutexConditionTest(_DBMMutexTest):
+    config_args = {
+        "arguments": {
+            "filename": "test.dbm",
+            "lock_factory": MutexLock
+        }
+    }
+
 
 def teardown():
     for fname in os.listdir(os.curdir):

tests/cache/test_decorator.py

 #! coding: utf-8
 
 from ._fixtures import _GenericBackendFixture
-from . import eq_, requires_py3k
+from . import eq_, requires_py3k, winsleep
 from unittest import TestCase
 import time
 from dogpile.cache import util, compat
     def test_decorator_expire_callable_zero(self):
         go = self._fixture(expiration_time=lambda: 0)
         eq_(go(1, 2), (1, 1, 2))
+        winsleep()
         eq_(go(1, 2), (2, 1, 2))
+        winsleep()
         eq_(go(1, 2), (3, 1, 2))
 
     def test_explicit_expire(self):

tests/cache/test_memcached_backend.py

 from ._fixtures import _GenericBackendTest, _GenericMutexTest
-from . import eq_
+from . import eq_, winsleep
 from unittest import TestCase
 from threading import Thread
 import time
 from nose import SkipTest
+from dogpile.cache import compat
+
 
 class _TestMemcachedConn(object):
     @classmethod
 
 class _NonDistributedMemcachedTest(_TestMemcachedConn, _GenericBackendTest):
     region_args = {
-        "key_mangler":lambda x: x.replace(" ", "_")
+        "key_mangler": lambda x: x.replace(" ", "_")
     }
     config_args = {
-        "arguments":{
-            "url":"127.0.0.1:11211"
+        "arguments": {
+            "url": "127.0.0.1:11211"
         }
     }
 
 class _DistributedMemcachedTest(_TestMemcachedConn, _GenericBackendTest):
     region_args = {
-        "key_mangler":lambda x: x.replace(" ", "_")
+        "key_mangler": lambda x: x.replace(" ", "_")
     }
     config_args = {
-        "arguments":{
-            "url":"127.0.0.1:11211",
-            "distributed_lock":True
+        "arguments": {
+            "url": "127.0.0.1:11211",
+            "distributed_lock": True
         }
     }
 
 class _DistributedMemcachedMutexTest(_TestMemcachedConn, _GenericMutexTest):
     config_args = {
-        "arguments":{
-            "url":"127.0.0.1:11211",
-            "distributed_lock":True
+        "arguments": {
+            "url": "127.0.0.1:11211",
+            "distributed_lock": True
         }
     }
 
 
 class PylibmcArgsTest(TestCase):
     def test_binary_flag(self):
-        backend = MockPylibmcBackend(arguments={'url':'foo','binary':True})
+        backend = MockPylibmcBackend(arguments={'url': 'foo','binary': True})
         eq_(backend._create_client().kw["binary"], True)
 
     def test_url_list(self):
-        backend = MockPylibmcBackend(arguments={'url':["a", "b", "c"]})
+        backend = MockPylibmcBackend(arguments={'url': ["a", "b", "c"]})
         eq_(backend._create_client().arg[0], ["a", "b", "c"])
 
     def test_url_scalar(self):
-        backend = MockPylibmcBackend(arguments={'url':"foo"})
+        backend = MockPylibmcBackend(arguments={'url': "foo"})
         eq_(backend._create_client().arg[0], ["foo"])
 
     def test_behaviors(self):
-        backend = MockPylibmcBackend(arguments={'url':"foo",
-                                    "behaviors":{"q":"p"}})
+        backend = MockPylibmcBackend(arguments={'url': "foo",
+                                    "behaviors": {"q": "p"}})
         eq_(backend._create_client().kw["behaviors"], {"q": "p"})
 
     def test_set_time(self):
-        backend = MockPylibmcBackend(arguments={'url':"foo",
-                                "memcached_expire_time":20})
+        backend = MockPylibmcBackend(arguments={'url': "foo",
+                                "memcached_expire_time": 20})
         backend.set("foo", "bar")
-        eq_(backend._clients.memcached.canary, [{"time":20}])
+        eq_(backend._clients.memcached.canary, [{"time": 20}])
 
     def test_set_min_compress_len(self):
-        backend = MockPylibmcBackend(arguments={'url':"foo",
-                                "min_compress_len":20})
+        backend = MockPylibmcBackend(arguments={'url': "foo",
+                                "min_compress_len": 20})
         backend.set("foo", "bar")
-        eq_(backend._clients.memcached.canary, [{"min_compress_len":20}])
+        eq_(backend._clients.memcached.canary, [{"min_compress_len": 20}])
 
     def test_no_set_args(self):
-        backend = MockPylibmcBackend(arguments={'url':"foo"})
+        backend = MockPylibmcBackend(arguments={'url': "foo"})
         backend.set("foo", "bar")
         eq_(backend._clients.memcached.canary, [{}])
 
 class MemcachedArgstest(TestCase):
     def test_set_time(self):
-        backend = MockMemcacheBackend(arguments={'url':"foo",
-                                "memcached_expire_time":20})
+        backend = MockMemcacheBackend(arguments={'url': "foo",
+                                "memcached_expire_time": 20})
         backend.set("foo", "bar")
-        eq_(backend._clients.memcached.canary, [{"time":20}])
+        eq_(backend._clients.memcached.canary, [{"time": 20}])
 
     def test_set_min_compress_len(self):
-        backend = MockMemcacheBackend(arguments={'url':"foo",
-                                "min_compress_len":20})
+        backend = MockMemcacheBackend(arguments={'url': "foo",
+                                "min_compress_len": 20})
         backend.set("foo", "bar")
-        eq_(backend._clients.memcached.canary, [{"min_compress_len":20}])
+        eq_(backend._clients.memcached.canary, [{"min_compress_len": 20}])
 
 
 class LocalThreadTest(TestCase):
         for t in threads:
             t.join()
         eq_(canary, [i + 1 for i in range(count)])
-        eq_(MockClient.number_of_clients, 0)
+
+        if compat.py27:
+            eq_(MockClient.number_of_clients, 0)
+        else:
+            eq_(MockClient.number_of_clients, 1)
 
 

tests/cache/test_region.py

 from dogpile.cache import exception
 from dogpile.cache import make_region, register_backend, CacheRegion, util
 from dogpile.cache.proxy import ProxyBackend
-from . import eq_, is_, assert_raises_message, io, configparser
+from . import eq_, is_, assert_raises_message, io, configparser, winsleep
 import time, datetime
 import itertools
 from collections import defaultdict
     def test_hard_invalidate_get(self):
         reg = self._region()
         reg.set("some key", "some value")
+        time.sleep(.1)
         reg.invalidate()
         is_(reg.get("some key"), NO_VALUE)
 
         eq_(reg.get_or_create("some key", creator),
                     "some value 1")
 
+        time.sleep(.1)
         reg.invalidate()
         eq_(reg.get_or_create("some key", creator),
                     "some value 2")
     def test_soft_invalidate_get(self):
         reg = self._region(config_args={"expiration_time": 1})
         reg.set("some key", "some value")
+        time.sleep(.1)
         reg.invalidate(hard=False)
         is_(reg.get("some key"), NO_VALUE)
 
         eq_(reg.get_or_create("some key", creator),
                     "some value 1")
 
+        time.sleep(.1)
         reg.invalidate(hard=False)
         eq_(reg.get_or_create("some key", creator),
                     "some value 2")
         ret = reg.get_or_create_multi(
                     [1, 2], creator)
         eq_(ret, [1, 1])
+        time.sleep(.1)
         reg.invalidate(hard=False)
         ret = reg.get_or_create_multi(
                     [1, 2], creator)
                     should_cache_fn=should_cache_fn)
         eq_(ret, 1)
         eq_(reg.backend._cache['some key'][0], 1)
+        time.sleep(.1)
         reg.invalidate()
         ret = reg.get_or_create(
                     "some key", creator,
                     should_cache_fn=should_cache_fn)
         eq_(ret, [1, 1])
         eq_(reg.backend._cache[1][0], 1)
+        time.sleep(.1)
         reg.invalidate()
         ret = reg.get_or_create_multi(
                     [1, 2], creator,
                     should_cache_fn=should_cache_fn)
         eq_(ret, [2, 2])
         eq_(reg.backend._cache[1][0], 1)
+        time.sleep(.1)
         reg.invalidate()
         ret = reg.get_or_create_multi(
                     [1, 2], creator,