Commits

Hadrien David committed 5095980 Merge

Merge

  • Parent commits aef9952, 80543db

Files changed (30)

 50648927fc21fb092f2b7960d581b391f589ab92 v0.4.0
 7e6e50b0ab5ce8a1427a011a5197684148e5b709 v0.4.1
 279d4b8ab3f697ed3c79f80060a46c6a9344e0e8 v1.0.0
+cc79696d60b5cfccdb756d7f01e22428845ba191 v1.0.1
+9faec7ac59875c36f67f16519419b574ee022e6d v1.0.1
+a890129d14fe2cc7737e5cb3a5269ea306ead464 v1.0.2
+=============
+ddbmock 1.0.2
+=============
+
+This section documents all user visible changes included between ddbmock
+version 1.0.0 and version 1.0.2.
+
+Fixes
+-----
+
+- Fixed issues #10, #11, #13 and #15. Thanks to Lance Linder, Michael Hart
+and James O'Beirne for the pull requests.
+
+
 =============
 ddbmock 1.0.0
 =============
 ::
 
     $ hg clone ssh://hg@bitbucket.org/Ludia/dynamodb-mock
-    $ pip install nose nosexcover coverage mock webtests boto
+    $ pip install nose nosexcover coverage mock webtest boto
     $ python setup.py develop
     $ nosetests # --no-skip to run boto integration tests too
 

File ddbmock/config.py

 # boolean: enable timers ?
 ENABLE_DELAYS = True
 # seconds: simulate table creation delay. It will still be available immediately
-DELAY_CREATING = 1000
+DELAY_CREATING = 60
 # seconds: simulate table update delay. It will still be available immediately
-DELAY_UPDATING = 1000
+DELAY_UPDATING = 60
 # seconds: simulate table deletion delay. It will still be available until time is exhausted
-DELAY_DELETING = 1000
+DELAY_DELETING = 60
 
 ### Throughput statistics ###
 
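The simulated state-transition delays above drop from 1000 seconds to 60. Test suites that cannot afford even that can switch the timers off; a minimal sketch, assuming these module-level settings are read whenever a table changes state (``ENABLE_DELAYS`` and the ``DELAY_*`` values are the ones shown above)::

    from ddbmock import config

    # Turn the simulated CREATING/UPDATING/DELETING timers off entirely,
    # or shorten them instead of relying on the 60 second defaults.
    config.ENABLE_DELAYS = False
    # config.DELAY_CREATING = 1  # seconds, if the timers stay enabled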

File ddbmock/database/comparison.py

     if itype != rtype: raise TypeError(TYPEMSG2.format(itype, rtype, __name__))
     return ivalue == rvalue
 
+def ne(target, rule):
+    # None (missing attr) considered not equal to anything.
+    if target is None: return True
+    itype, ivalue = _parse_elem(target)
+    rtype, rvalue = _parse_elem(rule)
+    if itype != rtype: raise TypeError(TYPEMSG2.format(itype, rtype, __name__))
+    return ivalue != rvalue
+
 def le(target, rule):
     if target is None: return False
     itype, ivalue = _parse_elem(target)

File ddbmock/database/storage/sqlite.py

 # -*- coding: utf-8 -*-
 
-from ..item import Item
 from ddbmock import config
-import sqlite3, cPickle as pickle
+from multiprocessing import Lock
+import sqlite3
+import cPickle as pickle
 
 # I know, using a global "variable" for this kind of state *is* bad, but it
 # helps keep execution times sane. In particular, it lets us use the
 # in-memory version of sqlite
-conn = sqlite3.connect(config.STORAGE_SQLITE_FILE)
+conn = sqlite3.connect(config.STORAGE_SQLITE_FILE, check_same_thread=False)
+conn_lock = Lock()
+
 
 class Store(object):
+
     def __init__(self, name):
         """
         Initialize the sqlite store
 
         :param name: Table name.
         """
-        conn.execute('''CREATE TABLE IF NOT EXISTS `{}` (
-          `hash_key` blob NOT NULL,
-          `range_key` blob NOT NULL,
-          `data` blob NOT NULL,
-          PRIMARY KEY (`hash_key`,`range_key`)
-        );'''.format(name))
-        conn.commit()
+        with conn_lock:
+            conn.execute('''CREATE TABLE IF NOT EXISTS `{}` (
+            `hash_key` blob NOT NULL,
+            `range_key` blob NOT NULL,
+            `data` blob NOT NULL,
+            PRIMARY KEY (`hash_key`,`range_key`)
+            );'''.format(name))
+            conn.commit()
 
         self.name = name
 
         """
         Perform a full table cleanup. Might be a good idea in tests :)
         """
-        conn.execute('DELETE FROM `{}`'.format(self.name))
-        conn.commit()
+        with conn_lock:
+            conn.execute('DELETE FROM `{}`'.format(self.name))
+            conn.commit()
 
     def _get_by_hash_range(self, hash_key, range_key):
-        request = conn.execute('''SELECT `data` FROM `{}`
-                                    WHERE `hash_key`=? AND `range_key`=?'''
-                                    .format(self.name),
-                                    (hash_key, range_key))
-        item = request.fetchone()
+        with conn_lock:
+            request = conn.execute('''SELECT `data` FROM `{}`
+                                WHERE `hash_key`=? AND `range_key`=?'''
+                                .format(self.name),
+                                (hash_key, range_key))
+            item = request.fetchone()
+
         if item is None:
-            raise KeyError("No item found at ({}, {})".format(hash_key, range_key))
+            raise KeyError("No item found at ({}, {})".format(hash_key,
+                                                              range_key))
 
         return pickle.loads(str(item[0]))
 
     def _get_by_hash(self, hash_key):
-        items = conn.execute('''SELECT * FROM `{}`
-                                    WHERE `hash_key`=? '''.format(self.name),
-                                    (hash_key, ))
-        ret = {item[1]:pickle.loads(str(item[2])) for item in items}
+        with conn_lock:
+            items = conn.execute('''SELECT * FROM `{}`
+                                 WHERE `hash_key`=? '''.format(self.name),
+                                 (hash_key, ))
+
+        ret = {item[1]: pickle.loads(str(item[2])) for item in items}
 
         if not ret:
             raise KeyError("No item found at hash_key={}".format(hash_key))
+
         return ret
 
     def __getitem__(self, (hash_key, range_key)):
         """
-        Get item at (``hash_key``, ``range_key``) or the dict at ``hash_key`` if
-        ``range_key``  is None.
+        Get item at (``hash_key``, ``range_key``) or the dict at ``hash_key``
+        if ``range_key``  is None.
 
-        :param key: (``hash_key``, ``range_key``) Tuple. If ``range_key`` is None, all keys under ``hash_key`` are returned
+        :param key: (``hash_key``, ``range_key``) Tuple. If ``range_key`` is
+        None, all keys under ``hash_key`` are returned
         :return: Item or item dict
 
         :raise: KeyError
         :param item: the actual ``Item`` data structure to store
         """
         db_item = buffer(pickle.dumps(item, 2))
-        conn.execute('''INSERT OR REPLACE INTO `{}` (`hash_key`,`range_key`, `data`)
-                     VALUES (?, ?, ?)'''.format(self.name),
-                     (hash_key, range_key, db_item))
-        conn.commit()
+
+        with conn_lock:
+            conn.execute('''INSERT OR REPLACE INTO `{}`
+                         (`hash_key`,`range_key`, `data`)
+                         VALUES (?, ?, ?)'''.format(self.name),
+                         (hash_key, range_key, db_item))
+            conn.commit()
 
     def __delitem__(self, (hash_key, range_key)):
         """
 
         :raises: KeyError if not found
         """
-        conn.execute('DELETE FROM `{}` WHERE `hash_key`=? AND `range_key`=?'
-                          .format(self.name), (hash_key, range_key))
+        with conn_lock:
+            conn.execute('DELETE FROM `{}` WHERE `hash_key`=? AND '
+                         '`range_key`=?'
+                         .format(self.name), (hash_key, range_key))
 
     def __iter__(self):
         """
         Iterate all over the table, abstracting the ``hash_key`` and
         ``range_key`` complexity. Mostly used for ``Scan`` implementation.
         """
-        items = conn.execute('SELECT `data` FROM `{}`'.format(self.name))
+        with conn_lock:
+            items = conn.execute('SELECT `data` FROM `{}`'.format(self.name))
+
         for item in items:
             yield pickle.loads(str(item[0]))
+

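The storage backend now shares a single module-level connection opened with ``check_same_thread=False`` and serializes every statement through ``conn_lock``. A minimal sketch of the same pattern, using an illustrative ``demo`` table rather than ddbmock's real schema::

    import sqlite3
    from multiprocessing import Lock

    conn = sqlite3.connect(':memory:', check_same_thread=False)
    conn_lock = Lock()

    with conn_lock:
        conn.execute('CREATE TABLE IF NOT EXISTS `demo` ('
                     '`hash_key` blob NOT NULL, `range_key` blob NOT NULL, '
                     '`data` blob NOT NULL, '
                     'PRIMARY KEY (`hash_key`, `range_key`))')
        conn.commit()

    def put(hash_key, range_key, data):
        # Every statement goes through the shared lock, as Store does above.
        with conn_lock:
            conn.execute('INSERT OR REPLACE INTO `demo` VALUES (?, ?, ?)',
                         (hash_key, range_key, data))
            conn.commit()

    put('hk-1', 'rk-1', 'payload')
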
File ddbmock/database/table.py

             size = new.get_size()
             if size > config.MAX_ITEM_SIZE:
                 self.store[hash_key, range_key] = old  # roll back
-                raise ValidationException("Items must be smaller than {} bytes. Got {} after applying update".format(config.MAX_ITEM_SIZE, size))
+                raise ValidationException(
+                    "Item size has exceeded the maximum allowed size of {}".format(config.MAX_ITEM_SIZE))
 
         return old, new
 
         item = Item(item)
 
         if item.get_size() > config.MAX_ITEM_SIZE:
-            raise ValidationException("Items must be smaller than {} bytes. Got {}".format(config.MAX_ITEM_SIZE, item.get_size()))
+            raise ValidationException(
+                "Item size has exceeded the maximum allowed size of {}".format(config.MAX_ITEM_SIZE))
 
         hash_key = item.read_key(self.hash_key, max_size=config.MAX_HK_SIZE)
         range_key = item.read_key(self.range_key, max_size=config.MAX_RK_SIZE)
         if start and start['HashKeyElement'] != hash_key:
             raise ValidationException("'HashKeyElement' element of 'ExclusiveStartKey' must be the same as the hash_key. Expected {}, got {}".format(hash_key, start['HashKeyElement']))
 
-        data = self.store[hash_value, None]
+        try:
+            data = self.store[hash_value, None]
+        except KeyError:
+            # fix #9: return empty result set if first key does not exist
+            return Results(results, size, lek, -1)
 
         keys = sorted(data.keys())
 

File ddbmock/utils/stat.py

 
 
 def average(data):
-    return sum(data)/len(data)
+    return sum(data) / len(data)
+
 
 class Stat(object):
     def __init__(self, name, resolution_interval=1, aggregation_interval=5*60, logger=null_logger):
         :param aggregation_interval: aggregates data on this period. Must be bigger than ``resolution_interval``
         """
 
+        # Keep a reference to global functions to avoid them going out of scope
+        # in atexit.
+        self._time = time
+        self._average = average
+
         # Load params
         self.name=name
         self.resolution_interval = resolution_interval
         self.log = logger
 
         # Set internal state
-        self.current_point_time = int(time())
+        self.current_point_time = int(self._time())
         self.current_point_list = []
         self.current_point_value = 0
         self.last_aggregation_time = self.current_point_time
         points = self.current_point_list
 
         interval = (self.current_point_time - self.last_aggregation_time) / 60.0
-        self.log.info("%s: interval=%s min=%s max=%s average=%s",
-                       self.name,
-                       round(interval),
-                       min(points),
-                       max(points),
-                       average(points))
+
+        if points:
+            self.log.info("%s: interval=%s min=%s max=%s average=%s",
+                        self.name,
+                        round(interval),
+                        min(points),
+                        max(points),
+                        self._average(points))
 
         #reset
         self.current_point_list = []
-        self.last_aggregation_time = int(time())
+        self.last_aggregation_time = int(self._time())
 
     def _aggregate(self):
         """Trigger aggregation and reset current data"""
 
         # reset
         self.current_point_value = 0
-        self.current_point_time = int(time())
+        self.current_point_time = int(self._time())
 
 
     def push(self, value):
 
         :param value: value to push
         """
-        current_time = int(time())
+        current_time = int(self._time())
 
         # aggregate ?
         if self.current_point_time + self.current_point_time <= current_time:
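
The new ``self._time`` and ``self._average`` references protect the final aggregation when it runs from an ``atexit`` hook: during interpreter shutdown CPython may already have cleared module globals, so references bound on the instance stay usable. A minimal sketch of the same idea, with a hypothetical ``Reporter`` class rather than ddbmock's ``Stat``::

    import atexit
    from time import time

    class Reporter(object):  # hypothetical, for illustration only
        def __init__(self):
            # Bind the module-level function to the instance so it is still
            # reachable if flush() runs from atexit at interpreter shutdown.
            self._time = time
            atexit.register(self.flush)

        def flush(self):
            print("last flush at %d" % int(self._time()))

    Reporter()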

File ddbmock/validators/__init__.py

         validate = Schema(schema, required=True)
         return validate(post)
     except Invalid as e:
-        raise ValidationException(str(e.errors))
+        raise ValidationException(str(e))

File ddbmock/validators/types.py

 # Conditions supported by Scan
 scan_condition = Any(
     {
-        u"ComparisonOperator": Any(u"EQ", u"GT", u"GE", u"LT", u"LE"),
+        u"ComparisonOperator": Any(u"EQ", u"NE", u"GT", u"GE", u"LT", u"LE"),
         u"AttributeValueList": single_str_num_bin_list,
     },{
         u"ComparisonOperator": u"BETWEEN",

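With ``NE`` now accepted by the Scan validator and backed by the new ``ne`` comparison, a scan filter can exclude a value; items that lack the attribute entirely also match, since a missing attribute is treated as not equal. A minimal sketch of such a request body, with an illustrative attribute name and assuming the usual ``TableName``/``ScanFilter`` fields of the DynamoDB API::

    scan_request = {
        u"TableName": u"Table-HR",
        u"ScanFilter": {
            u"status": {  # hypothetical attribute
                u"ComparisonOperator": u"NE",
                u"AttributeValueList": [{u"S": u"archived"}],
            },
        },
    }
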
File docs/pages/extending.rst

 ::
 
     $ hg clone ssh://hg@bitbucket.org/Ludia/dynamodb-mock
-    $ pip install nose nosexcover coverage mock webtests boto
+    $ pip install nose nosexcover coverage mock webtest boto
     $ python setup.py develop
     $ nosetests # --no-skip to run boto integration tests too
 
 [metadata]
 name = ddbmock
-version = 1.0.0
+version = 1.0.2
 summary = Amazon DynamoDB mock implementation
 description-file = README.rst
 author = Jean-Tiare Le Bigot
 cover-erase = 1
 verbosity = 3
 with-id = 1
+with-yanc = 1

File tests/functional/boto/test_put_item.py

 
         db = connect_boto_patch()
 
-        self.assertRaisesRegexp(DynamoDBValidationError, 'Items.*smaller',
+        self.assertRaisesRegexp(DynamoDBValidationError, 'Item size.*exceeded',
             db.layer1.put_item,
             TABLE_NAME2, ITEM_HUGE)
 

File tests/functional/boto/test_query.py

 TABLE_RK_TYPE = u'S'
 
 HK_VALUE = u'123'
+HK_VALUE_404 = u'404'
 RK_VALUE1 = u'Waldo-1'
 RK_VALUE2 = u'Waldo-2'
 RK_VALUE3 = u'Waldo-3'
         ret = db.layer1.query(TABLE_NAME, {TABLE_HK_TYPE: HK_VALUE})
         self.assertEqual(expected, ret)
 
+    # Regression test for #9
+    def test_query_all_404(self):
+        from ddbmock import connect_boto_patch
+        from ddbmock.database.db import dynamodb
+
+        expected = {
+            u"Count": 0,
+            u'Items': [],
+            u"ConsumedCapacityUnits": 0.5,
+        }
+
+        db = connect_boto_patch()
+
+        ret = db.layer1.query(TABLE_NAME, {TABLE_HK_TYPE: HK_VALUE_404})
+        self.assertEqual(expected, ret)
+
     def test_query_2_first(self):
         from ddbmock import connect_boto_patch
         from ddbmock.database.db import dynamodb

File tests/functional/boto/test_update_item.py

         }
 
         # PUT explicite, existing field
-        self.assertRaisesRegexp(DynamoDBValidationError, 'Items.*smaller.*update',
+        self.assertRaisesRegexp(DynamoDBValidationError, 'Item size.*exceeded',
         db.layer1.update_item, TABLE_NAME2, key, {
             'relevant_data': {'Value': RELEVANT_HUGE_FIELD},
         })

File tests/functional/pyramid/test_batch_get_item.py

-# -*- coding: utf-8 -*-
 import json
 import unittest
 
-
 TABLE_NAME1 = 'Table-HR'
 TABLE_NAME2 = 'Table-H'
 TABLE_NAME_404 = 'Waldo'

File tests/functional/pyramid/test_batch_write_item.py

File contents unchanged.

File tests/functional/pyramid/test_create_table.py

File contents unchanged.

File tests/functional/pyramid/test_delete_item.py

File contents unchanged.

File tests/functional/pyramid/test_delete_table.py

File contents unchanged.

File tests/functional/pyramid/test_describe_table.py

File contents unchanged.

File tests/functional/pyramid/test_get_item.py

File contents unchanged.

File tests/functional/pyramid/test_list_tables.py

File contents unchanged.

File tests/functional/pyramid/test_put_item.py

File contents unchanged.

File tests/functional/pyramid/test_query.py

 TABLE_RK_TYPE = u'S'
 
 HK_VALUE = u'123'
+HK_VALUE_404 = u'404'
 RK_VALUE1 = u'Waldo-1'
 RK_VALUE2 = u'Waldo-2'
 RK_VALUE3 = u'Waldo-3'
         self.assertEqual(expected, json.loads(res.body))
         self.assertEqual('application/x-amz-json-1.0; charset=UTF-8',
                          res.headers['Content-Type'])
+
+    # Regression test for #9
+    def test_query_all_404(self):
+        request = {
+            "TableName": TABLE_NAME,
+            "HashKeyValue": {TABLE_HK_TYPE: HK_VALUE_404},
+        }
+
+        expected = {
+            u"Count": 0,
+            u'Items': [],
+            u"ConsumedCapacityUnits": 0.5,
+        }
+
+        # Protocol check
+        res = self.app.post_json('/', request, headers=HEADERS, status=200)
+        self.assertEqual(expected, json.loads(res.body))
+        self.assertEqual('application/x-amz-json-1.0; charset=UTF-8',
+                         res.headers['Content-Type'])

File tests/functional/pyramid/test_scan.py

File contents unchanged.

File tests/functional/pyramid/test_update_item.py

File contents unchanged.

File tests/functional/pyramid/test_update_table.py

File contents unchanged.

File tests/unit/test_database_storage_sqlite.py

 # -*- coding: utf-8 -*-
 
-import unittest, mock
-import sqlite3, cPickle as pickle
+import unittest
+import cPickle as pickle
+import multiprocessing
+from itertools import izip
 from ddbmock import config
 
 config.STORAGE_SQLITE_FILE = ':memory:'
 TABLE_NAME = 'test_table'
 
-ITEM1 = {"key":"value 1"}
-ITEM2 = {"key":"value 2"}
-ITEM3 = {"key":"value 3"}
-ITEM4 = {"key":"value 4"}
+ITEM1 = {"key": "value 1"}
+ITEM2 = {"key": "value 2"}
+ITEM3 = {"key": "value 3"}
+ITEM4 = {"key": "value 4"}
 
-ITEM5 = {"key":"value 5"}
-ITEM6 = {"key":"value 6"}
+ITEM5 = {"key": "value 5"}
+ITEM6 = {"key": "value 6"}
+
 
 class TestSQLiteStore(unittest.TestCase):
     def setUp(self):
         self.conn.execute('DROP TABLE `test_table`')
         self.conn.commit()
 
+    def test_multiprocessing(self):
+        """
+        Test that multiple processes can bang on sqlite without issue.
+
+        """
+        NUM_PROCS = 10
+
+        #: credit where credit is due:
+        #:  http://stackoverflow.com/a/5792404/1611953
+        #:
+        #: the two functions below allow us to target a non-module level
+        #: function with subprocesses
+        #:
+        def spawn(f):
+            def fun(pipe, x):
+                pipe.send(f(x))
+                pipe.close()
+            return fun
+
+        def parmap(f, X):
+            pipe = [multiprocessing.Pipe() for x in X]
+            proc = [multiprocessing.Process(target=spawn(f), args=(c, x))
+                    for x, (p, c) in izip(X, pipe)]
+            [p.start() for p in proc]
+            [p.join() for p in proc]
+            return [p.recv() for (p, c) in pipe]
+
+        def insert(pnum):
+            tup = (pnum + 100, ("process_%d" % pnum), 'val')
+            return self.conn.execute('INSERT INTO `test_table` VALUES '
+                                     '(?, ?, ?)', tup)
+
+        parmap(insert, range(NUM_PROCS))
+
+        # check that all rows have been added
+        for pnum in range(NUM_PROCS):
+            assert self.conn.execute('SELECT * from `test_table` '
+                                     'WHERE `hash_key`=?',
+                                     (pnum + 100,))
+
     def test_truncate(self):
         from ddbmock.database.storage.sqlite import Store
 

File tests/unit/test_item_field_comparison.py

         self.assertFalse(eq({u'S': u'waldo'}, {u'S': u'on-time'}))
         self.assertFalse(eq(None, {u'S': u'on-time'}))
 
+    def test_ne(self):
+        from ddbmock.database.comparison import ne
+
+        self.assertFalse(ne({u'S': u'waldo'}, {u'S': u'waldo'}))
+        self.assertTrue(ne({u'S': u'waldo'}, {u'S': u'on-time'}))
+        self.assertTrue(ne(None, {u'S': u'on-time'}))
+
     def test_le(self):
         from ddbmock.database.comparison import le