codernity committed 10699da

Tons of changes:
- IndexCreator => an easier way of creating indexes
- updates to the thread-safe and super-thread-safe databases (also the gevent one)
- added missing test files (sorry for that...)
- added more tests
- tox config changes

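The IndexCreator change means Database.add_index can now accept a short, simplified index definition as a string and compile it into a full index class via indexcreator.Parser (see CodernityDB/indexcreator.py below). A hedged sketch of such a definition, inferred from the parser's keywords (name, type, make_key_value:, make_key:) and its allowed HashIndex properties; the exact grammar may differ:

    # hypothetical simplified index, parsed by indexcreator.Parser
    simple_index = """
    name = with_x
    type = HashIndex
    key_format = 16s
    make_key_value:
    x > 1: x, None
    make_key:
    key
    """
    db.add_index(simple_index)  # db is an opened Database instance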

Files changed (20)

 .*eproject.cfg
 \#.*\#
 .tox
-.*test_db.*
+coverage.xml
+indexcreator_tests_db
 build/
 htmlcov/
 .*egg-info.*
 dist/
-.coverage/
+.coverage
 .project
-^\.coverage$
 .settings/
+.*\.swp
 htmlcov_.*
+docs/_build/*
 coverage.*\.xml
 junit-.*\.xml
 .*\.orig

CodernityDB/__init__.py

File contents unchanged.

CodernityDB/database.py

 import os
 import io
 from inspect import getsource
-
+from indexcreator import Parser
 
 # for custom indexes
 from CodernityDB.storage import Storage, IU_Storage
     def _add_single_index(self, p, i, index):
         """
         Adds single index to a database.
-        It will use :py:meth:`inspect.getsource` to get class surce.
+        It will use :py:meth:`inspect.getsource` to get class source.
         Then it will build real index file, save it in ``_indexes`` directory.
         """
         code = getsource(index.__class__)
             name = f.readline()[2:].strip()
             _class = f.readline()[2:].strip()
             code = f.read()
-        obj = compile(code, '<Index: %s' % os.path.join(p, ind), 'exec')
-        exec obj in globals()
-        ind_obj = globals()[_class](self.path, name, **ind_kwargs)
-        ind_obj._order = int(ind[:2])
-        return ind_obj
+        try:
+            obj = compile(code, '<Index: %s' % os.path.join(p, ind), 'exec')
+            exec obj in globals()
+            ind_obj = globals()[_class](self.path, name, **ind_kwargs)
+            ind_obj._order = int(ind[:2])
+        except:
+            os.unlink(os.path.join(p, ind))
+            raise
+        else:
+            return ind_obj
 
     def __write_index(self, new_index, number=0, edit=False, ind_kwargs=None):
+        #print new_index
         if ind_kwargs is None:
             ind_kwargs = {}
         p = os.path.join(self.path, '_indexes')
         if isinstance(new_index, basestring) and not new_index.startswith("path:"):
-            name = new_index.splitlines()[0][2:]
-            name = name.strip()
+            if len(new_index.splitlines()) < 4 or new_index.splitlines()[3] != '# inserted automatically':
+                par = Parser()
+                s = par.parse(new_index).splitlines()
+                name = s[0][2:]
+                c = s[1][2:]
+                commented = ['\n\n#SIMPLIFIED CODE']
+                map(lambda x: commented.append("#" + x), new_index.splitlines())
+                commented.append('#SIMPLIFIED CODE END\n\n')
+
+                s = header_for_indexes(
+                    name, c) + "\n".join(s[2:]) + "\n".join(commented)
+                new_index = s
+
+            else:
+                name = new_index.splitlines()[0][2:]
+                name = name.strip()
 
             if name in self.indexes_names and not edit:
                 raise IndexConflict("Already exists")
                 raise PreconditionsException(
                     "Id index must be the first added")
             ind_path = "%.2d%s" % (number, name)
+
             with io.FileIO(os.path.join(p, ind_path + '.py'), 'w') as f:
                 f.write(new_index)
 
             ind_obj = self._read_index_single(p, ind_path + '.py')
+
         elif isinstance(new_index, basestring) and new_index.startswith("path:"):
             path = new_index[5:]
             if not path.endswith('.py'):
 
         :returns: new index name
         """
+
         if ind_kwargs is None:
             ind_kwargs = {}
         p = os.path.join(self.path, '_indexes')
             self.reindex_index(name)
         return name
 
-    def get_index_code(self, index_name):
+    def get_index_code(self, index_name, code_switch='All'):
         """
         It will return full index code from index file.
 
         name = "%.2d%s" % (ind._order, index_name)
         name += '.py'
         with io.FileIO(os.path.join(self.path, '_indexes', name), 'r') as f:
-            return f.read()
+            co = f.read()
+            if code_switch == 'All':
+                return co
+
+            if code_switch == 'S':
+                try:
+                    ind = co.index('#SIMPLIFIED CODE')
+                except ValueError:
+                    return " "
+                else:
+                    s = co[ind:]
+                    l = s.splitlines()[1:-2]
+                    ll = map(lambda x: x[1:], l)
+                    return '\n'.join(ll)
+            if code_switch == 'P':
+                try:
+                    ind = co.index('#SIMPLIFIED CODE')
+                except ValueError:
+                    return co
+                else:
+                    return co[:ind]
+
         return ""  # shouldn't happen
 
     def __set_main_storage(self):
 
     def initialize(self, path=None, makedir=True):
         """
-        Initialize new database (have to be called before open_new)
+        Initialize new database
 
         :param path: Path to a database (allows delayed path configuration), if not provided self.path will be used
         :param makedir: Make the ``_indexes`` directory or not
 
         :returns: the database path
         """
-        if self.opened == True:
+        if self.opened is True:
             raise DatabaseConflict("Already opened")
         if not path:
             path = self.path
             self.initialize(path)
         if not self.path:
             raise PreconditionsException("No path specified")
-        if self.opened == True:
+        if self.opened is True:
             raise DatabaseConflict("Already opened")
         self.__open_new(**kwargs)
         self.__set_main_storage()
 
         :param path: path with database to open
         """
-        if self.opened == True:
+        if self.opened is True:
             raise DatabaseConflict("Already opened")
 #        else:
         if path:
     def compact_index(self, index):
         """
         Compacts index
-        Used for better utilization of index metadata. The deleted documents will be not more in structure.
+        Used for better utilization of index metadata.
+        Deleted documents will no longer be present in the structure.
 
         :param index: the index to compact
         :type index: :py:class:`CodernityDB.index.Index` instance, or string
         It's using **reference** on the given data dict object,
         to avoid it copy it before inserting!
 
-        If data **will not** have ``_id`` field, it will be generated (random 32 chars string)
+        If data does **not** have an ``_id`` field,
+        it will be generated (a random 32-char string)
 
         :param data: data to insert
         """
         if with_storage and size:
             data = storage.get(start, size, status)
         else:
+
             data = {}
         if with_doc and index_name != 'id':
             doc = self.get('id', l_key, False)
 
     def get_many(self, index_name, key=None, limit=1, offset=0, with_doc=False, with_storage=True, start=None, end=None, **kwargs):
         """
-        Allows to get **multiple** data for given ``key`` for *Hash based indexes*. Also allows get **range** queries for *Tree based indexes* with ``start`` and ``end`` arguments.
+        Allows getting **multiple** records for a given ``key`` from *Hash based indexes*.
+        Also allows **range** queries on *Tree based indexes* via the ``start`` and ``end`` arguments.
 
         :param index_name: Index to perform the operation
         :param key: key to look for (has to be ``None`` to use range queries)
 
     def run(self, index_name, target_funct, *args, **kwargs):
         """
-        Allows to execute given function on Database side (important for server mode)
+        Allows executing a given function on the Database side
+        (important for server mode)
 
         If ``target_funct==sum`` then given index must have ``run_sum`` method.
 
         for key, value in db_index.__dict__.iteritems():
             if not callable(value):  # not using inspect etc...
                 props[key] = value
+
         return props
 
     def get_db_details(self):
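
A hedged usage sketch of the new code_switch argument shown above: 'All' (the default) returns the whole stored index file, 'S' only the simplified source kept between the #SIMPLIFIED CODE markers (uncommented), and 'P' only the generated Python part before the marker. The index name is illustrative:

    full = db.get_index_code('with_x')                    # code_switch='All'
    simplified = db.get_index_code('with_x', code_switch='S')
    generated = db.get_index_code('with_x', code_switch='P')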

CodernityDB/database_gevent.py

 from gevent.coros import RLock
 
 from CodernityDB.env import cdb_environment
-from CodernityDB.database import PreconditionsException
 
 cdb_environment['mode'] = "gevent"
 cdb_environment['rlock_obj'] = RLock
 
 
-from CodernityDB.database import Database
+#from CodernityDB.database import Database
+from CodernityDB.database_safe_shared import SafeDatabase
 
 
-class GeventDatabase(Database):
-    """
-    A database that works with Gevent
-    """
+class GeventDatabase(SafeDatabase):
+    pass
 
-    def __init__(self, path):
-        super(GeventDatabase, self).__init__(path)
-        self.main_lock = RLock()
-        self.close_open_lock = RLock()
-        self.indexes_locks = {}
+    # """
+    # A database that works with Gevent
+    # """
 
-    def initialize(self, *args, **kwargs):
-        res = None
-        try:
-            self.close_open_lock.acquire()
-            res = super(GeventDatabase, self).initialize(*args, **kwargs)
-            for name in self.indexes_names.iterkeys():
-                self.indexes_locks[name] = RLock()
-        finally:
-            self.close_open_lock.release()
-        return res
+    # def __init__(self, path):
+    #     super(GeventDatabase, self).__init__(path)
+    #     self.main_lock = RLock()
+    #     self.close_open_lock = RLock()
+    #     self.indexes_locks = {}
 
-    def open(self, *args, **kwargs):
-        res = None
-        try:
-            self.close_open_lock.acquire()
-            res = super(GeventDatabase, self).open(*args, **kwargs)
-            for name in self.indexes_names.iterkeys():
-                self.indexes_locks[name] = RLock()
-        finally:
-            self.close_open_lock.release()
-        return res
+    # def initialize(self, *args, **kwargs):
+    #     res = None
+    #     try:
+    #         self.close_open_lock.acquire()
+    #         res = super(GeventDatabase, self).initialize(*args, **kwargs)
+    #         for name in self.indexes_names.iterkeys():
+    #             self.indexes_locks[name] = RLock()
+    #     finally:
+    #         self.close_open_lock.release()
+    #     return res
 
-    def create(self, *args, **kwargs):
-        res = None
-        try:
-            self.close_open_lock.acquire()
-            res = super(GeventDatabase, self).create(*args, **kwargs)
-            for name in self.indexes_names.iterkeys():
-                self.indexes_locks[name] = RLock()
-        finally:
-            self.close_open_lock.release()
-        return res
+    # def open(self, *args, **kwargs):
+    #     res = None
+    #     try:
+    #         self.close_open_lock.acquire()
+    #         res = super(GeventDatabase, self).open(*args, **kwargs)
+    #         for name in self.indexes_names.iterkeys():
+    #             self.indexes_locks[name] = RLock()
+    #     finally:
+    #         self.close_open_lock.release()
+    #     return res
 
-    def close(self):
-        res = None
-        try:
-            self.close_open_lock.acquire()
-            res = super(GeventDatabase, self).close()
-        finally:
-            self.close_open_lock.release()
-        return res
+    # def create(self, *args, **kwargs):
+    #     res = None
+    #     try:
+    #         self.close_open_lock.acquire()
+    #         res = super(GeventDatabase, self).create(*args, **kwargs)
+    #         for name in self.indexes_names.iterkeys():
+    #             self.indexes_locks[name] = RLock()
+    #     finally:
+    #         self.close_open_lock.release()
+    #     return res
 
-    def destroy(self):
-        res = None
-        try:
-            self.close_open_lock.acquire()
-            res = super(GeventDatabase, self).destroy()
-        finally:
-            self.close_open_lock.release()
-            return res
+    # def close(self):
+    #     res = None
+    #     try:
+    #         self.close_open_lock.acquire()
+    #         res = super(GeventDatabase, self).close()
+    #     finally:
+    #         self.close_open_lock.release()
+    #     return res
 
-    def add_index(self, *args, **kwargs):
-        res = None
-        try:
-            self.main_lock.acquire()
-            res = super(GeventDatabase, self).add_index(*args, **kwargs)
-        finally:
-            if self.opened:
-                self.indexes_locks[res] = RLock()
-            self.main_lock.release()
-        return res
+    # def destroy(self):
+    #     res = None
+    #     try:
+    #         self.close_open_lock.acquire()
+    #         res = super(GeventDatabase, self).destroy()
+    #     finally:
+    #         self.close_open_lock.release()
+    #         return res
 
-    def edit_index(self, *args, **kwargs):
-        res = None
-        try:
-            self.main_lock.acquire()
-            res = super(GeventDatabase, self).edit_index(*args, **kwargs)
-        finally:
-            if self.opened:
-                self.indexes_locks[res] = RLock()
-            self.main_lock.release()
-        return res
+    # def add_index(self, *args, **kwargs):
+    #     res = None
+    #     try:
+    #         self.main_lock.acquire()
+    #         res = super(GeventDatabase, self).add_index(*args, **kwargs)
+    #     finally:
+    #         if self.opened:
+    #             self.indexes_locks[res] = RLock()
+    #         self.main_lock.release()
+    #     return res
 
-    def set_indexes(self, *args, **kwargs):
-        try:
-            self.main_lock.acquire()
-            super(GeventDatabase, self).set_indexes(*args, **kwargs)
-        finally:
-            self.main_lock.release()
+    # def edit_index(self, *args, **kwargs):
+    #     res = None
+    #     try:
+    #         self.main_lock.acquire()
+    #         res = super(GeventDatabase, self).edit_index(*args, **kwargs)
+    #     finally:
+    #         if self.opened:
+    #             self.indexes_locks[res] = RLock()
+    #         self.main_lock.release()
+    #     return res
 
-    def _read_indexes(self, *args, **kwargs):
-        try:
-            self.main_lock.acquire()
-            super(GeventDatabase, self)._read_indexes(*args, **kwargs)
-        finally:
-            self.main_lock.release()
+    # def set_indexes(self, *args, **kwargs):
+    #     try:
+    #         self.main_lock.acquire()
+    #         super(GeventDatabase, self).set_indexes(*args, **kwargs)
+    #     finally:
+    #         self.main_lock.release()
 
-    def insert_id_index(self, *args, **kwargs):
-        lock = self.indexes_locks['id']
-        try:
-            lock.acquire()
-            res = super(GeventDatabase, self).insert_id_index(*args, **kwargs)
-        finally:
-            lock.release()
-        return res
+    # def _read_indexes(self, *args, **kwargs):
+    #     try:
+    #         self.main_lock.acquire()
+    #         super(GeventDatabase, self)._read_indexes(*args, **kwargs)
+    #     finally:
+    #         self.main_lock.release()
 
-    def update_id_index(self, *args, **kwargs):
-        lock = self.indexes_locks['id']
-        res = None
-        try:
-            lock.acquire()
-            res = super(GeventDatabase, self).update_id_index(*args, **kwargs)
-        finally:
-            lock.release()
-        return res
+    # def insert_id_index(self, *args, **kwargs):
+    #     lock = self.indexes_locks['id']
+    #     try:
+    #         lock.acquire()
+    #         res = super(GeventDatabase, self).insert_id_index(*args, **kwargs)
+    #     finally:
+    #         lock.release()
+    #     return res
 
-    def delete_id_index(self, *args, **kwargs):
-        lock = self.indexes_locks['id']
-        res = None
-        try:
-            lock.acquire()
-            res = super(GeventDatabase, self).delete_id_index(*args, **kwargs)
-        finally:
-            lock.release()
-        return res
+    # def update_id_index(self, *args, **kwargs):
+    #     lock = self.indexes_locks['id']
+    #     res = None
+    #     try:
+    #         lock.acquire()
+    #         res = super(GeventDatabase, self).update_id_index(*args, **kwargs)
+    #     finally:
+    #         lock.release()
+    #     return res
 
-    def single_update_index(self, index, *args, **kwargs):
-        lock = self.indexes_locks[index.name]
-        try:
-            lock.acquire()
-            super(GeventDatabase, self).single_update_index(
-                index, *args, **kwargs)
-        finally:
-            lock.release()
+    # def delete_id_index(self, *args, **kwargs):
+    #     lock = self.indexes_locks['id']
+    #     res = None
+    #     try:
+    #         lock.acquire()
+    #         res = super(GeventDatabase, self).delete_id_index(*args, **kwargs)
+    #     finally:
+    #         lock.release()
+    #     return res
 
-    def single_insert_index(self, index, *args, **kwargs):
-        lock = self.indexes_locks[index.name]
-        try:
-            lock.acquire()
-            super(GeventDatabase, self).single_insert_index(
-                index, *args, **kwargs)
-        finally:
-            lock.release()
+    # def single_update_index(self, index, *args, **kwargs):
+    #     lock = self.indexes_locks[index.name]
+    #     try:
+    #         lock.acquire()
+    #         super(GeventDatabase, self).single_update_index(
+    #             index, *args, **kwargs)
+    #     finally:
+    #         lock.release()
 
-    def single_delete_index(self, index, *args, **kwargs):
-        lock = self.indexes_locks[index.name]
-        try:
-            lock.acquire()
-            super(GeventDatabase, self).single_delete_index(
-                index, *args, **kwargs)
-        finally:
-            lock.release()
+    # def single_insert_index(self, index, *args, **kwargs):
+    #     lock = self.indexes_locks[index.name]
+    #     try:
+    #         lock.acquire()
+    #         super(GeventDatabase, self).single_insert_index(
+    #             index, *args, **kwargs)
+    #     finally:
+    #         lock.release()
 
-    def single_compact_index(self, index, *args, **kwargs):
-        lock = self.indexes_locks[index.name]
-        try:
-            lock.acquire()
-            super(GeventDatabase, self).compact_index(index, *args, **kwargs)
-        finally:
-            lock.release()
+    # def single_delete_index(self, index, *args, **kwargs):
+    #     lock = self.indexes_locks[index.name]
+    #     try:
+    #         lock.acquire()
+    #         super(GeventDatabase, self).single_delete_index(
+    #             index, *args, **kwargs)
+    #     finally:
+    #         lock.release()
 
-    def reindex_index(self, index, *args, **kwargs):
-        if isinstance(index, basestring):
-            if not index in self.indexes_names:
-                raise PreconditionsException("No index named %s" % index)
-            index = self.indexes_names[index]
-        key = index.name + "reind"
-        self.main_lock.acquire()
-        if key in self.indexes_locks:
-            lock = self.indexes_locks[index.name + "reind"]
-        else:
-            self.indexes_locks[index.name + "reind"] = RLock()
-            lock = self.indexes_locks[index.name + "reind"]
-        self.main_lock.release()
-        try:
-            lock.acquire()
-            super(GeventDatabase, self).reindex_index(index, *args, **kwargs)
-        finally:
-            lock.release()
+    # def single_compact_index(self, index, *args, **kwargs):
+    #     lock = self.indexes_locks[index.name]
+    #     try:
+    #         lock.acquire()
+    #         super(GeventDatabase, self).compact_index(index, *args, **kwargs)
+    #     finally:
+    #         lock.release()
 
-    def destroy_index(self, index, *args, **kwargs):
-        if isinstance(index, basestring):
-            if not index in self.indexes_names:
-                raise PreconditionsException("No index named %s" % index)
-            index = self.indexes_names[index]
-        lock = self.indexes_locks[index.name]
-        try:
-            lock.acquire()
-            super(GeventDatabase, self).destroy_index(index, *args, **kwargs)
-        finally:
-            lock.release()
+    # def reindex_index(self, index, *args, **kwargs):
+    #     if isinstance(index, basestring):
+    #         if not index in self.indexes_names:
+    #             raise PreconditionsException("No index named %s" % index)
+    #         index = self.indexes_names[index]
+    #     key = index.name + "reind"
+    #     self.main_lock.acquire()
+    #     if key in self.indexes_locks:
+    #         lock = self.indexes_locks[index.name + "reind"]
+    #     else:
+    #         self.indexes_locks[index.name + "reind"] = RLock()
+    #         lock = self.indexes_locks[index.name + "reind"]
+    #     self.main_lock.release()
+    #     try:
+    #         lock.acquire()
+    #         super(GeventDatabase, self).reindex_index(index, *args, **kwargs)
+    #     finally:
+    #         lock.release()
 
-    def flush(self):
-        try:
-            self.main_lock.acquire()
-            super(GeventDatabase, self).flush()
-        finally:
-            self.main_lock.release()
+    # def destroy_index(self, index, *args, **kwargs):
+    #     if isinstance(index, basestring):
+    #         if not index in self.indexes_names:
+    #             raise PreconditionsException("No index named %s" % index)
+    #         index = self.indexes_names[index]
+    #     lock = self.indexes_locks[index.name]
+    #     try:
+    #         lock.acquire()
+    #         super(GeventDatabase, self).destroy_index(index, *args, **kwargs)
+    #     finally:
+    #         lock.release()
 
-    def fsync(self):
-        try:
-            self.main_lock.acquire()
-            super(GeventDatabase, self).fsync()
-        finally:
-            self.main_lock.release()
+    # def flush(self):
+    #     try:
+    #         self.main_lock.acquire()
+    #         super(GeventDatabase, self).flush()
+    #     finally:
+    #         self.main_lock.release()
+
+    # def fsync(self):
+    #     try:
+    #         self.main_lock.acquire()
+    #         super(GeventDatabase, self).fsync()
+    #     finally:
+    #         self.main_lock.release()
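
GeventDatabase is now only a thin alias for the shared SafeDatabase; importing this module first sets cdb_environment['mode'] and cdb_environment['rlock_obj'], so the locking code in database_safe_shared.py picks gevent RLocks. A minimal usage sketch (the path is illustrative, gevent must be installed):

    from CodernityDB.database_gevent import GeventDatabase

    db = GeventDatabase('/tmp/gevent_db')
    db.create()
    db.insert(dict(x=1))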

CodernityDB/database_safe_shared.py

+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2011-2012 Codernity (http://codernity.com)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from CodernityDB.env import cdb_environment
+from CodernityDB.database import PreconditionsException
+from database import Database
+
+from collections import defaultdict
+from functools import wraps
+from types import MethodType
+
+
+class th_safe_gen:
+
+    def __init__(self, name, gen, l=None):
+        self.lock = l
+        self.__gen = gen
+        self.name = name
+
+    def __iter__(self):
+        return self
+
+    def next(self):
+        with self.lock:
+            return self.__gen.next()
+
+    @staticmethod
+    def wrapper(method, index_name, meth_name, l=None):
+        @wraps(method)
+        def _inner(*args, **kwargs):
+            res = method(*args, **kwargs)
+            return th_safe_gen(index_name + "_" + meth_name, res, l)
+        return _inner
+
+
+def safe_wrapper(method, lock):
+    @wraps(method)
+    def _inner(*args, **kwargs):
+        with lock:
+            return method(*args, **kwargs)
+    return _inner
+
+
+class SafeDatabase(Database):
+
+    def __init__(self, path, *args, **kwargs):
+        super(SafeDatabase, self).__init__(path, *args, **kwargs)
+        self.indexes_locks = defaultdict(lambda: cdb_environment['rlock_obj']())
+        self.close_open_lock = cdb_environment['rlock_obj']()
+        self.main_lock = cdb_environment['rlock_obj']()
+
+    def __patch_index_gens(self, name):
+        ind = self.indexes_names[name]
+        for c in ('all', 'get_many'):
+            m = getattr(ind, c)
+            if getattr(ind, c + "_orig", None):
+                return
+            m_fixed = th_safe_gen.wrapper(m, name, c, self.indexes_locks[name])
+            setattr(ind, c, m_fixed)
+            setattr(ind, c + '_orig', m)
+
+    def __patch_index_methods(self, name):
+        ind = self.indexes_names[name]
+        lock = self.indexes_locks[name]
+        for curr in dir(ind):
+            meth = getattr(ind, curr)
+            if not curr.startswith('_') and isinstance(meth, MethodType):
+                setattr(ind, curr, safe_wrapper(meth, lock))
+        stor = ind.storage
+        for curr in dir(stor):
+            meth = getattr(stor, curr)
+            if not curr.startswith('_') and isinstance(meth, MethodType):
+                setattr(stor, curr, safe_wrapper(meth, lock))
+
+    def __patch_index(self, name):
+        self.__patch_index_methods(name)
+        self.__patch_index_gens(name)
+
+    def initialize(self, *args, **kwargs):
+        with self.close_open_lock:
+            res = super(SafeDatabase, self).initialize(*args, **kwargs)
+            for name in self.indexes_names.iterkeys():
+                self.indexes_locks[name] = cdb_environment['rlock_obj']()
+            return res
+
+    def open(self, *args, **kwargs):
+        with self.close_open_lock:
+            res = super(SafeDatabase, self).open(*args, **kwargs)
+            for name in self.indexes_names.iterkeys():
+                self.indexes_locks[name] = cdb_environment['rlock_obj']()
+                self.__patch_index(name)
+            return res
+
+    def create(self, *args, **kwargs):
+        with self.close_open_lock:
+            res = super(SafeDatabase, self).create(*args, **kwargs)
+            for name in self.indexes_names.iterkeys():
+                self.indexes_locks[name] = cdb_environment['rlock_obj']()
+                self.__patch_index(name)
+            return res
+
+    def close(self):
+        with self.close_open_lock:
+            return super(SafeDatabase, self).close()
+
+    def destroy(self):
+        with self.close_open_lock:
+            return super(SafeDatabase, self).destroy()
+
+    def add_index(self, *args, **kwargs):
+        with self.main_lock:
+            res = super(SafeDatabase, self).add_index(*args, **kwargs)
+            if self.opened:
+                self.indexes_locks[res] = cdb_environment['rlock_obj']()
+                self.__patch_index(res)
+            return res
+
+    def edit_index(self, *args, **kwargs):
+        with self.main_lock:
+            res = super(SafeDatabase, self).edit_index(*args, **kwargs)
+            if self.opened:
+                self.indexes_locks[res] = cdb_environment['rlock_obj']()
+                self.__patch_index(res)
+            return res
+
+    def set_indexes(self, *args, **kwargs):
+        try:
+            self.main_lock.acquire()
+            super(SafeDatabase, self).set_indexes(*args, **kwargs)
+        finally:
+            self.main_lock.release()
+
+    def reindex_index(self, index, *args, **kwargs):
+        if isinstance(index, basestring):
+            if not index in self.indexes_names:
+                raise PreconditionsException("No index named %s" % index)
+            index = self.indexes_names[index]
+        key = index.name + "reind"
+        self.main_lock.acquire()
+        if key in self.indexes_locks:
+            lock = self.indexes_locks[index.name + "reind"]
+        else:
+            self.indexes_locks[index.name + "reind"] = cdb_environment['rlock_obj']()
+            lock = self.indexes_locks[index.name + "reind"]
+        self.main_lock.release()
+        try:
+            lock.acquire()
+            super(SafeDatabase, self).reindex_index(
+                index, *args, **kwargs)
+        finally:
+            lock.release()
+
+    def flush(self):
+        try:
+            self.main_lock.acquire()
+            super(SafeDatabase, self).flush()
+        finally:
+            self.main_lock.release()
+
+    def fsync(self):
+        try:
+            self.main_lock.acquire()
+            super(SafeDatabase, self).fsync()
+        finally:
+            self.main_lock.release()
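
An illustrative sketch (not part of the commit) of how th_safe_gen serializes generator consumption: every next() call is taken under the supplied lock, so threads sharing an index's all()/get_many() iterator cannot interleave inside the index code:

    from threading import RLock
    from CodernityDB.database_safe_shared import th_safe_gen

    lock = RLock()

    def records():  # stand-in for a patched index generator
        for x in (1, 2, 3):
            yield x

    safe = th_safe_gen('demo_all', records(), lock)
    print list(safe)  # -> [1, 2, 3], each step guarded by the lock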

CodernityDB/database_super_thread_safe.py

 from functools import wraps
 from types import FunctionType, MethodType
 
+from CodernityDB.database_safe_shared import th_safe_gen
+
 
 class SuperLock(type):
 
         def _inner(*args, **kwargs):
             db = args[0]
             with db.super_lock:
+#                print '=>', f.__name__, repr(args[1:])
                 res = f(*args, **kwargs)
-                if db.opened:
-                    db.flush()
+#                if db.opened:
+#                    db.flush()
+#                print '<=', f.__name__, repr(args[1:])
                 return res
         return _inner
 
 
 
 class SuperThreadSafeDatabase(Database):
+    """
+    Thread safe version that always allows single thread to use db.
+    It adds the same lock for all methods, so only one operation can be
+    performed in given time. Completely different implementation
+    than ThreadSafe version (without super word)
+    """
 
+
+    def __patch_index_gens(self, name):
+        ind = self.indexes_names[name]
+        for c in ('all', 'get_many'):
+            m = getattr(ind, c)
+            if getattr(ind, c + "_orig", None):
+                return
+            m_fixed = th_safe_gen.wrapper(m, name, c, self.super_lock)
+            setattr(ind, c, m_fixed)
+            setattr(ind, c + '_orig', m)
+
+    def open(self, *args, **kwargs):
+        res = super(SuperThreadSafeDatabase, self).open(*args, **kwargs)
+        for name in self.indexes_names.iterkeys():
+            self.__patch_index_gens(name)
+        return res
+
+    def create(self, *args, **kwargs):
+        res = super(SuperThreadSafeDatabase, self).create(*args, **kwargs)
+        for name in self.indexes_names.iterkeys():
+            self.__patch_index_gens(name)
+        return res
+
+    def add_index(self, *args, **kwargs):
+        res = super(SuperThreadSafeDatabase, self).add_index(*args, **kwargs)
+        self.__patch_index_gens(res)
+        return res
+
+    def edit_index(self, *args, **kwargs):
+        res = super(SuperThreadSafeDatabase, self).edit_index(*args, **kwargs)
+        self.__patch_index_gens(res)
+        return res
     __metaclass__ = SuperLock
 
     def __init__(self, *args, **kwargs):
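
For context, a simplified sketch of the single-lock metaclass pattern that SuperLock (partially shown above) implements; names here are illustrative and the real SuperLock differs in details:

    from functools import wraps
    from threading import RLock
    from types import FunctionType

    def _locked(f):
        @wraps(f)
        def _inner(self, *args, **kwargs):
            with self.super_lock:
                return f(self, *args, **kwargs)
        return _inner

    class SingleLockMeta(type):
        # wraps every public method so the whole object shares one lock
        def __new__(mcs, name, bases, attrs):
            for k, v in attrs.items():
                if isinstance(v, FunctionType) and not k.startswith('_'):
                    attrs[k] = _locked(v)
            return type.__new__(mcs, name, bases, attrs)

    class Demo(object):
        __metaclass__ = SingleLockMeta
        def __init__(self):  # private names stay unwrapped
            self.super_lock = RLock()
        def work(self):
            return 42  # executed while holding self.super_lock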

CodernityDB/database_thread_safe.py

 from threading import RLock
 
 from CodernityDB.env import cdb_environment
-from CodernityDB.database import PreconditionsException
 
 cdb_environment['mode'] = "threads"
 cdb_environment['rlock_obj'] = RLock
 
 
-from database import Database
+from database_safe_shared import SafeDatabase
 
 
-class ThreadSafeDatabase(Database):
-
-    def __init__(self, path):
-        super(ThreadSafeDatabase, self).__init__(path)
-        self.main_lock = RLock()
-        self.close_open_lock = RLock()
-        self.indexes_locks = {}
-
-    def initialize(self, *args, **kwargs):
-        res = None
-        try:
-            self.close_open_lock.acquire()
-            res = super(ThreadSafeDatabase, self).initialize(*args, **kwargs)
-            for name in self.indexes_names.iterkeys():
-                self.indexes_locks[name] = RLock()
-        finally:
-            self.close_open_lock.release()
-        return res
-
-    def open(self, *args, **kwargs):
-        res = None
-        try:
-            self.close_open_lock.acquire()
-            res = super(ThreadSafeDatabase, self).open(*args, **kwargs)
-            for name in self.indexes_names.iterkeys():
-                self.indexes_locks[name] = RLock()
-        finally:
-            self.close_open_lock.release()
-        return res
-
-    def create(self, *args, **kwargs):
-        res = None
-        try:
-            self.close_open_lock.acquire()
-            res = super(ThreadSafeDatabase, self).create(*args, **kwargs)
-            for name in self.indexes_names.iterkeys():
-                self.indexes_locks[name] = RLock()
-        finally:
-            self.close_open_lock.release()
-        return res
-
-    def close(self):
-        res = None
-        try:
-            self.close_open_lock.acquire()
-            res = super(ThreadSafeDatabase, self).close()
-        finally:
-            self.close_open_lock.release()
-        return res
-
-    def destroy(self):
-        res = None
-        try:
-            self.close_open_lock.acquire()
-            res = super(ThreadSafeDatabase, self).destroy()
-        finally:
-            self.close_open_lock.release()
-            return res
-
-    def add_index(self, *args, **kwargs):
-        res = None
-        try:
-            self.main_lock.acquire()
-            res = super(ThreadSafeDatabase, self).add_index(*args, **kwargs)
-        finally:
-            if self.opened:
-                self.indexes_locks[res] = RLock()
-            self.main_lock.release()
-        return res
-
-    def edit_index(self, *args, **kwargs):
-        res = None
-        try:
-            self.main_lock.acquire()
-            res = super(ThreadSafeDatabase, self).edit_index(*args, **kwargs)
-        finally:
-            if self.opened:
-                self.indexes_locks[res] = RLock()
-            self.main_lock.release()
-        return res
-
-    def set_indexes(self, *args, **kwargs):
-        try:
-            self.main_lock.acquire()
-            super(ThreadSafeDatabase, self).set_indexes(*args, **kwargs)
-        finally:
-            self.main_lock.release()
-
-    def _read_indexes(self, *args, **kwargs):
-        try:
-            self.main_lock.acquire()
-            super(ThreadSafeDatabase, self)._read_indexes(*args, **kwargs)
-        finally:
-            self.main_lock.release()
-
-    def _insert_id_index(self, *args, **kwargs):
-        lock = self.indexes_locks['id']
-        try:
-            lock.acquire()
-            res = super(ThreadSafeDatabase,
-                        self)._insert_id_index(*args, **kwargs)
-        finally:
-            lock.release()
-        return res
-
-    def _update_id_index(self, *args, **kwargs):
-        lock = self.indexes_locks['id']
-        res = None
-        try:
-            lock.acquire()
-            res = super(
-                ThreadSafeDatabase, self)._update_id_index(*args, **kwargs)
-        finally:
-            lock.release()
-        return res
-
-    def _delete_id_index(self, *args, **kwargs):
-        lock = self.indexes_locks['id']
-        res = None
-        try:
-            lock.acquire()
-            res = super(
-                ThreadSafeDatabase, self)._delete_id_index(*args, **kwargs)
-        finally:
-            lock.release()
-        return res
-
-    def _single_update_index(self, index, *args, **kwargs):
-        lock = self.indexes_locks[index.name]
-        try:
-            lock.acquire()
-            super(ThreadSafeDatabase,
-                  self)._single_update_index(index, *args, **kwargs)
-        finally:
-            lock.release()
-
-    def _single_insert_index(self, index, *args, **kwargs):
-        lock = self.indexes_locks[index.name]
-        try:
-            lock.acquire()
-            super(ThreadSafeDatabase,
-                  self)._single_insert_index(index, *args, **kwargs)
-        finally:
-            lock.release()
-
-    def _single_delete_index(self, index, *args, **kwargs):
-        lock = self.indexes_locks[index.name]
-        try:
-            lock.acquire()
-            super(ThreadSafeDatabase,
-                  self)._single_delete_index(index, *args, **kwargs)
-        finally:
-            lock.release()
-
-    def _single_compact_index(self, index, *args, **kwargs):
-        lock = self.indexes_locks[index.name]
-        try:
-            lock.acquire()
-            super(ThreadSafeDatabase, self).compact_index(
-                index, *args, **kwargs)
-        finally:
-            lock.release()
-
-    def reindex_index(self, index, *args, **kwargs):
-        if isinstance(index, basestring):
-            if not index in self.indexes_names:
-                raise PreconditionsException("No index named %s" % index)
-            index = self.indexes_names[index]
-        key = index.name + "reind"
-        self.main_lock.acquire()
-        if key in self.indexes_locks:
-            lock = self.indexes_locks[index.name + "reind"]
-        else:
-            self.indexes_locks[index.name + "reind"] = RLock()
-            lock = self.indexes_locks[index.name + "reind"]
-        self.main_lock.release()
-        try:
-            lock.acquire()
-            super(ThreadSafeDatabase, self).reindex_index(
-                index, *args, **kwargs)
-        finally:
-            lock.release()
-
-    def destroy_index(self, index, *args, **kwargs):
-        if isinstance(index, basestring):
-            if not index in self.indexes_names:
-                raise PreconditionsException("No index named %s" % index)
-            index = self.indexes_names[index]
-        lock = self.indexes_locks[index.name]
-        try:
-            lock.acquire()
-            super(ThreadSafeDatabase, self).destroy_index(
-                index, *args, **kwargs)
-        finally:
-            lock.release()
-
-    def flush(self):
-        try:
-            self.main_lock.acquire()
-            super(ThreadSafeDatabase, self).flush()
-        finally:
-            self.main_lock.release()
-
-    def fsync(self):
-        try:
-            self.main_lock.acquire()
-            super(ThreadSafeDatabase, self).fsync()
-        finally:
-            self.main_lock.release()
+class ThreadSafeDatabase(SafeDatabase):
+    """
+    Thread safe version of CodernityDB that uses several lock objects,
+    for different methods / different indexes etc. It's a completely
+    different locking implementation than the SuperThreadSafe one.
+    """
+    pass
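
A hedged usage sketch contrasting the two flavours (paths illustrative): ThreadSafeDatabase takes per-index and per-operation locks, while SuperThreadSafeDatabase serializes every call behind one global lock:

    from CodernityDB.database_thread_safe import ThreadSafeDatabase

    db = ThreadSafeDatabase('/tmp/db_threads')
    db.create()
    db.insert(dict(x=1))  # goes through the per-index lock wrappers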

File contents unchanged.

CodernityDB/hash_index.py

                 break
             else:
                 if status != 'd':
-                        offset -= 1
+                    offset -= 1
 
         while limit:
             curr_data = self.buckets.read(self.entry_line_size)
                 break
             else:
                 if status != 'd':
-                        yield doc_id, rev, start, size, status
-                        limit -= 1
+                    yield doc_id, rev, start, size, status
+                    limit -= 1
 
     def get_many(self, *args, **kwargs):
         raise NotImplementedError

File contents unchanged.

CodernityDB/indexcreator.py

+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2011-2012 Codernity (http://codernity.com)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import re
+import tokenize
+import token
+import uuid
+
+
+class IndexCreatorException(Exception):
+    def __init__(self, ex, line=None):
+        self.ex = ex
+        self.line = line
+
+    def __str__(self):
+        if self.line:
+            return repr(self.ex + "(in line: %d)" % self.line)
+        return repr(self.ex)
+
+
+class IndexCreatorFunctionException(IndexCreatorException):
+    pass
+
+
+class IndexCreatorValueException(IndexCreatorException):
+    pass
+
+
+class Parser(object):
+    def __init__(self):
+        pass
+
+    def parse(self, data, name=None):
+        if not name:
+            self.name = "_" + uuid.uuid4().hex
+        else:
+            self.name = name
+
+        self.ind = 0
+        self.stage = 0
+        self.logic = ['and', 'or', 'in']
+        self.logic2 = ['&', '|']
+        self.allowed_props = {'TreeBasedIndex': ['type', 'name', 'key_format', 'node_capacity', 'pointer_format', 'meta_format'],
+                              'HashIndex': ['type', 'name', 'key_format', 'hash_lim', 'entry_line_format']
+                              }
+        self.funcs = {'md5': (['md5'], ['.digest()']),
+                      'len': (['len'], []),
+                      'str': (['str'], []),
+                      'fix_r': (['self.fix_r'], [])
+                      }
+        self.funcs_with_body = {'fix_r':
+                                ("""   def fix_r(self,s,l):
+        e = len(s)
+        if e == l:
+            return s
+        elif e > l:
+            return s[:l]
+        else:
+            return s.rjust(l,'_')\n""", False)}
+        self.none = ['None', 'none', 'null']
+        self.props_assign = ['=', ':']
+        self.all_adj_num_comp = {token.NUMBER: (
+            token.NUMBER, token.NAME, '-', '('),
+            token.NAME: (token.NUMBER, token.NAME, '-', '('),
+            ')': (token.NUMBER, token.NAME, '-', '(')
+        }
+
+        self.all_adj_num_op = {token.NUMBER: (token.NUMBER, token.NAME, '('),
+                               token.NAME: (token.NUMBER, token.NAME, '('),
+                               ')': (token.NUMBER, token.NAME, '(')
+                               }
+        self.allowed_adjacent = {
+            "<=": self.all_adj_num_comp,
+            ">=": self.all_adj_num_comp,
+            ">": self.all_adj_num_comp,
+            "<": self.all_adj_num_comp,
+
+            "==": {token.NUMBER: (token.NUMBER, token.NAME, '('),
+                   token.NAME: (token.NUMBER, token.NAME, token.STRING, '('),
+                   token.STRING: (token.NAME, token.STRING, '('),
+                   ')': (token.NUMBER, token.NAME, token.STRING, '('),
+                   ']': (token.NUMBER, token.NAME, token.STRING, '(')
+                   },
+
+            "+": {token.NUMBER: (token.NUMBER, token.NAME, '('),
+                  token.NAME: (token.NUMBER, token.NAME, token.STRING, '('),
+                  token.STRING: (token.NAME, token.STRING, '('),
+                  ')': (token.NUMBER, token.NAME, token.STRING, '('),
+                  ']': (token.NUMBER, token.NAME, token.STRING, '(')
+                  },
+
+            "-": {token.NUMBER: (token.NUMBER, token.NAME, '('),
+                  token.NAME: (token.NUMBER, token.NAME, '('),
+                  ')': (token.NUMBER, token.NAME, '('),
+                  '<': (token.NUMBER, token.NAME, '('),
+                  '>': (token.NUMBER, token.NAME, '('),
+                  '<=': (token.NUMBER, token.NAME, '('),
+                  '>=': (token.NUMBER, token.NAME, '('),
+                  '==': (token.NUMBER, token.NAME, '('),
+                  ']': (token.NUMBER, token.NAME, '(')
+                  },
+            "*": self.all_adj_num_op,
+            "/": self.all_adj_num_op,
+            "%": self.all_adj_num_op,
+            ",": {token.NUMBER: (token.NUMBER, token.NAME, token.STRING, '{', '[', '('),
+                  token.NAME: (token.NUMBER, token.NAME, token.STRING, '(', '{', '['),
+                  token.STRING: (token.NAME, token.STRING, token.NUMBER, '(', '{', '['),
+                  ')': (token.NUMBER, token.NAME, token.STRING, '(', '{', '['),
+                  ']': (token.NUMBER, token.NAME, token.STRING, '(', '{', '['),
+                  '}': (token.NUMBER, token.NAME, token.STRING, '(', '{', '[')
+                  }
+        }
+
+        def is_num(s):
+            m = re.search('[^0-9*()+\-\s/]+', s)
+            return not m
+
+        def is_string(s):
+            m = re.search('\s*(?P<a>[\'\"]+).*?(?P=a)\s*', s)
+            return m
+        data = re.split('make_key_value\:', data)
+
+        if len(data) < 2:
+            raise IndexCreatorFunctionException(
+                "Couldn't find a definition of make_key_value function!\n")
+
+        spl1 = re.split('make_key\:', data[0])
+        spl2 = re.split('make_key\:', data[1])
+
+        self.funcs_rev = False
+
+        if len(spl1) > 1:
+            data = [spl1[0]] + [data[1]] + [spl1[1]]
+            self.funcs_rev = True
+        elif len(spl2) > 1:
+            data = [data[0]] + spl2
+        else:
+            data.append("key")
+
+        if data[1] == re.search('\s*', data[1], re.S | re.M).group(0):
+            raise IndexCreatorFunctionException("Empty function body ",
+                                                len(re.split('\n', data[0])) + (len(re.split('\n', data[2])) if self.funcs_rev else 1) - 1)
+        if data[2] == re.search('\s*', data[2], re.S | re.M).group(0):
+            raise IndexCreatorFunctionException("Empty function body ",
+                                                len(re.split('\n', data[0])) + (1 if self.funcs_rev else len(re.split('\n', data[1]))) - 1)
+        if data[0] == re.search('\s*', data[0], re.S | re.M).group(0):
+            raise IndexCreatorValueException("You didn't set any properity or you set them not at the begining of the code\n")
+
+        data = [re.split(
+            '\n', data[0]), re.split('\n', data[1]), re.split('\n', data[2])]
+        self.cnt_lines = (len(data[0]), len(data[1]), len(data[2]))
+        ind = 0
+        self.predata = data
+        self.data = [[], [], []]
+        for i, v in enumerate(self.predata[0]):
+            for k, w in enumerate(self.predata[0][i]):
+                if self.predata[0][i][k] in self.props_assign:
+                    if not is_num(self.predata[0][i][k + 1:]) and self.predata[0][i].strip()[:4] != 'type' and self.predata[0][i].strip()[:4] != 'name':
+                        s = self.predata[0][i][k + 1:]
+                        self.predata[0][i] = self.predata[0][i][:k + 1]
+
+                        m = re.search('\s+', s.strip())
+                        if not is_string(s) and not m:
+                            s = "'" + s.strip() + "'"
+                        self.predata[0][i] += s
+                        break
+
+        for n, i in enumerate(self.predata):
+            for k in i:
+                k = k.strip()
+                if k:
+                    self.data[ind].append(k)
+                    self.check_enclosures(k, n)
+            ind += 1
+
+        return self.parse_ex()
+
+    def readline(self, stage):
+        def foo():
+            if len(self.data[stage]) <= self.ind:
+                self.ind = 0
+                return ""
+            else:
+                self.ind += 1
+                return self.data[stage][self.ind - 1]
+        return foo
+
+    def add(self, l, i):
+        def add_aux(*args):
+            #print args,self.ind
+            if len(l[i]) < self.ind:
+                l[i].append([])
+            l[i][self.ind - 1].append(args)
+        return add_aux
+
+    def parse_ex(self):
+        self.index_name = ""
+        self.index_type = ""
+        self.curLine = -1
+        self.con = -1
+        self.brackets = -1
+        self.curFunc = None
+        self.colons = 0
+        self.line_cons = ([], [], [])
+        self.pre_tokens = ([], [], [])
+        self.known_dicts_in_mkv = []
+        self.prop_name = True
+        self.prop_assign = False
+        self.is_one_arg_enough = False
+        self.to_import = []
+        self.funcs_stack = []
+        self.last_line = [-1,-1,-1]
+        self.props_set = []
+
+        self.tokens = ['# %s\n' % self.name, 'class %s(' % self.name, '):\n', '   def __init__(self, *args, **kwargs):        ']
+
+        for i in xrange(3):
+            tokenize.tokenize(self.readline(i), self.add(self.pre_tokens, i))
+            # tokenize treats some keywords incorrectly, that's why we have to change some of them
+            for nk, k in enumerate(self.pre_tokens[i]):
+                for na, a in enumerate(k):
+                    if a[0] == token.NAME and a[1] in self.logic:
+                        self.pre_tokens[i][nk][
+                            na] = (token.OP, a[1], a[2], a[3], a[4])
+
+        for i in self.pre_tokens[1]:
+            self.line_cons[1].append(self.check_colons(i, 1))
+            self.check_adjacents(i, 1)
+            if self.check_for_2nd_arg(i) == -1 and not self.is_one_arg_enough:
+                raise IndexCreatorValueException("No 2nd value to return (did u forget about ',None'?", self.cnt_line_nr(i[0][4], 1))
+            self.is_one_arg_enough = False
+
+        for i in self.pre_tokens[2]:
+            self.line_cons[2].append(self.check_colons(i, 2))
+            self.check_adjacents(i, 2)
+
+        for i in self.pre_tokens[0]:
+            self.handle_prop_line(i)
+
+        self.cur_brackets = 0
+        self.tokens += ['\n        super(%s, self).__init__(*args, **kwargs)\n    def make_key_value(self, data):        ' % self.name]
+
+        for i in self.pre_tokens[1]:
+            for k in i:
+                self.handle_make_value(*k)
+
+        self.curLine = -1
+        self.con = -1
+        self.cur_brackets = 0
+        self.tokens += ['\n    def make_key(self, key):']
+
+        for i in self.pre_tokens[2]:
+            for k in i:
+                self.handle_make_key(*k)
+
+        if self.index_type == "":
+            raise IndexCreatorValueException("Missing index type definition\n")
+        if self.index_name == "":
+            raise IndexCreatorValueException("Missing index name\n")
+
+        self.tokens[0] = "# " + self.index_name + "\n" + self.tokens[0]
+
+        for i in self.funcs_with_body:
+            if self.funcs_with_body[i][1]:
+                self.tokens.insert(4, self.funcs_with_body[i][0])
+
+        for i in self.to_import:
+            self.tokens[0] += i
+        self.tokens[0] += self.tokens[1]
+        del self.tokens[1]
+
+        if self.index_type in self.allowed_props:
+            for i in self.props_set:
+                if i not in self.allowed_props[self.index_type]:
+                    raise IndexCreatorValueException("Properity %s is not allowed for index type: %s"%(i,self.index_type))
+
+        #print " ".join(self.tokens)
+        return " ".join(self.tokens)
+
+    # has to be run BEFORE tokenize
+    def check_enclosures(self, d, st):
+        encs = []
+        contr = {'(': ')', '{': '}', '[': ']', "'": "'", '"': '"'}
+        ends = [')', '}', ']', "'", '"']
+        for i in d:
+            if len(encs) > 0 and encs[-1] in ['"', "'"]:
+                if encs[-1] == i:
+                    del encs[-1]
+            elif i in contr:
+                encs += [i]
+            elif i in ends:
+                if len(encs) < 1 or contr[encs[-1]] != i:
+                    raise IndexCreatorValueException("Missing opening enclosure for \'%s\'" % i, self.cnt_line_nr(d, st))
+                del encs[-1]
+
+        if len(encs) > 0:
+            raise IndexCreatorValueException("Missing closing enclosure for \'%s\'" % encs[0], self.cnt_line_nr(d, st))
+
+    def check_adjacents(self, d, st):
+        def std_check(d, n):
+            if n == 0:
+                prev = -1
+            else:
+                prev = d[n - 1][1] if d[n - 1][0] == token.OP else d[n - 1][0]
+
+            cur = d[n][1] if d[n][0] == token.OP else d[n][0]
+
+            # there always is an endmarker at the end, but this is a precaution
+            if n + 2 > len(d):
+                nex = -1
+            else:
+                nex = d[n + 1][1] if d[n + 1][0] == token.OP else d[n + 1][0]
+
+            if prev not in self.allowed_adjacent[cur]:
+                raise IndexCreatorValueException("Wrong left value of the %s" % cur, self.cnt_line_nr(line, st))
+
+            # there is an assumption that the whole data always ends with a 0 marker; the idea probably needs a rewrite to allow more whitespace
+            # between tokens, so it will be handled anyway
+            elif nex not in self.allowed_adjacent[cur][prev]:
+                raise IndexCreatorValueException("Wrong right value of the %s" % cur, self.cnt_line_nr(line, st))
+
+        for n, (t, i, _, _, line) in enumerate(d):
+            if t == token.NAME or t == token.STRING:
+                if n + 1 < len(d) and d[n + 1][0] in [token.NAME, token.STRING]:
+                    raise IndexCreatorValueException("Did you forget about an operator in between?", self.cnt_line_nr(line, st))
+            elif i in self.allowed_adjacent:
+                std_check(d, n)
+
+    def check_colons(self, d, st):
+        cnt = 0
+        br = 0
+
+        def check_ret_args_nr(a, s):
+            c_b_cnt = 0
+            s_b_cnt = 0
+            n_b_cnt = 0
+            comas_cnt = 0
+            for _, i, _, _, line in a:
+
+                if c_b_cnt == n_b_cnt == s_b_cnt == 0:
+                    if i == ',':
+                        comas_cnt += 1
+                        if (s == 1 and comas_cnt > 1) or (s == 2 and comas_cnt > 0):
+                            raise IndexCreatorFunctionException("Too much arguments to return", self.cnt_line_nr(line, st))
+                        if s == 0 and comas_cnt > 0:
+                            raise IndexCreatorValueException("A coma here doesn't make any sense", self.cnt_line_nr(line, st))
+
+                    elif i == ':':
+                            if s == 0:
+                                raise IndexCreatorValueException("A colon here doesn't make any sense", self.cnt_line_nr(line, st))
+                            raise IndexCreatorFunctionException("Two colons don't make any sense", self.cnt_line_nr(line, st))
+
+                if i == '{':
+                    c_b_cnt += 1
+                elif i == '}':
+                    c_b_cnt -= 1
+                elif i == '(':
+                    n_b_cnt += 1
+                elif i == ')':
+                    n_b_cnt -= 1
+                elif i == '[':
+                    s_b_cnt += 1
+                elif i == ']':
+                    s_b_cnt -= 1
+
+        def check_if_empty(a):
+            for i in a:
+                if i not in [token.NEWLINE, token.INDENT, token.ENDMARKER]:
+                    return False
+            return True
+        if st == 0:
+            check_ret_args_nr(d, st)
+            return
+
+        for n, i in enumerate(d):
+            if i[1] == ':':
+                if br == 0:
+                    if len(d) < n or check_if_empty(d[n + 1:]):
+                        raise IndexCreatorValueException(
+                            "Empty return value", self.cnt_line_nr(i[4], st))
+                    elif len(d) >= n:
+                        check_ret_args_nr(d[n + 1:], st)
+                    return cnt
+                else:
+                    cnt += 1
+            elif i[1] == '{':
+                br += 1
+            elif i[1] == '}':
+                br -= 1
+        check_ret_args_nr(d, st)
+        return -1
+
+    def check_for_2nd_arg(self, d):
+        c_b_cnt = 0  # curly brackets counter '{}'
+        s_b_cnt = 0  # square brackets counter '[]'
+        n_b_cnt = 0  # normal brackets counter '()'
+
+        def check_2nd_arg(d, ind):
+            d = d[ind[0]:]
+            for t, i, (n, r), _, line in d:
+                if i == '{' or i is None:
+                    return 0
+                elif t == token.NAME:
+                    self.known_dicts_in_mkv.append((i, (n, r)))
+                    return 0
+                elif t == token.STRING or t == token.NUMBER:
+                    raise IndexCreatorValueException("Second return value of make_key_value function has to be a dictionary!", self.cnt_line_nr(line, 1))
+
+        for ind in enumerate(d):
+            t, i, _, _, _ = ind[1]
+            if s_b_cnt == n_b_cnt == c_b_cnt == 0:
+                if i == ',':
+                    return check_2nd_arg(d, ind)
+                elif (t == token.NAME and i not in self.funcs) or i == '{':
+                    self.is_one_arg_enough = True
+
+            if i == '{':
+                c_b_cnt += 1
+                self.is_one_arg_enough = True
+            elif i == '}':
+                c_b_cnt -= 1
+            elif i == '(':
+                n_b_cnt += 1
+            elif i == ')':
+                n_b_cnt -= 1
+            elif i == '[':
+                s_b_cnt += 1
+            elif i == ']':
+                s_b_cnt -= 1
+        return -1
+
+    def cnt_line_nr(self, l, stage):
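+        # Maps a stripped source line back to its 1-based line number in the
+        # user's input, offsetting by the lengths of the sections parsed
+        # before the given stage (funcs_rev marks that make_key was defined
+        # before make_key_value).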
+        nr = -1
+        for n, i in enumerate(self.predata[stage]):
+            if l == i.strip():
+                nr = n
+        if nr == -1:
+            return -1
+
+        if stage == 0:
+            return nr + 1
+        elif stage == 1:
+            return nr + self.cnt_lines[0] + (self.cnt_lines[2] - 1 if self.funcs_rev else 0)
+        elif stage == 2:
+            return nr + self.cnt_lines[0] + (self.cnt_lines[1] - 1 if not self.funcs_rev else 0)
+
+        return -1
+
+    def handle_prop_line(self, d):
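+        # Handles one "name = value" / "name: value" property line.  The
+        # "type" and "name" properties are consumed directly (setting
+        # index_type / index_name and importing TreeBasedIndex when needed);
+        # any other property becomes a kwargs["..."] assignment in the
+        # generated code.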
+        d_len = len(d)
+        if d[d_len - 1][0] == token.ENDMARKER:
+            d_len -= 1
+
+        if d_len < 3:
+            raise IndexCreatorValueException("Can't handle properity assingment ", self.cnt_line_nr(d[0][4], 0))
+
+        if d[1][1] not in self.props_assign:
+            raise IndexCreatorValueException(
+                "Did you forget : or =?", self.cnt_line_nr(d[0][4], 0))
+
+        if d[0][0] == token.NAME or d[0][0] == token.STRING:
+            if d[0][1] in self.props_set:
+                raise IndexCreatorValueException("Properity %s is set more than once" % d[0][1],self.cnt_line_nr(d[0][4],0))
+            self.props_set += [d[0][1]]
+            if d[0][1] == "type" or d[0][1] == "name":
+                t, tk, _, _, line = d[2]
+
+                if d_len > 3:
+                    raise IndexCreatorValueException(
+                        "Wrong value to assign", self.cnt_line_nr(line, 0))
+
+                if t == token.STRING:
+                    m = re.search(r'\s*(?P<a>[\'\"]+)(.*?)(?P=a)\s*', tk)
+                    if m:
+                        tk = m.groups()[1]
+                elif t != token.NAME:
+                    raise IndexCreatorValueException(
+                        "Wrong value to assign", self.cnt_line_nr(line, 0))
+
+                if d[0][1] == "type":
+                    if d[2][1] == "TreeBasedIndex":
+                        self.to_import += ["from CodernityDB.tree_index import TreeBasedIndex\n"]
+                    self.tokens.insert(2, tk)
+                    self.index_type = tk
+                else:
+                    self.index_name = tk
+                return
+            else:
+                self.tokens += ['\n        kwargs["' + d[0][1] + '"]']
+        else:
+            raise IndexCreatorValueException("Can't handle properity assingment ", self.cnt_line_nr(d[0][4], 0))
+
+        self.tokens += ['=']
+
+        self.check_adjacents(d[2:], 0)
+        self.check_colons(d[2:], 0)
+
+        for i in d[2:]:
+            self.tokens += [i[1]]
+
+    def generate_func(self, t, tk, pos_start, pos_end, line, hdata, stage):
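+        # Translates one token of a simplified function body into generated
+        # Python: bare names become data["..."] / key["..."] lookups, '&&'
+        # and '||' map to and/or, DSL helpers are expanded through
+        # self.funcs, and the first token of a line opens either a "return"
+        # or an "if" depending on whether the line carries a condition.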
+        if self.last_line[stage] != -1 and pos_start[0] > self.last_line[stage] and line != '':
+            raise IndexCreatorFunctionException("This line will never be executed!", self.cnt_line_nr(line, stage))
+        if t == token.ENDMARKER:
+            return
+
+        if pos_start[1] == 0:
+            if self.line_cons[stage][pos_start[0] - 1] == -1:
+                self.tokens += ['\n        return']
+                self.last_line[stage] = pos_start[0]
+            else:
+                self.tokens += ['\n        if']
+        elif tk == ':' and self.line_cons[stage][pos_start[0] - 1] > -1:
+            if self.line_cons[stage][pos_start[0] - 1] == 0:
+                self.tokens += [':\n            return']
+                return
+            self.line_cons[stage][pos_start[0] - 1] -= 1
+
+        if tk in self.logic2:
+            if line[pos_start[1] - 1] != tk and line[pos_start[1] + 1] != tk:
+                self.tokens += [tk]
+            if line[pos_start[1] - 1] != tk and line[pos_start[1] + 1] == tk:
+                if tk == '&':
+                    self.tokens += ['and']
+                else:
+                    self.tokens += ['or']
+            return
+
+        if self.brackets != 0:
+            def search_through_known_dicts(name):
+                for i, (n, r) in self.known_dicts_in_mkv:
+                    if i == name and r > pos_start[1] and n == pos_start[0] and hdata == 'data':
+                        return True
+                return False
+
+            if t == token.NAME and len(self.funcs_stack) > 0 and self.funcs_stack[-1][0] == 'md5' and search_through_known_dicts(tk):
+                raise IndexCreatorValueException("Second value returned by make_key_value cannot be a dictionary", self.cnt_line_nr(line, 1))
+
+        if tk == ')':
+            self.cur_brackets -= 1
+            if len(self.funcs_stack) > 0 and self.cur_brackets == self.funcs_stack[-1][1]:
+                self.tokens += [tk]
+                self.tokens += self.funcs[self.funcs_stack[-1][0]][1]
+                del self.funcs_stack[-1]
+                return
+        if tk == '(':
+            self.cur_brackets += 1
+
+        if tk in self.none:
+            self.tokens += ['None']
+            return
+
+        if t == token.NAME and tk not in self.logic and tk != hdata:
+            if tk not in self.funcs:
+                self.tokens += [hdata + '["' + tk + '"]']
+            else:
+                self.tokens += self.funcs[tk][0]
+                if tk in self.funcs_with_body:
+                    self.funcs_with_body[tk] = (
+                        self.funcs_with_body[tk][0], True)
+                self.funcs_stack += [(tk, self.cur_brackets)]
+        else:
+            self.tokens += [tk]
+
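+    # Thin wrappers: make_key_value bodies are rewritten against the record
+    # ("data", stage 1), make_key bodies against the key argument ("key",
+    # stage 2).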
+    def handle_make_value(self, t, tk, pos_start, pos_end, line):
+        self.generate_func(t, tk, pos_start, pos_end, line, 'data', 1)
+
+    def handle_make_key(self, t, tk, pos_start, pos_end, line):
+        self.generate_func(t, tk, pos_start, pos_end, line, 'key', 2)

CodernityDB/tree_index.py

                 new_leaf += struct.pack(
                     '<' + self.single_leaf_record_format *
                     (nr_of_records_to_rewrite + 1),
-                                      new_key,
-                                      new_doc_id,
-                                      new_start,
-                                      new_size,
-                                      'o',
-                                      *records_after)
+                    new_key,
+                    new_doc_id,
+                    new_start,
+                    new_size,
+                    'o',
+                    *records_after)
                 new_leaf += blanks
                 self.buckets.write(new_leaf)
                 self._update_leaf_size_and_pointers(leaf_start,
-                                                   old_leaf_size,
-                                                   prev_l,
-                                                   new_leaf_start)
+                                                    old_leaf_size,
+                                                    prev_l,
+                                                    new_leaf_start)
                 if next_l:  # when next_l is 0 there is no next leaf to update; avoids writing data at position 0 of the file
                     self._update_leaf_prev_pointer(
                         next_l, new_leaf_start)
                 else:
                     data_split_index += 1
             records_to_rewrite = records_to_rewrite[:data_split_index * 5]\
-                    + new_record_data\
-                    + records_to_rewrite[data_split_index * 5:]
+                + new_record_data\
+                + records_to_rewrite[data_split_index * 5:]
             self._update_leaf_ready_data(leaf_start,
-                                        start_position,
-                                        nr_of_elements + 1,
-                                        records_to_rewrite),
+                                         start_position,
+                                         nr_of_elements + 1,
+                                         records_to_rewrite)
             return True
         else:  # did not find any deleted records in leaf
             return False
     def _prepare_new_root_data(self, root_key, left_pointer, right_pointer, children_flag='n'):
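         # Packs a fresh root node: element count 1, the children flag, the
         # left pointer, the promoted key and the right pointer, padded with
         # blank key/pointer slots up to node_capacity.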
         new_root = struct.pack(
             '<' + self.node_heading_format + self.single_node_record_format,
-                             1,
-                             children_flag,
-                             left_pointer,
-                             root_key,
-                             right_pointer)
+            1,
+            children_flag,
+            left_pointer,
+            root_key,
+            right_pointer)
         new_root += (self.key_size + self.pointer_size) * (self.
-            node_capacity - 1) * '\x00'
+                                                           node_capacity - 1) * '\x00'
         return new_root
 
     def _create_new_root_from_node(self, node_start, children_flag, nr_of_keys_to_rewrite, new_node_size, old_node_size, new_key, new_pointer):
             self.buckets.seek(self.data_start + self.node_heading_size)
             #read all keys with key>new_key
             data = self.buckets.read(self.pointer_size + self.
-                node_capacity * (self.key_size + self.pointer_size))
+                                     node_capacity * (self.key_size + self.pointer_size))
             old_node_data = struct.unpack('<' + self.pointer_format + self.node_capacity *
                                           (self.key_format + self.pointer_format), data)
             self.buckets.seek(0, 2)  # end of file
                 key_moved_to_root = new_key
                 #prepare new nodes data
                 left_node = struct.pack('<' + self.node_heading_format + self.pointer_format +
-                                      old_node_size * (self.
-                                          key_format + self.pointer_format),
-                                      old_node_size,
-                                      children_flag,
-                                      *old_node_data[:old_node_size * 2 + 1])
+                                        old_node_size * (self.
+                                                         key_format + self.pointer_format),
+                                        old_node_size,
+                                        children_flag,
+                                        *old_node_data[:old_node_size * 2 + 1])
 
                 right_node = struct.pack('<' + self.node_heading_format + self.pointer_format +
-                                      new_node_size * (self.
-                                          key_format + self.pointer_format),
-                                      new_node_size,
-                                      children_flag,
-                                      new_pointer,
-                                      *old_node_data[old_node_size * 2 + 1:])
+                                         new_node_size * (self.
+                                                          key_format + self.pointer_format),
+                                         new_node_size,
+                                         children_flag,
+                                         new_pointer,
+                                         *old_node_data[old_node_size * 2 + 1:])
             elif nr_of_keys_to_rewrite > new_node_size:
                 key_moved_to_root = old_node_data[old_node_size * 2 - 1]
                 #prepare new nodes data
                     keys_after = old_node_data[-(
                         nr_of_keys_to_rewrite) * 2:old_node_size * 2 - 1]
                 left_node = struct.pack('<' + self.node_heading_format + self.pointer_format +
-                                      (self.node_capacity - nr_of_keys_to_rewrite) * (self.
-                                          key_format + self.pointer_format),
-                                      old_node_size,
-                                      children_flag,
-                                      *keys_before)
+                                        (self.node_capacity - nr_of_keys_to_rewrite) * (self.
+                                                                                        key_format + self.pointer_format),
+                                        old_node_size,
+                                        children_flag,
+                                        *keys_before)
                 left_node += struct.pack(
                     '<' + (self.key_format + self.pointer_format) *
-                           (nr_of_keys_to_rewrite - new_node_size),
-                                       new_key,
-                                       new_pointer,
-                                       *keys_after)
+                    (nr_of_keys_to_rewrite - new_node_size),
+                    new_key,
+                    new_pointer,
+                    *keys_after)
 
                 right_node = struct.pack('<' + self.node_heading_format + self.pointer_format +
-                                      new_node_size * (self.
-                                          key_format + self.pointer_format),
-                                      new_node_size,
-                                      children_flag,
-                                      *old_node_data[old_node_size * 2:])
+                                         new_node_size * (self.
+                                                          key_format + self.pointer_format),
+                                         new_node_size,
+                                         children_flag,
+                                         *old_node_data[old_node_size * 2:])
             else:
#               inserting key into second half of node and creating new root
                 key_moved_to_root = old_node_data[old_node_size * 2 + 1]
                 #prepare new nodes data
                 left_node = struct.pack('<' + self.node_heading_format + self.pointer_format +
-                                      old_node_size * (self.
-                                          key_format + self.pointer_format),
-                                      old_node_size,
-                                      children_flag,
-                                      *old_node_data[:old_node_size * 2 + 1])
+                                        old_node_size * (self.
+                                                         key_format + self.pointer_format),
+                                        old_node_size,
+                                        children_flag,
+                                        *old_node_data[:old_node_size * 2 + 1])
                 if nr_of_keys_to_rewrite:
                     keys_before = old_node_data[(old_node_size +
-                        1) * 2:-nr_of_keys_to_rewrite * 2]
+                                                 1) * 2:-nr_of_keys_to_rewrite * 2]
                     keys_after = old_node_data[-nr_of_keys_to_rewrite * 2:]
                 else:
                     keys_before = old_node_data[(old_node_size + 1) * 2:]
                     keys_after = []
                 right_node = struct.pack('<' + self.node_heading_format + self.pointer_format +
-                                      (new_node_size - nr_of_keys_to_rewrite - 1) * (self.
-                                          key_format + self.pointer_format),
-                                        new_node_size,
-                                        children_flag,
-                                        *keys_before)
+                                         (new_node_size - nr_of_keys_to_rewrite - 1) * (self.
+                                                                                        key_format + self.pointer_format),
+                                         new_node_size,
+                                         children_flag,
+                                         *keys_before)
                 right_node += struct.pack(
                     '<' + (nr_of_keys_to_rewrite + 1) *
-                           (self.key_format + self.pointer_format),
-                                        new_key,
-                                        new_pointer,
-                                        *keys_after)
+                    (self.key_format + self.pointer_format),
+                    new_key,
+                    new_pointer,
+                    *keys_after)
             new_root = self._prepare_new_root_data(key_moved_to_root,
-                                                    new_node_start,
-                                                    new_node_start + self.node_size)
+                                                   new_node_start,
+                                                   new_node_start + self.node_size)
             left_node += (self.node_capacity - old_node_size) * \
                 (self.key_size + self.pointer_size) * '\x00'
             #adding blanks after new node
             if nr_of_keys_to_rewrite == new_node_size:  # insert key into first half of node
                 #reading second half of node
                 self.buckets.seek(self._calculate_key_position(node_start,
-                                                              old_node_size,
-                                                              'n') + self.pointer_size)
+                                                               old_node_size,
+                                                               'n') + self.pointer_size)
                 #read all keys with key>new_key
                 data = self.buckets.read(nr_of_keys_to_rewrite *
-                    (self.key_size + self.pointer_size))
+                                         (self.key_size + self.pointer_size))
                 old_node_data = struct.unpack('<' + nr_of_keys_to_rewrite *
-                                                (self.key_format + self.pointer_format), data)
+                                              (self.key_format + self.pointer_format), data)
                 #write new node at end of file
                 self.buckets.seek(0, 2)
                 new_node_start = self.buckets.tell()
                 #prepare new node_data
                 new_node = struct.pack('<' + self.node_heading_format + self.pointer_format +
-                                     (self.key_format +
-                                         self.pointer_format) * new_node_size,
-                                        new_node_size,
-                                        children_flag,
-                                        new_pointer,
-                                        *old_node_data)
+                                       (self.key_format +
+                                        self.pointer_format) * new_node_size,
+                                       new_node_size,
+                                       children_flag,
+                                       new_pointer,
+                                       *old_node_data)
                 new_node += blanks
                 #write new node
                 self.buckets.write(new_node)
                 new_node_start = self.buckets.tell()
                 #prepare new node_data
                 new_node = struct.pack('<' + self.node_heading_format +
-                                     self.pointer_format + (self.key_format +
-                                         self.pointer_format) * new_node_size,
-                                        new_node_size,
-                                        children_flag,
-                                        old_node_data[-new_node_size * 2 - 1],
-                                        *old_node_data[-new_node_size * 2:])
+                                       self.pointer_format + (self.key_format +
+                                                              self.pointer_format) * new_node_size,
+                                       new_node_size,
+                                       children_flag,
+                                       old_node_data[-new_node_size * 2 - 1],
+                                       *old_node_data[-new_node_size * 2:])
                 new_node += blanks
                 #write new node
                 self.buckets.write(new_node)
                 self.buckets.write(
                     struct.pack(
                         '<' + (self.key_format + self.pointer_format) *
-                               (nr_of_keys_to_rewrite - new_node_size),
-                                               new_key,
-                                               new_pointer,
-                                               *old_node_data[:-(new_node_size + 1) * 2]))
+                        (nr_of_keys_to_rewrite - new_node_size),
+                        new_key,
+                        new_pointer,
+                        *old_node_data[:-(new_node_size + 1) * 2]))
 
                 self._read_single_node_key.delete(node_start)
                 self._read_node_nr_of_elements_and_children_flag.delete(
             else:  # key goes into second half
                 #reading second half of node
                 self.buckets.seek(self._calculate_key_position(node_start,
-                                                              old_node_size,
-                                                              'n')
+                                                               old_node_size,
+                                                               'n')
                                   + self.pointer_size)
                 data = self.buckets.read(
                     new_node_size * (self.key_size + self.pointer_size))
                 old_node_data = struct.unpack('<' + new_node_size *
-                                                (self.key_format + self.pointer_format), data)
+                                              (self.key_format + self.pointer_format), data)
                 #find key which goes to parent node
                 key_moved_to_parent_node = old_node_data[0]
                 self.buckets.seek(0, 2)  # end of file
                     keys_before = old_node_data
                     keys_after = []
                 new_node = struct.pack('<' + self.node_heading_format + self.pointer_format +
-                                     (self.key_format + self.pointer_format) *
-                                     (new_node_size -
-                                         nr_of_keys_to_rewrite - 1),
-                                     new_node_size,
-                                     children_flag,
-                                     first_leaf_pointer,
-                                     *keys_before)
+                                       (self.key_format + self.pointer_format) *
+                                       (new_node_size -
+                                        nr_of_keys_to_rewrite - 1),
+                                       new_node_size,
+                                       children_flag,
+                                       first_leaf_pointer,
+                                       *keys_before)
                 new_node += struct.pack('<' + (self.key_format + self.pointer_format) *
-                                      (nr_of_keys_to_rewrite + 1),
-                                      new_key,
-                                      new_pointer,
-                                      *keys_after)
+                                        (nr_of_keys_to_rewrite + 1),
+                                        new_key,
+                                        new_pointer,
+                                        *keys_after)
                 new_node += blanks
                 #write new node
                 self.buckets.write(new_node)
                 leaf_start, key, doc_id, start, size, status)
             return
         leaf_start, new_record_position, nr_of_records_to_rewrite, full_leaf, on_deleted\
-        = self._find_place_in_leaf(key, leaf_start, nr_of_elements)
+            = self._find_place_in_leaf(key, leaf_start, nr_of_elements)
         if full_leaf:
             try:  # check if leaf has parent node
                 leaf_parent_pointer = nodes_stack.pop()
             except IndexError:  # leaf is a root
                 leaf_parent_pointer = 0
             split_data = self._split_leaf(leaf_start,
-                                     nr_of_records_to_rewrite,
-                                     key,
-                                     doc_id,
-                                     start,
-                                     size,
-                                     status,
-                                     create_new_root=(False if leaf_parent_pointer else True))
+                                          nr_of_records_to_rewrite,
+                                          key,
+                                          doc_id,
+                                          start,
+                                          size,
+                                          status,
+                                          create_new_root=(False if leaf_parent_pointer else True))
             if split_data is not None:  # None means the split itself created a new root or was replaced by update_if_has_deleted
                 new_leaf_start_position, key_moved_to_parent_node = split_data
                 self._insert_new_key_into_node(leaf_parent_pointer,
-                                              key_moved_to_parent_node,
-                                              leaf_start,
-                                              new_leaf_start_position,
-                                              nodes_stack,
-                                              indexes)
+                                               key_moved_to_parent_node,
+                                               leaf_start,
+                                               new_leaf_start_position,
+                                               nodes_stack,
+                                               indexes)
         else:  # there is a place for record in leaf
             self.buckets.seek(leaf_start)
             self._update_leaf(
                 leaf_start, new_record_position, nr_of_elements, nr_of_records_to_rewrite,
-                             on_deleted, key, doc_id, start, size, status)
+                on_deleted, key, doc_id, start, size, status)
 
     def _update_node(self, new_key_position, nr_of_keys_to_rewrite, new_key, new_pointer):
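         # Writes new_key/new_pointer at its position inside the node; when
         # nr_of_keys_to_rewrite keys follow that position, they are read
         # back and rewritten one key/pointer slot further.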
         if nr_of_keys_to_rewrite == 0:
             self.buckets.seek(new_key_position)
             self.buckets.write(
                 struct.pack('<' + self.key_format + self.pointer_format,
-                                           new_key,
-                                           new_pointer))
+                            new_key,
+                            new_pointer))
             self.flush()
         else:
             self.buckets.seek(new_key_position)
             data = self.buckets.read(nr_of_keys_to_rewrite * (
-                 self.key_size + self.pointer_size))
+                                     self.key_size + self.pointer_size))
             keys_to_rewrite = struct.unpack(
                 '<' + nr_of_keys_to_rewrite * (self.key_format + self.pointer_format), data)
             self.buckets.seek(new_key_position)
             self.buckets.write(
                 struct.pack(
                     '<' + (nr_of_keys_to_rewrite + 1) *