1. Luis Henrique Fagundes
  2. moin-2.0

Commits

Thomas Waldmann  committed 1aa2003

New routing middleware based on namespaces, details see below.

Dispatch to different backends based on a namespace, so e.g.
SomePage -> use content/default backend
userprofiles:JoeDoe -> use userprofile backend

If you do not give a NAMESPACE item in the metadata, it will parse the NAME item
and create the NAMESPACE item (and modify the NAME item to only have the name
without namespace).

Uses 3 mappings:
* namespace_mapping: namespace prefix -> backend_name
* backend_mapping: backend_name -> Backend
* acl_mapping: maps namespace:itemname prefixes to sets of acls

namespace_mapping and backend_mapping are kept separate so we can map different
namespaces into same backend (this is possible as NAMESPACE key is stored in
meta data). We also need to have a backend name anyway for serialization.

ACLs might not be applied correctly right now (one has to give namespace:itemname
to the acl check).

  • Participants
  • Parent commits 2d49d04
  • Branches namespaces

Comments (0)

Files changed (11)

File MoinMoin/app.py

View file
     # A ns_mapping consists of several lines, where each line is made up like this:
     # mountpoint, unprotected backend
     # Just initialize with unprotected backends.
-    app.router = routing.Backend(app.cfg.namespace_mapping)
+    app.router = routing.Backend(app.cfg.namespace_mapping, app.cfg.backend_mapping)
     if app.cfg.create_storage:
         app.router.create()
     app.router.open()

File MoinMoin/config/default.py

View file
             raise error.ConfigurationError("No storage configuration specified! You need to define a namespace_mapping. " + \
                                            "For further reference, please see HelpOnStorageConfiguration.")
 
+        if self.backend_mapping is None:
+            raise error.ConfigurationError("No storage configuration specified! You need to define a backend_mapping. " + \
+                                           "For further reference, please see HelpOnStorageConfiguration.")
+
         if self.acl_mapping is None:
             raise error.ConfigurationError("No acl configuration specified! You need to define a acl_mapping. " + \
                                            "For further reference, please see HelpOnStorageConfiguration.")
     ('interwiki_map', {},
      "Dictionary of wiki_name -> wiki_url"),
     ('namespace_mapping', None,
-    "This needs to point to a list of tuples, each tuple containing: Namespace identifier, backend. " + \
-    "E.g.: [('/', FSBackend('wiki/data')), ]. Please see HelpOnStorageConfiguration for further reference."),
+    "A list of tuples, each tuple containing: Namespace identifier, backend name. " + \
+    "E.g.: [('', 'default')), ]. Please see HelpOnStorageConfiguration for further reference."),
+    ('backend_mapping', None,
+    "A dictionary that maps backend names to backends. " + \
+    "E.g.: {'default': Backend(), }. Please see HelpOnStorageConfiguration for further reference."),
     ('acl_mapping', None,
     "This needs to point to a list of tuples, each tuple containing: name prefix, acl protection to be applied to matching items. " + \
     "E.g.: [('', dict(default='All:read,write,create')), ]. Please see HelpOnStorageConfiguration for further reference."),

File MoinMoin/conftest.py

View file
 
 
 def init_test_app(given_config):
-    namespace_mapping, acl_mapping = create_simple_mapping("stores:memory:", given_config.content_acl)
+    namespace_mapping, backend_mapping, acl_mapping = \
+        create_simple_mapping("stores:memory:", given_config.content_acl)
     more_config = dict(
         namespace_mapping=namespace_mapping,
+        backend_mapping=backend_mapping,
         acl_mapping=acl_mapping,
         create_storage = True, # create a fresh storage at each app start
         destroy_storage = True, # kill all storage contents at app shutdown

File MoinMoin/constants/keys.py

View file
 # metadata keys
 NAME = "name"
 NAME_OLD = "name_old"
+NAMESPACE = "namespace"
 
 # if an item is reverted, we store the revision number we used for reverting there:
 REVERTED_TO = "reverted_to"
 EMAIL = "email"
 OPENID = "openid"
 
+# in which backend is some revision stored?
+BACKENDNAME = "backendname"
+
 # index names
 LATEST_REVS = 'latest_revs'
 ALL_REVS = 'all_revs'

File MoinMoin/storage/__init__.py

View file
  |                                 listing, lookup by name, ACL checks, ...
  v
  Routing  Middleware               dispatches to multiple backends based on the
- |                 |               name, cares about absolute and relative names
+ |                 |               namespace
  v                 v
  "stores" Backend  Other Backend   simple stuff: store, get, destroy revisions
  |           |
 """
 
 
-CONTENT, USERPROFILES = 'content', 'userprofiles'
+CONTENT, USERPROFILES = u'content', u'userprofiles'
 
 BACKENDS_PACKAGE = 'MoinMoin.storage.backends'
 
     return module.MutableBackend.from_uri(backend_uri)
 
 
-def create_mapping(uri, mounts, acls):
-    namespace_mapping = [(mounts[nsname],
-                          backend_from_uri(uri % dict(nsname=nsname, kind="%(kind)s")))
-                         for nsname in mounts]
+def create_mapping(uri, namespaces, backends, acls):
+    namespace_mapping = namespaces.items()
     acl_mapping = acls.items()
+    backend_mapping = [(backend_name,
+                        backend_from_uri(uri % dict(backend=backend_name, kind="%(kind)s")))
+                        for backend_name in backends]
     # we need the longest mountpoints first, shortest last (-> '' is very last)
     namespace_mapping = sorted(namespace_mapping, key=lambda x: len(x[0]), reverse=True)
     acl_mapping = sorted(acl_mapping, key=lambda x: len(x[0]), reverse=True)
-    return namespace_mapping, acl_mapping
+    return namespace_mapping, dict(backend_mapping), acl_mapping
 
 def create_simple_mapping(uri='stores:fs:instance',
                           content_acl=None, user_profile_acl=None):
 
     :params uri: '<backend_name>:<backend_uri>' (general form)
                  backend_name must be a backend module name (e.g. stores)
-                 the backend_uri must have a %(nsname)s placeholder, it gets replaced
-                 by the CONTENT, USERPROFILES strings and result is given to
-                 to that backend's constructor
+                 the backend_uri must have a %(backend)s placeholder, it gets replaced
+                 by the name of the backend (a simple, ascii string) and the result
+                 is given to that backend's constructor
 
                  for the 'stores' backend, backend_uri looks like '<store_name>:<store_uri>'
                  store_name must be a store module name (e.g. fs)
                  by 'meta' or 'data' and the result is given to that store's constructor
 
                  e.g.:
-                 'stores:fs:/path/to/store/%(nsname)s/%(kind)s' will create a mapping
+                 'stores:fs:/path/to/store/%(backend)s/%(kind)s' will create a mapping
                  using the 'stores' backend with 'fs' stores and everything will be stored
                  to below /path/to/store/.
     """
         content_acl = dict(before=u'', default=u'All:read,write,create', after=u'', hierarchic=False)
     if not user_profile_acl:
         user_profile_acl = dict(before=u'All:', default=u'', after=u'', hierarchic=False)
-    mounts = {
-        CONTENT: '',
-        USERPROFILES: 'UserProfile',
+    namespaces = {
+        u'': CONTENT,
+        u'userprofiles:': USERPROFILES,
+    }
+    backends = {
+        CONTENT: None,
+        USERPROFILES: None,
     }
     acls = {
-        'UserProfile/': user_profile_acl,
+        'userprofiles:': user_profile_acl,
         '': content_acl,
     }
-    return create_mapping(uri, mounts, acls)
+    return create_mapping(uri, namespaces, backends, acls)
 

File MoinMoin/storage/middleware/_tests/test_indexing.py

View file
 from flask import g as flaskg
 
 from MoinMoin.config import NAME, SIZE, ITEMID, REVID, DATAID, HASH_ALGORITHM, CONTENT, COMMENT, \
-                            LATEST_REVS, ALL_REVS
+                            LATEST_REVS, ALL_REVS, NAMESPACE
 
 from ..indexing import IndexingMiddleware
 
         assert expected_revid == doc[REVID]
         assert unicode(data) == doc[CONTENT]
 
+    def test_namespaces(self):
+        item_name_n = u'normal'
+        item = self.imw[item_name_n]
+        rev_n = item.store_revision(dict(name=item_name_n, contenttype=u'text/plain'), StringIO(str(item_name_n)))
+        item_name_u = u'userprofiles:userprofile'
+        item = self.imw[item_name_u]
+        rev_u = item.store_revision(dict(name=item_name_u, contenttype=u'text/plain'), StringIO(str(item_name_u)))
+        item = self.imw[item_name_n]
+        rev_n = item.get_revision(rev_n.revid)
+        assert rev_n.meta[NAMESPACE] == u''
+        assert rev_n.meta[NAME] == item_name_n
+        item = self.imw[item_name_u]
+        rev_u = item.get_revision(rev_u.revid)
+        assert rev_u.meta[NAMESPACE] == u'userprofiles'
+        assert rev_u.meta[NAME] == item_name_u.split(':')[1]
+
 class TestProtectedIndexingMiddleware(object):
     reinit_storage = True # cleanup after each test method
 

File MoinMoin/storage/middleware/_tests/test_routing.py

View file
+# Copyright: 2011 MoinMoin:ThomasWaldmann
+# License: GNU GPL v2 (or any later version), see LICENSE.txt for details.
+
+"""
+MoinMoin - routing middleware tests
+"""
+
+
+from __future__ import absolute_import, division
+
+from StringIO import StringIO
+
+import pytest
+
+from MoinMoin.config import NAME, NAMESPACE, REVID
+
+from ..routing import Backend as RoutingBackend
+
+from MoinMoin.storage.backends.stores import MutableBackend as StoreBackend, Backend as ROBackend
+from MoinMoin.storage.stores.memory import BytesStore as MemoryBytesStore
+from MoinMoin.storage.stores.memory import FileStore as MemoryFileStore
+
+
+def make_ro_backend():
+    """
+    Create a read-only backend that already contains two items.
+
+    A mutable store backend is created and populated first; then its
+    meta/data stores are wrapped into a read-only Backend, so tests can
+    check that write operations against a readonly mount fail.
+    """
+    store = StoreBackend(MemoryBytesStore(), MemoryFileStore())
+    store.create()
+    store.open()
+    # populate while the backend is still mutable
+    store.store({NAME: 'test'}, StringIO(''))
+    store.store({NAME: 'test2'}, StringIO(''))
+    # wrap the same underlying stores into a read-only backend
+    return ROBackend(store.meta_store, store.data_store)
+
+
+
+def pytest_funcarg__router(request):
+    """
+    pytest funcarg: a routing Backend with three mounts:
+    u'other:' -> a mutable backend, u'ro:' -> a readonly, pre-populated
+    backend, u'' (default, must be last) -> a mutable backend.
+    """
+    default_be = StoreBackend(MemoryBytesStore(), MemoryFileStore())
+    other_be = StoreBackend(MemoryBytesStore(), MemoryFileStore())
+    ro_be = make_ro_backend()
+    namespaces = [(u'other:', 'other'), (u'ro:', 'ro'), (u'', 'default')]
+    backends = {'other': other_be, 'ro': ro_be, 'default': default_be, }
+    router = RoutingBackend(namespaces, backends)
+    router.create()
+    router.open()
+
+    # make sure the router is torn down after each test using this funcarg
+    @request.addfinalizer
+    def finalize():
+        router.close()
+        router.destroy()
+
+    return router
+
+def test_store_get_del(router):
+    default_name = u'foo'
+    default_backend_name, default_revid = router.store(dict(name=default_name), StringIO(''))
+    other_name = u'other:bar'
+    other_backend_name, other_revid = router.store(dict(name=other_name), StringIO(''))
+
+    # check if store() updates the to-store metadata with correct NAMESPACE and NAME
+    default_meta, _ = router.retrieve(default_backend_name, default_revid)
+    other_meta, _ = router.retrieve(other_backend_name, other_revid)
+    assert (u'', default_name == default_meta[NAMESPACE], default_meta[NAME])
+    assert (other_name.split(':') == other_meta[NAMESPACE], other_meta[NAME])
+
+    # delete revs:
+    router.remove(default_backend_name, default_revid)
+    router.remove(other_backend_name, other_revid)
+
+
+def test_store_readonly_fails(router):
+    """Storing into a namespace mounted read-only must raise TypeError."""
+    with pytest.raises(TypeError):
+        router.store(dict(name=u'ro:testing'), StringIO(''))
+
+def test_del_readonly_fails(router):
+    ro_be_name, ro_id = next(iter(router)) # we have only readonly items
+    print ro_be_name, ro_id
+    with pytest.raises(TypeError):
+        router.remove(ro_be_name, ro_id)
+
+
+def test_destroy_create_dont_touch_ro(router):
+    """
+    destroy()/create() must only affect the mutable backends: after the
+    cycle, items stored into mutable mounts are gone, while the contents
+    of the readonly mount are untouched.
+    """
+    existing = set(router)
+    default_be_name, default_revid = router.store(dict(name=u'foo'), StringIO(''))
+    other_be_name, other_revid = router.store(dict(name=u'other:bar'), StringIO(''))
+
+    # wipe and recreate all mutable backends
+    router.close()
+    router.destroy()
+    router.create()
+    router.open()
+
+    # only the pre-existing (readonly) revisions survive
+    assert set(router) == existing
+
+
+def test_iter(router):
+    """
+    Iterating the router must yield (backend_name, revid) for every
+    revision in every mounted backend, including newly stored ones.
+    """
+    existing_before = set([revid for be_name, revid in router])
+    default_be_name, default_revid = router.store(dict(name=u'foo'), StringIO(''))
+    other_be_name, other_revid = router.store(dict(name=u'other:bar'), StringIO(''))
+    existing_now = set([revid for be_name, revid in router])
+    assert existing_now == set([default_revid, other_revid]) | existing_before
+

File MoinMoin/storage/middleware/_tests/test_serialization.py

View file
 
 
 contents = [
-    (u'Foo', {'name': u'Foo'}, ''),
-    (u'Foo', {'name': u'Foo'}, '2nd'),
-    (u'Subdir', {'name': u'Subdir'}, ''),
-    (u'Subdir/Foo', {'name': u'Subdir/Foo'}, ''),
-    (u'Subdir/Bar', {'name': u'Subdir/Bar'}, ''),
+    (u'Foo', {'name': u'Foo', 'contenttype': u'text/plain'}, ''),
+    (u'Foo', {'name': u'Foo', 'contenttype': u'text/plain'}, '2nd'),
+    (u'Subdir', {'name': u'Subdir', 'contenttype': u'text/plain'}, ''),
+    (u'Subdir/Foo', {'name': u'Subdir/Foo', 'contenttype': u'text/plain'}, ''),
+    (u'Subdir/Bar', {'name': u'Subdir/Bar', 'contenttype': u'text/plain'}, ''),
 ]
 
 
     meta_store = BytesStore()
     data_store = FileStore()
     _backend = MutableBackend(meta_store, data_store)
-    mapping = [('', _backend)]
-    backend = RoutingBackend(mapping)
+    namespaces = [('', u'backend')]
+    backends = {u'backend': _backend}
+    backend = RoutingBackend(namespaces, backends)
     backend.create()
     backend.open()
     request.addfinalizer(backend.destroy)

File MoinMoin/storage/middleware/indexing.py

View file
                             LANGUAGE, USERID, ADDRESS, HOSTNAME, SIZE, ACTION, COMMENT, \
                             CONTENT, ITEMLINKS, ITEMTRANSCLUSIONS, ACL, EMAIL, OPENID, \
                             ITEMID, REVID, CURRENT, PARENTID, \
-                            LATEST_REVS, ALL_REVS
+                            LATEST_REVS, ALL_REVS, BACKENDNAME
 from MoinMoin import user
 from MoinMoin.search.analyzers import item_name_analyzer, MimeTokenizer, AclTokenizer
 from MoinMoin.themes import utctimestamp
 INDEXES = [LATEST_REVS, ALL_REVS, ]
 
 
-def backend_to_index(meta, content, schema, wikiname):
+def backend_to_index(meta, content, schema, wikiname, backend_name):
     """
     Convert backend metadata/data to a whoosh document.
 
     doc[NAME_EXACT] = doc[NAME]
     doc[WIKINAME] = wikiname
     doc[CONTENT] = content
+    doc[BACKENDNAME] = backend_name
     return doc
 
 
             REVID: ID(unique=True, stored=True),
             # parent revision id
             PARENTID: ID(stored=True),
+            # backend name (which backend is this rev stored in?)
+            BACKENDNAME: ID(stored=True),
             # MTIME from revision metadata (converted to UTC datetime)
             MTIME: DATETIME(stored=True),
             # tokenized CONTENTTYPE from metadata
         self.destroy()
         os.rename(self.index_dir_tmp, self.index_dir)
 
-    def index_revision(self, meta, content, async=True):
+    def index_revision(self, meta, content, backend_name, async=False): # True
         """
         Index a single revision, add it to all-revs and latest-revs index.
 
         :param content: preprocessed (filtered) indexable content
         :param async: if True, use the AsyncWriter, otherwise use normal writer
         """
-        doc = backend_to_index(meta, content, self.schemas[ALL_REVS], self.wikiname)
+        doc = backend_to_index(meta, content, self.schemas[ALL_REVS], self.wikiname, backend_name)
         if async:
             writer = AsyncWriter(self.ix[ALL_REVS])
         else:
             writer = self.ix[ALL_REVS].writer()
         with writer as writer:
             writer.update_document(**doc) # update, because store_revision() may give us an existing revid
-        doc = backend_to_index(meta, content, self.schemas[LATEST_REVS], self.wikiname)
+        doc = backend_to_index(meta, content, self.schemas[LATEST_REVS], self.wikiname, backend_name)
         if async:
             writer = AsyncWriter(self.ix[LATEST_REVS])
         else:
             if docnum_remove is not None:
                 # we are removing a revid that is in latest revs index
                 try:
-                    latest_names_revids = self._find_latest_names_revids(self.ix[ALL_REVS], Term(ITEMID, itemid))
+                    latest_backends_revids = self._find_latest_backends_revids(self.ix[ALL_REVS], Term(ITEMID, itemid))
                 except AttributeError:
                     # workaround for bug #200 AttributeError: 'FieldCache' object has no attribute 'code'
-                    latest_names_revids = []
-                if latest_names_revids:
+                    latest_backends_revids = []
+                if latest_backends_revids:
                     # we have a latest revision, just update the document in the index:
-                    assert len(latest_names_revids) == 1 # this item must have only one latest revision
-                    latest_name_revid = latest_names_revids[0]
+                    assert len(latest_backends_revids) == 1 # this item must have only one latest revision
+                    latest_backend_revid = latest_backends_revids[0]
                     # we must fetch from backend because schema for LATEST_REVS is different than for ALL_REVS
                     # (and we can't be sure we have all fields stored, too)
-                    meta, _ = self.backend.retrieve(*latest_name_revid)
+                    meta, _ = self.backend.retrieve(*latest_backend_revid)
                     # we only use meta (not data), because we do not want to transform data->content again (this
                     # is potentially expensive) as we already have the transformed content stored in ALL_REVS index:
                     with self.ix[ALL_REVS].searcher() as searcher:
-                        doc = searcher.document(revid=latest_name_revid[1])
+                        doc = searcher.document(revid=latest_backend_revid[1])
                         content = doc[CONTENT]
-                    doc = backend_to_index(meta, content, self.schemas[LATEST_REVS], self.wikiname)
+                    doc = backend_to_index(meta, content, self.schemas[LATEST_REVS], self.wikiname, backend_name=latest_backend_revid[0])
                     writer.update_document(**doc)
                 else:
                     # this is no revision left in this item that could be the new "latest rev", just kill the rev
         else:
             writer = MultiSegmentWriter(index, procs, limitmb)
         with writer as writer:
-            for mountpoint, revid in revids:
+            for backend_name, revid in revids:
+                print backend_name, revid
                 if mode in ['add', 'update', ]:
-                    meta, data = self.backend.retrieve(mountpoint, revid)
+                    meta, data = self.backend.retrieve(backend_name, revid)
                     content = convert_to_indexable(meta, data, is_new=False)
-                    doc = backend_to_index(meta, content, schema, wikiname)
+                    doc = backend_to_index(meta, content, schema, wikiname, backend_name)
                 if mode == 'update':
                     writer.update_document(**doc)
                 elif mode == 'add':
                 else:
                     raise ValueError("mode must be 'update', 'add' or 'delete', not '{0}'".format(mode))
 
-    def _find_latest_names_revids(self, index, query=None):
+    def _find_latest_backends_revids(self, index, query=None):
         """
-        find the latest revids using the all-revs index
+        find the latest revision identifiers using the all-revs index
 
         :param index: an up-to-date and open ALL_REVS index
         :param query: query to search only specific revisions (optional, default: all items/revisions)
-        :returns: a list of tuples (name, latest revid)
+        :returns: a list of tuples (backend name, latest revid)
         """
         if query is None:
             query = Every()
             result = searcher.search(query, groupedby=ITEMID, sortedby=FieldFacet(MTIME, reverse=True))
             by_item = result.groups(ITEMID)
             # values in v list are in same relative order as in results, so latest MTIME is first:
-            latest_names_revids = [(searcher.stored_fields(v[0])[NAME],
-                                    searcher.stored_fields(v[0])[REVID])
-                                   for v in by_item.values()]
-        return latest_names_revids
+            latest_backends_revids = [(searcher.stored_fields(v[0])[BACKENDNAME],
+                                      searcher.stored_fields(v[0])[REVID])
+                                      for v in by_item.values()]
+        return latest_backends_revids
 
     def rebuild(self, tmp=False, procs=1, limitmb=256):
         """
             # build an index of all we have (so we know what we have)
             all_revids = self.backend # the backend is an iterator over all revids
             self._modify_index(index, self.schemas[ALL_REVS], self.wikiname, all_revids, 'add', procs, limitmb)
-            latest_names_revids = self._find_latest_names_revids(index)
+            latest_backends_revids = self._find_latest_backends_revids(index)
         finally:
             index.close()
         # now build the index of the latest revisions:
         index = open_dir(index_dir, indexname=LATEST_REVS)
         try:
-            self._modify_index(index, self.schemas[LATEST_REVS], self.wikiname, latest_names_revids, 'add', procs, limitmb)
+            self._modify_index(index, self.schemas[LATEST_REVS], self.wikiname, latest_backends_revids, 'add', procs, limitmb)
         finally:
             index.close()
 
         index_dir = self.index_dir_tmp if tmp else self.index_dir
         index_all = open_dir(index_dir, indexname=ALL_REVS)
         try:
-            # NOTE: self.backend iterator gives (mountpoint, revid) tuples, which is NOT
+            # NOTE: self.backend iterator gives (backend_name, revid) tuples, which is NOT
             # the same as (name, revid), thus we do the set operations just on the revids.
             # first update ALL_REVS index:
-            revids_mountpoints = dict((revid, mountpoint) for mountpoint, revid in self.backend)
-            backend_revids = set(revids_mountpoints)
+            revids_backends = dict((revid, backend_name) for backend_name, revid in self.backend)
+            backend_revids = set(revids_backends)
             with index_all.searcher() as searcher:
-                ix_revids_names = dict((doc[REVID], doc[NAME]) for doc in searcher.all_stored_fields())
-            revids_mountpoints.update(ix_revids_names) # this is needed for stuff that was deleted from storage
-            ix_revids = set(ix_revids_names)
+                ix_revids_backends = dict((doc[REVID], doc[BACKENDNAME]) for doc in searcher.all_stored_fields())
+            revids_backends.update(ix_revids_backends) # this is needed for stuff that was deleted from storage
+            ix_revids = set(ix_revids_backends)
             add_revids = backend_revids - ix_revids
             del_revids = ix_revids - backend_revids
             changed = add_revids or del_revids
-            add_revids = [(revids_mountpoints[revid], revid) for revid in add_revids]
-            del_revids = [(revids_mountpoints[revid], revid) for revid in del_revids]
+            add_revids = [(revids_backends[revid], revid) for revid in add_revids]
+            del_revids = [(revids_backends[revid], revid) for revid in del_revids]
             self._modify_index(index_all, self.schemas[ALL_REVS], self.wikiname, add_revids, 'add')
             self._modify_index(index_all, self.schemas[ALL_REVS], self.wikiname, del_revids, 'delete')
 
-            backend_latest_names_revids = set(self._find_latest_names_revids(index_all))
+            backend_latest_backends_revids = set(self._find_latest_backends_revids(index_all))
         finally:
             index_all.close()
         index_latest = open_dir(index_dir, indexname=LATEST_REVS)
             # now update LATEST_REVS index:
             with index_latest.searcher() as searcher:
                 ix_revids = set(doc[REVID] for doc in searcher.all_stored_fields())
-            backend_latest_revids = set(revid for name, revid in backend_latest_names_revids)
+            backend_latest_revids = set(revid for name, revid in backend_latest_backends_revids)
             upd_revids = backend_latest_revids - ix_revids
-            upd_revids = [(revids_mountpoints[revid], revid) for revid in upd_revids]
+            upd_revids = [(revids_backends[revid], revid) for revid in upd_revids]
             self._modify_index(index_latest, self.schemas[LATEST_REVS], self.wikiname, upd_revids, 'update')
             self._modify_index(index_latest, self.schemas[LATEST_REVS], self.wikiname, del_revids, 'delete')
         finally:
                 raise ValueError('need overwrite=True to overwrite existing revisions')
         meta, data, content = self.preprocess(meta, data)
         data.seek(0)  # rewind file
-        revid = backend.store(meta, data)
+        backend_name, revid = backend.store(meta, data)
         meta[REVID] = revid
-        self.indexer.index_revision(meta, content)
+        self.indexer.index_revision(meta, content, backend_name)
         if not overwrite:
             self._current = self.indexer._document(revid=revid)
         return Revision(self, revid)
         Destroy revision <revid>.
         """
         rev = Revision(self, revid)
-        self.backend.remove(rev.name, revid)
+        self.backend.remove(rev.backend_name, revid)
         self.indexer.remove_revision(revid)
 
     def destroy_all_revisions(self):
         self.item = item
         self.revid = revid
         self.backend = item.backend
+        self.backend_name = doc[BACKENDNAME]
         self._doc = doc
         self.meta = Meta(self, self._doc)
         self._data = None
         return self.meta.get(NAME, 'DoesNotExist')
 
     def _load(self):
-        meta, data = self.backend.retrieve(self._doc[NAME], self.revid) # raises KeyError if rev does not exist
+        meta, data = self.backend.retrieve(self.backend_name, self.revid) # raises KeyError if rev does not exist
         self.meta = Meta(self, self._doc, meta)
         self._data = data
         return meta, data

File MoinMoin/storage/middleware/routing.py

View file
+# Copyright: 2011 MoinMoin:ThomasWaldmann
+# Copyright: 2011 MoinMoin:RonnyPfannschmidt
+# License: GNU GPL v2 (or any later version), see LICENSE.txt for details.
+
+"""
+MoinMoin - namespaces middleware
+
+Routes requests to different backends depending on the namespace.
+"""
+
+
+from __future__ import absolute_import, division
+
+from MoinMoin.config import NAME, BACKENDNAME, NAMESPACE
+
+from MoinMoin.storage.backends import BackendBase, MutableBackendBase
+
+
+class Backend(MutableBackendBase):
+    """
+    namespace dispatcher, behaves readonly for readonly mounts
+    """
+    def __init__(self, namespaces, backends):
+        """
+        Initialize.
+
+        The namespace mapping given must satisfy the following criteria:
+            * Order matters.
+            * Namespaces are unicode strings like u'' (default ns), u'userprofiles:'
+              (used to store userprofiles) or u'files:' (could map to a fileserver
+              backend). Can be also a hierarchic ns spec like u'foo:bar:'.
+            * There *must* be a default namespace entry for u'' at the end of
+              the list.
+
+        namespaces = [
+            (u'userprofiles:', 'user_be'),
+            (u'', 'default_be'), # default (u'') must be last
+        ]
+
+        The backends mapping maps backend names to backend instances:
+
+        backends = {
+            'default_be': BackendInstance1,
+            'user_be': BackendInstance2,
+        }
+
+        :type namespaces: list of tuples of namespace specifier -> backend names
+        :param mapping: [(namespace, backend_name), ...]
+        :type backends: dict backend names -> backends
+        :param backends: {backend_name: backend, ...}
+        """
+        self.namespaces = namespaces
+        self.backends = backends
+
+    def open(self):
+        for backend in self.backends.values():
+            backend.open()
+
+    def close(self):
+        for backend in self.backends.values():
+            backend.close()
+
+    def _get_backend(self, fq_name):
+        """
+        For a given fully-qualified itemname (i.e. something like ns:itemname)
+        find the backend it belongs to, the itemname without namespace
+        spec and the namespace of the backend.
+
+        :param fq_name: fully-qualified itemname
+        :returns: tuple of (backend name, local item name, namespace)
+        """
+        for namespace, backend_name in self.namespaces:
+            if fq_name.startswith(namespace):
+                item_name = fq_name[len(namespace):]
+                return backend_name, item_name, namespace.rstrip(':')
+        raise AssertionError("No backend found for {0!r}. Namespaces: {1!r}".format(itemname, self.namespaces))
+
+    def __iter__(self):
+        # Note: yields enough information so we can retrieve the revision from
+        #       the right backend later (this is more than just the revid).
+        for backend_name, backend in self.backends.items():
+            for revid in backend: # TODO maybe directly yield the backend?
+                yield (backend_name, revid)
+
+    def retrieve(self, backend_name, revid):
+        backend = self.backends[backend_name]
+        meta, data = backend.retrieve(revid)
+        return meta, data
+
+    # writing part
+    def create(self):
+        for backend in self.backends.values():
+            if isinstance(backend, MutableBackendBase):
+                backend.create()
+
+    def destroy(self):
+        for backend in self.backends.values():
+            if isinstance(backend, MutableBackendBase):
+                backend.destroy()
+
+    def store(self, meta, data):
+        namespace = meta.get(NAMESPACE)
+        if namespace is None:
+            # if there is no NAMESPACE in metadata, we assume that the NAME
+            # is fully qualified and determine the namespace from it:
+            fq_name = meta[NAME]
+            backend_name, item_name, namespace = self._get_backend(fq_name)
+            # side effect: update the metadata with namespace and short item name (no ns)
+            meta[NAMESPACE] = namespace
+            meta[NAME] = item_name
+        else:
+            if namespace:
+                namespace += u':' # needed for _get_backend
+            backend_name, _, _ = self._get_backend(namespace)
+        backend = self.backends[backend_name]
+        if not isinstance(backend, MutableBackendBase):
+            raise TypeError('backend {0} is readonly!'.format(backend_name))
+        revid = backend.store(meta, data)
+        # add the BACKENDNAME after storing, so it gets only into
+        # the index, but not in stored metadata:
+        meta[BACKENDNAME] = backend_name
+        return backend_name, revid
+
+    def remove(self, backend_name, revid):
+        backend = self.backends[backend_name]
+        if not isinstance(backend, MutableBackendBase):
+            raise TypeError('backend {0} is readonly'.format(backend_name))
+        backend.remove(revid)
+

File wikiconfig.py

View file
     # 'fs:' indicates that you want to use the filesystem backend. You can also use
     # 'hg:' instead to indicate that you want to use the mercurial backend.
     # Alternatively you can set up the mapping yourself (see HelpOnStorageConfiguration).
-    namespace_mapping, acl_mapping = create_simple_mapping(
-                            uri='stores:fs:{0}/%(nsname)s/%(kind)s'.format(data_dir),
-                            # XXX we use rather relaxed ACLs for the development wiki:
-                            content_acl=dict(before=u'',
-                                             default=u'All:read,write,create,destroy,admin',
-                                             after=u'',
-                                             hierarchic=False, ),
-                            user_profile_acl=dict(before=u'',
-                                                  default=u'All:read,write,create,destroy,admin',
-                                                  after=u'',
-                                                  hierarchic=False, ),
-                            )
+    namespace_mapping, backend_mapping, acl_mapping = \
+        create_simple_mapping(uri='stores:fs:{0}/%(backend)s/%(kind)s'.format(data_dir),
+                              # XXX we use rather relaxed ACLs for the development wiki:
+                              content_acl=dict(before=u'',
+                                               default=u'All:read,write,create,destroy,admin',
+                                               after=u'',
+                                               hierarchic=False, ),
+                              user_profile_acl=dict(before=u'',
+                                                    default=u'All:read,write,create,destroy,admin',
+                                                    after=u'',
+                                                    hierarchic=False, ),
+                             )
 
     sitename = u'My MoinMoin'