Commits

Jacob Sondergaard committed 499d314

Major revision, documentation updates, full test coverage and code clean-up.

API breaking changes:

* All blocking methods now always raise an error when one occurs (the raise_error parameter has been removed)
* A callback function is now required on a number of the async methods
* HTTPError exceptions are always relaxed to a CouchException (see the migration sketch below)
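
As an illustration (not part of the commit; the document id and handler names
below are placeholders), calling code might adapt along these lines:

    import couch

    db = couch.BlockingCouch('mydatabase')
    try:
        doc = db.get_doc('some_doc_id')
    except couch.NotFound:
        # previously: db.get_doc('some_doc_id', raise_error=False)
        doc = None

    adb = couch.AsyncCouch('mydatabase')

    def on_doc(resp):
        # async methods call back with the result, or with a CouchException
        # if the database reported an error
        if isinstance(resp, Exception):
            handle_error(resp)  # placeholder error handler
        else:
            use_doc(resp)       # placeholder result handler

    adb.get_doc('some_doc_id', on_doc)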

Files changed (2)

+'''Blocking and non-blocking client interfaces to CouchDB using Tornado's
+built-in `httpclient`.
+
+This module wraps the CouchDB HTTP REST API and defines a common interface
+for blocking and non-blocking operations on a CouchDB database.
+'''
+
+import copy
+
 from tornado import httpclient
 from tornado.escape import json_decode, json_encode, url_escape
 
 
+class BlockingCouch(object):
+    '''Basic wrapper class for blocking operations on a CouchDB.
 
-class BlockingCouch(object):
-    '''Basic wrapper class for blocking operations on a CouchDB'''
+    Example usage::
 
-    def __init__(self, db_name, host='localhost', port=5984):
-        self.couch_url = 'http://{0}:{1}'.format(host, port)
+        import couch
+
+        db = couch.BlockingCouch('mydatabase')
+        db.create_db()
+        r = db.save_doc({'msg': 'My first document'})
+        doc = db.get_doc(r['id'])
+        db.delete_doc(doc)
+
+    For any method of this class: if an error is returned from the database,
+    an appropriate CouchException is raised.
+    '''
+
+    def __init__(self, db_name='', couch_url='http://127.0.0.1:5984',
+            **request_args):
+        '''Creates a `BlockingCouch`.
+
+        All parameters are optional, though `db_name` is required for most
+        methods to work.
+
+        :arg string db_name: Database name
+        :arg string couch_url: The URL of the CouchDB server, including port
+            number but without authentication credentials.
+        :arg keyword request_args: Arguments applied when making requests to
+            the database. This may include `auth_username` and `auth_password`
+            for basic authentication. See `httpclient.HTTPRequest` for other
+            possible arguments.
+            By default `use_gzip` is set to False. When accessing a non-local
+            CouchDB it may be relevant to set `use_gzip` to True.
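+
+        Example, connecting with basic authentication (the credentials shown
+        are placeholders)::
+
+            db = BlockingCouch('mydatabase', auth_username='user',
+                    auth_password='secret')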
+        '''
+        request_args.setdefault('use_gzip', False)
+        self.request_args = request_args
         self.client = httpclient.HTTPClient()
+        self.couch_url = couch_url
         self.db_name = db_name
 
+    #
+    # Database operations
+    #
 
-   # Database operations
+    def create_db(self):
+        '''Creates database'''
+        return self._http_put('/' + self.db_name)
 
-    def create_db(self, raise_error=True):
-        '''Creates database'''
-        return self._http_put(''.join(['/', self.db_name, '/']), raise_error=raise_error)
+    def delete_db(self):
+        '''Deletes database'''
+        return self._http_delete('/' + self.db_name)
 
-    def delete_db(self, raise_error=True):
-        '''Deletes database'''
-        return self._http_delete(''.join(['/', self.db_name, '/']), raise_error=raise_error)
+    def list_dbs(self):
+        '''List names of databases'''
+        return self._http_get('/_all_dbs')
 
-    def list_dbs(self, raise_error=True):
-        '''List names of databases'''
-        return self._http_get('/_all_dbs', raise_error=raise_error)
+    def info_db(self):
+        '''Get info about the database'''
+        return self._http_get('/' + self.db_name)
 
-    def info_db(self, raise_error=True):
-        '''Get info about the database'''
-        return self._http_get(''.join(['/', self.db_name, '/']), raise_error=raise_error)
-
-    def pull_db(self, source, create_target=False, raise_error=True):
-        '''Replicate changes from a source database to current (target) database'''
-        body = json_encode({'source': source, 'target': self.db_name, 'create_target': create_target})
-        return self._http_post('/_replicate', body, raise_error=raise_error, connect_timeout=120.0, request_timeout=120.0)
+    def pull_db(self, source, create_target=False):
+        '''Replicate changes from a source database to the current (target)
+        database.'''
+        body = json_encode({'source': source, 'target': self.db_name,
+                'create_target': create_target})
+        return self._http_post('/_replicate', body, request_timeout=120.0)
 
     def uuids(self, count=1):
         '''Get one or more uuids'''
-        if count > 1:
-            url = ''.join(['/_uuids?count=', str(count)])
-        else:
-            url = '/_uuids'
-        return self._http_get(url)['uuids']
+        return self._http_get('/_uuids?count={0}'.format(count))['uuids']
 
+    #
+    # Document operations
+    #
 
-    # Document operations
-    
-    def get_doc(self, doc_id, raise_error=True):
-        '''Get document with the given id.'''
-        url = ''.join(['/', self.db_name, '/', url_escape(doc_id)])
-        return self._http_get(url, raise_error=raise_error)
+    def get_doc(self, doc_id):
+        '''Get document with the given `doc_id`.'''
+        url = '/{0}/{1}'.format(self.db_name, url_escape(doc_id))
+        return self._http_get(url)
 
-    def get_docs(self, doc_ids, raise_error=True):
-        '''Get multiple documents with the given id's'''
-        url = ''.join(['/', self.db_name, '/_all_docs?include_docs=true'])
+    def get_docs(self, doc_ids):
+        '''Get multiple documents with the given list of `doc_ids`.
+
+        Returns a list containing the documents, in the same order as the
+        provided document ids.
+
+        If one or more documents are not found in the database, an exception
+        is raised.
+        '''
+        url = '/{0}/_all_docs?include_docs=true'.format(self.db_name)
         body = json_encode({'keys': doc_ids})
-        resp = self._http_post(url, body, raise_error=raise_error)
-        return [row['doc'] if 'doc' in row else row for row in resp['rows']]
+        resp = self._http_post(url, body)
+        return [row['doc'] for row in resp['rows']]
 
-    def save_doc(self, doc, raise_error=True):
-        '''Save/create a document in the database. Returns a dict with id
-           and rev of the saved doc.'''
+    def save_doc(self, doc):
+        '''Save/create a document in the database.
+        Returns a dict with id and rev of the saved doc.'''
         body = json_encode(doc)
-        if '_rev' in doc:
+        if '_id' in doc and '_rev' in doc:
             # update an existing document
-            url = ''.join(['/', self.db_name, '/', url_escape(doc['_id'])])
-            return self._http_put(url, body, doc=doc, raise_error=raise_error)
+            url = '/{0}/{1}'.format(self.db_name, url_escape(doc['_id']))
+            return self._http_put(url, body)
         else:
             # save a new document
-            url = ''.join(['/', self.db_name])
-            return self._http_post(url, body, doc=doc, raise_error=raise_error)
+            url = '/' + self.db_name
+            return self._http_post(url, body)
 
-    def save_docs(self, docs, all_or_nothing=False, raise_error=True):
-        '''Save/create multiple documents. Returns a list of dicts with id
-           and rev of the saved docs.'''
+    def save_docs(self, docs, all_or_nothing=False):
+        '''Save/create multiple documents.
+        Returns a list of dicts with id and rev of the saved docs.'''
         # use bulk docs API to update the docs
-        url = ''.join(['/', self.db_name, '/_bulk_docs'])
+        url = '/{0}/_bulk_docs'.format(self.db_name)
         body = json_encode({'all_or_nothing': all_or_nothing, 'docs': docs})
-        return self._http_post(url, body, raise_error=raise_error)
-        
-    def delete_doc(self, doc, raise_error=True):
+        return self._http_post(url, body)
+
+    def delete_doc(self, doc):
         '''Delete a document'''
         if '_rev' not in doc or '_id' not in doc:
-            raise KeyError('No id or revision information in doc')
-        url = ''.join(['/', self.db_name, '/', url_escape(doc['_id']), '?rev=', doc['_rev']])
-        return self._http_delete(url, raise_error=raise_error)
+            raise KeyError('Missing id or revision information in doc')
+        url = '/{0}/{1}?rev={2}'.format(self.db_name, url_escape(doc['_id']),
+                doc['_rev'])
+        return self._http_delete(url)
 
-    def delete_docs(self, docs, all_or_nothing=False, raise_error=True):
+    def delete_docs(self, docs, all_or_nothing=False):
         '''Delete multiple documents'''
         if any('_rev' not in doc or '_id' not in doc for doc in docs):
-            raise KeyError('No id or revision information in one or more docs')
-        # mark docs as deleted
-        deleted = {'_deleted': True}
-        [doc.update(deleted) for doc in docs]
+            raise KeyError('Missing id or revision information in one or more '
+                    'docs')
+        # make list of docs to mark as deleted
+        deleted = [{'_id': doc['_id'], '_rev': doc['_rev'], '_deleted': True}
+                for doc in docs]
         # use bulk docs API to update the docs
-        url = ''.join(['/', self.db_name, '/_bulk_docs'])
-        body = json_encode({'all_or_nothing': all_or_nothing, 'docs': docs})
-        return self._http_post(url, body, raise_error=raise_error)
+        url = '/{0}/_bulk_docs'.format(self.db_name)
+        body = json_encode({'all_or_nothing': all_or_nothing, 'docs': deleted})
+        return self._http_post(url, body)
 
-    def get_attachment(self, doc, attachment_name, mimetype=None, raise_error=True):
-        '''Open a document attachment. The doc should at least contain an _id key.
-           If mimetype is not specified, the doc shall contain _attachments key with
-           info about the named attachment.'''
+    def get_attachment(self, doc, attachment_name, mimetype=None):
+        '''Get document attachment.
+        The parameter `doc` should at least contain an `_id` key.
+        If mimetype is not specified, `doc` shall contain an `_attachments`
+        key with info about the named attachment.'''
         if '_id' not in doc:
             raise ValueError('Missing key named _id in doc')
         if not mimetype:
             # get mimetype from the doc
             if '_attachments' not in doc:
-                raise ValueError('No attachments in doc, cannot get content type of attachment')
+                raise ValueError('No attachments in doc, cannot get content '
+                        'type of attachment')
             elif attachment_name not in doc['_attachments']:
-                raise ValueError('Document does not have an attachment by the given name')
+                raise ValueError('Document does not have an attachment by the '
+                        'given name')
             else:
                 mimetype = doc['_attachments'][attachment_name]['content_type']
-        url = ''.join(['/', self.db_name, '/', url_escape(doc['_id']), '/',
-                       url_escape(attachment_name)])
+        url = '/{0}/{1}/{2}'.format(self.db_name, url_escape(doc['_id']),
+                url_escape(attachment_name))
         headers = {'Accept': mimetype}
-        return self._http_get(url, headers=headers, raise_error=raise_error)
+        return self._http_get(url, headers=headers)
 
-    def save_attachment(self, doc, attachment, raise_error=True):
-        '''Save an attachment to the specified doc. The attachment shall be
-        a dict with keys: mimetype, name, data. The doc shall be a dict, at
-        least having the key _id, and if doc is existing in the database,
-        it shall also contain the key _rev'''
-        if any(key not in attachment for key in ['mimetype', 'name', 'data']):
-            raise KeyError('Attachment dict is missing one or more required keys')
-        if '_rev' in doc:
-            q = ''.join(['?rev=', doc['_rev']])
-        else:
-            q = ''
-        url = ''.join(['/', self.db_name, '/', url_escape(doc['_id']), '/',
-                       url_escape(attachment['name']), q])
+    def save_attachment(self, doc, attachment):
+        '''Save an attachment to the specified doc.
+        The attachment shall be a dict with keys: `mimetype`, `name`, `data`.
+        The doc shall be a dict, at least having the key `_id`, and if the doc
+        already exists in the database, it shall also contain the key
+        `_rev`.'''
+        if any(key not in attachment for key in ('mimetype', 'name', 'data')):
+            raise KeyError('Attachment dict is missing one or more required '
+                    'keys')
+        url = '/{0}/{1}/{2}{3}'.format(self.db_name, url_escape(doc['_id']),
+                url_escape(attachment['name']),
+                '?rev={0}'.format(doc['_rev']) if '_rev' in doc else '')
         headers = {'Content-Type': attachment['mimetype']}
         body = attachment['data']
-        return self._http_put(url, body, headers=headers, raise_error=raise_error)
+        return self._http_put(url, body, headers=headers)
 
-    def delete_attachment(self, doc, attachment_name, raise_error=True):
-        '''Delete an attachment to the specified doc. The attatchment shall be
-           a dict with keys: mimetype, name, data. The doc shall be a dict, at
-           least with the keys: _id and _rev'''
+    def delete_attachment(self, doc, attachment_name):
+        '''Delete a named attachment from the specified doc.
+        The doc shall be a dict, at least with the keys: _id and _rev.'''
         if '_rev' not in doc or '_id' not in doc:
-            raise KeyError('No id or revision information in doc')
-        url = ''.join(['/', self.db_name, '/', url_escape(doc['_id']), '/',
-                       attachment_name, '?rev=', doc['_rev']])
-        return self._http_delete(url, raise_error=raise_error)
+            raise KeyError('Missing id or revision information in doc')
+        url = '/{0}/{1}/{2}?rev={3}'.format(self.db_name,
+                url_escape(doc['_id']), url_escape(attachment_name),
+                doc['_rev'])
+        return self._http_delete(url)
 
-    def view(self, design_doc_name, view_name, raise_error=True, **kwargs):
+    def view(self, design_doc_name, view_name, **kwargs):
         '''Query a pre-defined view in the specified design doc.
         The following query parameters can be specified as keyword arguments.
 
-        Limit query results to those with the specified key or list of keys
+        Limit query results to those with the specified key or list of keys:
           key=<key-value>
           keys=<list of keys>
-          
-        Limit query results to those following the specified startkey
+
+        Limit query results to those following the specified startkey:
           startkey=<key-value>
-          
-        First document id to include in the output
+
+        First document id to include in the output:
           startkey_docid=<document id>
-          
-        Limit query results to those previous to the specified endkey
+
+        Limit query results to those previous to the specified endkey:
           endkey=<key-value>
-          
-        Last document id to include in the output
+
+        Last document id to include in the output:
           endkey_docid=<document id>
-          
-        Limit the number of documents in the output
+
+        Limit the number of documents in the output:
           limit=<number of docs>
-          
-        If stale=ok is set CouchDB will not refresh the view even if it is stalled.
-          stale=ok
-          
-        Reverse the output (default is false). Note that the descending option is
-        applied before any key filtering, so you may need to swap the values of the
-        startkey and endkey options to get the expected results.
+
+        Prevent CouchDB from refreshing a stale view:
+          stale='ok'
+          stale='update_after'
+
+        Reverse the output:
           descending=true
-          descending=false
-          
+          descending=false  (default value)
+
+        Note that the descending option is applied before any key filtering, so
+        you may need to swap the values of the startkey and endkey options to
+        get the expected results.
+
         Skip the specified number of docs in the query results:
-          skip=<number>
-          
-        The group option controls whether the reduce function reduces to a set of
-        distinct keys or to a single result row:
+          skip=<number>  (default value is 0)
+
+        The group option controls whether the reduce function reduces to a set
+        of distinct keys or to a single result row:
           group=true
-          group=false
-        
+          group=false  (default value)
+
           group_level=<number>
-          
-        Use the reduce function of the view. It defaults to true, if a reduce
-        function is defined and to false otherwise.
-          reduce=true
+
+        Use the reduce function of the view:
+          reduce=true  (default value)
           reduce=false
-        
+
+        Note that the default value of reduce is true only if a reduce
+        function is defined for the view.
+
         Automatically fetch and include the document which emitted each view
-        entry (default is false).
+        entry:
           include_docs=true
-          include_docs=false
-        
-        Controls whether the endkey is included in the result. It defaults to true.
-          inclusive_end=true
+          include_docs=false  (default value)
+
+        Determine whether the endkey is included in the result:
+          inclusive_end=true  (default value)
           inclusive_end=false
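+
+        Example, a key-range query including the emitting documents (uses the
+        `test`/`msg` view defined in the test suite)::
+
+            resp = db.view('test', 'msg', startkey='a', endkey='m',
+                    include_docs=True)
+            docs = [row['doc'] for row in resp['rows']]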
         '''
-        url = ''.join(['/', self.db_name, '/_design/', design_doc_name, '/_view/', view_name])
-        return self._view(url, raise_error=raise_error, **kwargs)
+        url = '/{0}/_design/{1}/_view/{2}'.format(self.db_name,
+                design_doc_name, view_name)
+        return self._view(url, **kwargs)
 
-    def view_all_docs(self, raise_error=True, **kwargs):
+    def view_all_docs(self, **kwargs):
         '''Query the _all_docs view.
-        Accepts same keyword parameters as view()
+        Accepts the same keyword parameters as `view()`.
         '''
-        url = ''.join(['/', self.db_name, '/_all_docs'])
-        return self._view(url, raise_error=raise_error, **kwargs)
+        url = '/{0}/_all_docs'.format(self.db_name)
+        return self._view(url, **kwargs)
 
-    def _view(self, url, raise_error=True, **kwargs):
+    def _view(self, url, **kwargs):
         body = None
         options = []
         if kwargs:
                     value = url_escape(json_encode(value))
                     options.append('='.join([key, value]))
         if options:
-            url = ''.join([url, '?', '&'.join(options)])
+            url = '{0}?{1}'.format(url, '&'.join(options))
         if body:
-            return self._http_post(url, body, raise_error=raise_error)
+            return self._http_post(url, body)
         else:
-            return self._http_get(url, raise_error=raise_error)
+            return self._http_get(url)
 
+    #
+    # Basic http methods and utility functions
+    #
 
-    # Basic http methods and utility functions
+    def _parse_response(self, resp):
+        # decode the JSON body and check for errors
+        obj = json_decode(resp.body)
 
-    def _parse_response(self, resp, raise_error=True):
-        # the JSON body and check for errors
-        obj = json_decode(resp.body)
-        if raise_error:
-            if 'error' in obj:
-                raise relax_exception(httpclient.HTTPError(resp.code, obj['reason'], resp))
-            elif isinstance(obj, list):
-                # check if there is an error in the list of dicts, raise the first error seen
-                for item in obj:
-                    if 'error' in item:
-                        raise relax_exception(httpclient.HTTPError(resp.code, item['reason'], resp))
-            elif 'rows' in obj:
-                # check if there is an error in the result rows, raise the first error seen
-                for row in obj['rows']:
-                    if 'error' in row:
-                        raise relax_exception(httpclient.HTTPError(resp.code, row['error'], resp))
+        if isinstance(obj, list):
+            # check if there is an error in the list of dicts,
+            # raise the first error seen
+            for item in obj:
+                if 'error' in item:
+                    raise relax_exception(httpclient.HTTPError(
+                            resp.code if item['error'] != 'not_found' else 404,
+                            item['reason'], resp))
+
+        elif 'error' in obj:
+            raise relax_exception(httpclient.HTTPError(resp.code,
+                    obj['reason'], resp))
+
+        elif 'rows' in obj:
+            # check if there is an error in the result rows,
+            # raise the first error seen
+            for row in obj['rows']:
+                if 'error' in row:
+                    raise relax_exception(httpclient.HTTPError(
+                            resp.code if row['error'] != 'not_found' else 404,
+                            row['error'], resp))
         return obj
 
-    def _fetch(self, request, raise_error, decode=True):
+    def _fetch(self, request, decode=True):
         try:
             resp = self.client.fetch(request)
         except httpclient.HTTPError as e:
-            if raise_error:
-                raise relax_exception(e)
-            else:
-                return json_decode(e.response.body)
+            raise relax_exception(e)
+        return self._parse_response(resp) if decode else resp.body
 
-        if decode:
-            return self._parse_response(resp, raise_error)
-        else:
-            return resp.body
-
-    def _http_get(self, uri, headers=None, raise_error=True):
-        if not isinstance(headers, dict):
+    def _http_get(self, uri, headers=None):
+        if headers is None:
             headers = {}
-        if 'Accept' not in headers:
-            headers['Accept'] = 'application/json'
+        req_args = copy.deepcopy(self.request_args)
+        req_args.setdefault('headers', {}).update(headers)
+        if 'Accept' not in req_args['headers']:
+            req_args['headers']['Accept'] = 'application/json'
             decode = True
         else:
-            # not a JSON response, don't try to decode 
+            # not a JSON response, don't try to decode
             decode = False
-        r = httpclient.HTTPRequest(self.couch_url + uri, method='GET',
-                                   headers=headers, use_gzip=False)
-        return self._fetch(r, raise_error, decode)
+        req = httpclient.HTTPRequest(self.couch_url + uri, method='GET',
+                **req_args)
+        return self._fetch(req, decode)
 
-    def _http_post(self, uri, body, doc=None, raise_error=True, **kwargs):
-        headers = {'Accept': 'application/json',
-                   'Content-Type': 'application/json'}
-        r = httpclient.HTTPRequest(self.couch_url + uri, method='POST',
-                                   headers=headers, body=body,
-                                   use_gzip=False, **kwargs)
-        return self._fetch(r, raise_error)
+    def _http_post(self, uri, body, **kwargs):
+        req_args = copy.deepcopy(self.request_args)
+        req_args.update(kwargs)
+        req_args.setdefault('headers', {}).update({
+                'Accept': 'application/json',
+                'Content-Type': 'application/json'})
+        req = httpclient.HTTPRequest(self.couch_url + uri, method='POST',
+                body=body, **req_args)
+        return self._fetch(req)
 
-    def _http_put(self, uri, body='', headers=None, doc=None, raise_error=True):
-        if not isinstance(headers, dict):
+    def _http_put(self, uri, body='', headers=None):
+        if headers is None:
             headers = {}
-        if body and 'Content-Type' not in headers:
-            headers['Content-Type'] = 'application/json'
-        if 'Accept' not in headers:
-            headers['Accept'] = 'application/json'
-        r = httpclient.HTTPRequest(self.couch_url + uri, method='PUT',
-                                   headers=headers, body=body, use_gzip=False)
-        return self._fetch(r, raise_error)
+        req_args = copy.deepcopy(self.request_args)
+        req_args.setdefault('headers', {}).update(headers)
+        if body and 'Content-Type' not in req_args['headers']:
+            req_args['headers']['Content-Type'] = 'application/json'
+        if 'Accept' not in req_args['headers']:
+            req_args['headers']['Accept'] = 'application/json'
+        req = httpclient.HTTPRequest(self.couch_url + uri, method='PUT',
+                body=body, **req_args)
+        return self._fetch(req)
 
-    def _http_delete(self, uri, raise_error=True):
-        r = httpclient.HTTPRequest(self.couch_url + uri, method='DELETE',
-                                   headers={'Accept': 'application/json'},
-                                   use_gzip=False)
-        return self._fetch(r, raise_error)
-
+    def _http_delete(self, uri):
+        req_args = copy.deepcopy(self.request_args)
+        req_args.setdefault('headers', {}).update({
+                'Accept': 'application/json'})
+        req = httpclient.HTTPRequest(self.couch_url + uri, method='DELETE',
+                **req_args)
+        return self._fetch(req)
 
 
 class AsyncCouch(object):
-    '''Basic wrapper class for asynchronous operations on a CouchDB'''
+    '''Basic wrapper class for asynchronous operations on a CouchDB
 
-    def __init__(self, db_name, host='localhost', port=5984):
-        self.couch_url = 'http://{0}:{1}'.format(host, port)
+    Example usage::
+
+        import couch
+
+        class TestCouch(object):
+
+            def __init__(self):
+                self.db = couch.AsyncCouch('mydatabase')
+                self.db.create_db(self.dbcreated)
+
+            def dbcreated(self, r):
+                self.db.save_doc({'msg': 'My first document'}, self.docsaved)
+
+            def docsaved(self, r):
+                self.db.get_doc(r['id'], self.gotdoc)
+
+            def gotdoc(self, doc):
+                self.db.delete_doc(doc)
+
+    For any method of this class: if an error is returned from the database,
+    the argument to the callback will be the appropriate CouchException.
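+
+    A callback can check whether it received an error, e.g. (a minimal
+    sketch; `handle_error` and `use_result` are placeholder handlers)::
+
+        def on_response(resp):
+            if isinstance(resp, Exception):
+                handle_error(resp)
+            else:
+                use_result(resp)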
+    '''
+
+    def __init__(self, db_name='', couch_url='http://127.0.0.1:5984',
+            **request_args):
+        '''Creates an `AsyncCouch`.
+
+        All parameters are optional, though `db_name` is required for most
+        methods to work.
+
+        :arg string db_name: Database name
+        :arg string couch_url: The URL of the CouchDB server, including port
+            number but without authentication credentials.
+        :arg keyword request_args: Arguments applied when making requests to
+            the database. This may include `auth_username` and `auth_password`
+            for basic authentication. See `httpclient.HTTPRequest` for other
+            possible arguments.
+            By default `use_gzip` is set to False. When accessing a non-local
+            CouchDB it may be relevant to set `use_gzip` to True.
+        '''
+        request_args.setdefault('use_gzip', False)
+        self.request_args = request_args
         self.client = httpclient.AsyncHTTPClient()
+        self.couch_url = couch_url
         self.db_name = db_name
 
-
+    #
     # Database operations
+    #
 
     def create_db(self, callback=None):
         '''Creates a new database'''
-        self._http_put(''.join(['/', self.db_name, '/']), '', callback=callback)
+        self._http_put('/' + self.db_name, callback=callback)
 
     def delete_db(self, callback=None):
         '''Deletes the database'''
-        self._http_delete(''.join(['/', self.db_name, '/']), callback=callback)
+        self._http_delete('/' + self.db_name, callback=callback)
 
-    def list_dbs(self, callback=None):
-        '''List the databases on the server'''
+    def list_dbs(self, callback):
+        '''List names of databases'''
         self._http_get('/_all_dbs', callback=callback)
 
-    def info_db(self, callback=None):
+    def info_db(self, callback):
         '''Get info about the database'''
-        self._http_get(''.join(['/', self.db_name, '/']), callback=callback)
+        self._http_get('/' + self.db_name, callback=callback)
 
     def pull_db(self, source, callback=None, create_target=False):
-        '''Replicate changes from a source database to current (target) db'''
-        body = json_encode({'source': source, 'target': self.db_name, 'create_target': create_target})
-        self._http_post('/_replicate', body, callback=callback, connect_timeout=120.0, request_timeout=120.0)
+        '''Replicate changes from a source database to the current (target)
+        database.'''
+        body = json_encode({'source': source, 'target': self.db_name,
+                'create_target': create_target})
+        self._http_post('/_replicate', body, callback=callback,
+                request_timeout=120.0)
 
-    def uuids(self, count=1, callback=None):
-        def uuids_cb(resp):
-            if callback:
-                if isinstance(resp, Exception):
-                    callback(resp)
-                else:
-                    callback(resp['uuids'])
-        if count > 1:
-            url = ''.join(['/_uuids?count=', str(count)])
-        else:
-            url = '/_uuids'
-        self._http_get(url, callback=uuids_cb)
+    def uuids(self, callback, count=1):
+        '''Get one or more uuids'''
+        cb = lambda r: callback(r if isinstance(r, Exception) else r['uuids'])
+        self._http_get('/_uuids?count={0}'.format(count), callback=cb)
 
+    #
+    # Document operations
+    #
 
-    # Document operations
-    
-    def get_doc(self, doc_id, callback=None):
-        '''Open a document with the given id'''
-        url = ''.join(['/', self.db_name, '/', url_escape(doc_id)])
+    def get_doc(self, doc_id, callback):
+        '''Get document with the given `doc_id`.'''
+        url = '/{0}/{1}'.format(self.db_name, url_escape(doc_id))
         self._http_get(url, callback=callback)
 
-    def get_docs(self, doc_ids, callback=None):
-        '''Get multiple documents with the given id's'''
-        url = ''.join(['/', self.db_name, '/_all_docs?include_docs=true'])
+    def get_docs(self, doc_ids, callback):
+        '''Get multiple documents with the given list of `doc_ids`.
+
+        Calls back with a list containing the documents, in the same order as
+        the provided document ids.
+
+        If one or more documents are not found in the database, the callback
+        will get an exception.
+        '''
+        url = '/{0}/_all_docs?include_docs=true'.format(self.db_name)
         body = json_encode({'keys': doc_ids})
+
         def get_docs_cb(resp):
             if isinstance(resp, Exception):
                 callback(resp)
             else:
-                callback([row['doc'] if 'doc' in row else row for row in resp['rows']])
+                callback([row['doc'] for row in resp['rows']])
         self._http_post(url, body, callback=get_docs_cb)
 
     def save_doc(self, doc, callback=None):
         '''Save/create a document to/in a given database. Calls back with
-           a dict with id and rev of the saved doc.'''
+        a dict with id and rev of the saved doc.'''
         body = json_encode(doc)
         if '_id' in doc and '_rev' in doc:
-            url = ''.join(['/', self.db_name, '/', url_escape(doc['_id'])])
-            self._http_put(url, body, doc=doc, callback=callback)
+            # update an existing document
+            url = '/{0}/{1}'.format(self.db_name, url_escape(doc['_id']))
+            self._http_put(url, body, callback=callback)
         else:
-            url = ''.join(['/', self.db_name])
-            self._http_post(url, body, doc=doc, callback=callback)
+            # save a new document
+            url = '/' + self.db_name
+            self._http_post(url, body, callback=callback)
 
     def save_docs(self, docs, callback=None, all_or_nothing=False):
-        '''Save/create multiple documents. Calls back with a list of dicts with id
-           and rev of the saved docs.'''
+        '''Save/create multiple documents.
+        Calls back with a list of dicts with id and rev of the saved docs.'''
         # use bulk docs API to update the docs
-        url = ''.join(['/', self.db_name, '/_bulk_docs'])
+        url = '/{0}/_bulk_docs'.format(self.db_name)
         body = json_encode({'all_or_nothing': all_or_nothing, 'docs': docs})
         self._http_post(url, body, callback=callback)
 
     def delete_doc(self, doc, callback=None):
         '''Delete a document'''
         if '_rev' not in doc or '_id' not in doc:
-            callback(KeyError('No id or revision information in doc'))
+            callback(KeyError('Missing id or revision information in doc'))
         else:
-            url = ''.join(['/', self.db_name, '/', url_escape(doc['_id']), '?rev=', doc['_rev']])
+            url = '/{0}/{1}?rev={2}'.format(self.db_name,
+                    url_escape(doc['_id']), doc['_rev'])
             self._http_delete(url, callback=callback)
 
     def delete_docs(self, docs, callback=None, all_or_nothing=False):
         '''Delete multiple documents'''
         if any('_rev' not in doc or '_id' not in doc for doc in docs):
-            callback(KeyError('No id or revision information in one or more docs'))
+            callback(KeyError('Missing id or revision information in one or '
+                    'more docs'))
         else:
-            # mark docs as deleted
-            map(lambda doc: doc.update({'_deleted': True}), docs)
+            # make list of docs to mark as deleted
+            deleted = [{'_id': doc['_id'], '_rev': doc['_rev'],
+                    '_deleted': True} for doc in docs]
             # use bulk docs API to update the docs
-            url = ''.join(['/', self.db_name, '/_bulk_docs'])
-            body = json_encode({'all_or_nothing': all_or_nothing, 'docs': docs})
+            url = '/{0}/_bulk_docs'.format(self.db_name)
+            body = json_encode({'all_or_nothing': all_or_nothing,
+                    'docs': deleted})
             self._http_post(url, body, callback=callback)
-        
-    def get_attachment(self, doc, attachment_name, mimetype=None, callback=None):
-        '''Open a document attachment. The doc should at least contain an _id key.
-           If mimetype is not specified, the doc shall contain _attachments key with
-           info about the named attachment.'''
+
+    def get_attachment(self, doc, attachment_name, mimetype=None,
+            callback=None):
+        '''Get document attachment.
+        The parameter `doc` should at least contain an `_id` key.
+        If mimetype is not specified, `doc` shall contain an `_attachments`
+        key with info about the named attachment.'''
         if '_id' not in doc:
             callback(ValueError('Missing key named _id in doc'))
         if not mimetype:
             # get mimetype from the doc
             if '_attachments' not in doc:
-                callback(ValueError('No attachments in doc, cannot get content type of attachment'))
+                callback(ValueError('No attachments in doc, cannot get content'
+                        ' type of attachment'))
             elif attachment_name not in doc['_attachments']:
-                callback(ValueError('Document does not have an attachment by the given name'))
+                callback(ValueError('Document does not have an attachment by'
+                        ' the given name'))
             else:
                 mimetype = doc['_attachments'][attachment_name]['content_type']
-            url = ''.join(['/', self.db_name, '/', url_escape(doc['_id']), '/',
-                           url_escape(attachment_name)])
-            headers = {'Accept': mimetype}
-            self._http_get(url, headers=headers, callback=callback)
+        url = '/{0}/{1}/{2}'.format(self.db_name, url_escape(doc['_id']),
+                url_escape(attachment_name))
+        headers = {'Accept': mimetype}
+        self._http_get(url, headers=headers, callback=callback)
 
     def save_attachment(self, doc, attachment, callback=None):
-        '''Save an attachment to the specified doc. The attachment shall be
-        a dict with keys: mimetype, name, data. The doc shall be a dict, at
-        least having the key _id, and if doc is existing in the database,
-        it shall also contain the key _rev'''
+        '''Save an attachment to the specified doc.
+        The attachment shall be a dict with keys: `mimetype`, `name`, `data`.
+        The doc shall be a dict, at least having the key `_id`, and if the doc
+        already exists in the database, it shall also contain the key
+        `_rev`.'''
         if any(key not in attachment for key in ['mimetype', 'name', 'data']):
-            callback(KeyError('Attachment dict is missing one or more required keys'))
+            callback(KeyError('Attachment dict is missing one or more '
+                    'required keys'))
         else:
-            if '_rev' in doc:
-                q = ''.join(['?rev=', doc['_rev']])
-            else:
-                q = ''
-            url = ''.join(['/', self.db_name, '/', url_escape(doc['_id']), '/',
-                           url_escape(attachment['name']), q])
+            url = '/{0}/{1}/{2}{3}'.format(self.db_name,
+                    url_escape(doc['_id']), url_escape(attachment['name']),
+                    '?rev={0}'.format(doc['_rev']) if '_rev' in doc else '')
             headers = {'Content-Type': attachment['mimetype']}
-            self._http_put(url, body=attachment['data'], headers=headers, callback=callback)
+            body = attachment['data']
+            self._http_put(url, body, headers=headers, callback=callback)
 
     def delete_attachment(self, doc, attachment_name, callback=None):
-        '''Delete an attachment to the specified doc. The attatchment shall be
-           a dict with keys: mimetype, name, data. The doc shall be a dict, at
-           least with the keys: _id and _rev'''
+        '''Delete a named attachment from the specified doc.
+        The doc shall be a dict, at least with the keys: _id and _rev.'''
         if '_rev' not in doc or '_id' not in doc:
-            callback(KeyError('No id or revision information in doc'))
-        url = ''.join(['/', self.db_name, '/', url_escape(doc['_id']), '/',
-                       attachment_name, '?rev=', doc['_rev']])
+            callback(KeyError('Missing id or revision information in doc'))
+        else:
+            url = '/{0}/{1}/{2}?rev={3}'.format(self.db_name,
+                    url_escape(doc['_id']), url_escape(attachment_name),
+                    doc['_rev'])
-        self._http_delete(url, callback=callback)
+            self._http_delete(url, callback=callback)
 
-    def view(self, design_doc_name, view_name, callback=None, **kwargs):
+    def view(self, design_doc_name, view_name, callback, **kwargs):
         '''Query a pre-defined view in the specified design doc.
         The following query parameters can be specified as keyword arguments.
 
-        Limit query results to those with the specified key or list of keys
+        Limit query results to those with the specified key or list of keys:
           key=<key-value>
           keys=<list of keys>
-          
-        Limit query results to those following the specified startkey
+
+        Limit query results to those following the specified startkey:
           startkey=<key-value>
-          
-        First document id to include in the output
+
+        First document id to include in the output:
           startkey_docid=<document id>
-          
-        Limit query results to those previous to the specified endkey
+
+        Limit query results to those previous to the specified endkey:
           endkey=<key-value>
-          
-        Last document id to include in the output
+
+        Last document id to include in the output:
           endkey_docid=<document id>
-          
-        Limit the number of documents in the output
+
+        Limit the number of documents in the output:
           limit=<number of docs>
-          
-        If stale=ok is set CouchDB will not refresh the view even if it is stalled.
-          stale=ok
-          
-        Reverse the output (default is false). Note that the descending option is
-        applied before any key filtering, so you may need to swap the values of the
-        startkey and endkey options to get the expected results.
+
+        Prevent CouchDB from refreshing a stale view:
+          stale='ok'
+          stale='update_after'
+
+        Reverse the output:
           descending=true
-          descending=false
-          
+          descending=false  (default value)
+
+        Note that the descending option is applied before any key filtering, so
+        you may need to swap the values of the startkey and endkey options to
+        get the expected results.
+
         Skip the specified number of docs in the query results:
-          skip=<number>
-          
-        The group option controls whether the reduce function reduces to a set of
-        distinct keys or to a single result row:
+          skip=<number>  (default value is 0)
+
+        The group option controls whether the reduce function reduces to a set
+        of distinct keys or to a single result row:
           group=true
-          group=false
-        
+          group=false  (default value)
+
           group_level=<number>
-          
-        Use the reduce function of the view. It defaults to true, if a reduce
-        function is defined and to false otherwise.
-          reduce=true
+
+        Use the reduce function of the view:
+          reduce=true  (default value)
           reduce=false
-        
+
+        Note that the default value of reduce is true only if a reduce
+        function is defined for the view.
+
         Automatically fetch and include the document which emitted each view
-        entry (default is false).
+        entry:
           include_docs=true
-          include_docs=false
-        
-        Controls whether the endkey is included in the result. It defaults to true.
-          inclusive_end=true
+          include_docs=false  (default value)
+
+        Determine whether the endkey is included in the result:
+          inclusive_end=true  (default value)
           inclusive_end=false
         '''
-        url = ''.join(['/', self.db_name, '/_design/', design_doc_name, '/_view/', view_name])
+        url = '/{0}/_design/{1}/_view/{2}'.format(self.db_name,
+                design_doc_name, view_name)
         self._view(url, callback=callback, **kwargs)
 
-    def view_all_docs(self, callback=None, **kwargs):
+    def view_all_docs(self, callback, **kwargs):
         '''Query the _all_docs view.
-        Accepts same keyword parameters as view()
+        Accepts the same keyword parameters as `view()`.
         '''
-        url = ''.join(['/', self.db_name, '/_all_docs'])
+        url = '/{0}/_all_docs'.format(self.db_name)
         self._view(url, callback=callback, **kwargs)
 
     def _view(self, url, callback=None, **kwargs):
                     value = url_escape(json_encode(value))
                     options.append('='.join([key, value]))
         if options:
-            url = ''.join([url, '?', '&'.join(options)])
+            url = '{0}?{1}'.format(url, '&'.join(options))
         if body:
             self._http_post(url, body, callback=callback)
         else:
             self._http_get(url, callback=callback)
-        
-    # Basic http methods
+
+    #
+    # Basic http methods and utility functions
+    #
+
+    def _parse_response(self, resp, callback):
+        # decode the JSON body and check for errors
+        obj = json_decode(resp.body)
+
+        if isinstance(obj, list):
+            # check if there is an error in the list of dicts,
+            # raise the first error seen
+            for item in obj:
+                if 'error' in item:
+                    callback(relax_exception(httpclient.HTTPError(
+                            resp.code if item['error'] != 'not_found' else 404,
+                            item['reason'], resp)))
+                    return
+
+        elif 'error' in obj:
+            callback(relax_exception(httpclient.HTTPError(resp.code,
+                    obj['reason'], resp)))
+            return
+
+        elif 'rows' in obj:
+            # check if there is an error in the result rows,
+            # raise the first error seen
+            for row in obj['rows']:
+                if 'error' in row:
+                    callback(relax_exception(httpclient.HTTPError(
+                            resp.code if row['error'] != 'not_found' else 404,
+                            row['error'], resp)))
+                    return
+        callback(obj)
 
     def _http_callback(self, resp, callback, decode=True):
         if not callback:
             # error, with no response body, call back with exception
             callback(relax_exception(resp.error))
         elif decode:
-            # decode the JSON body and pass to the user callback function
-            obj = json_decode(resp.body)
-            if 'error' in obj:
-                callback(relax_exception(httpclient.HTTPError(resp.code, obj['reason'], resp)))
-            else:
-                callback(obj)
+            # decode JSON response body and pass it to the callback function
+            self._parse_response(resp, callback)
         else:
             # pass the response body directly to the user callback function
             callback(resp.body)
 
     def _http_get(self, uri, headers=None, callback=None):
-        if not isinstance(headers, dict):
+        if headers is None:
             headers = {}
-        if 'Accept' not in headers:
-            headers['Accept'] = 'application/json'
+        req_args = copy.deepcopy(self.request_args)
+        req_args.setdefault('headers', {}).update(headers)
+        if 'Accept' not in req_args['headers']:
+            req_args['headers']['Accept'] = 'application/json'
             decode = True
         else:
-            # user callback shall take perform decoding, as required
+            # not a JSON response, don't try to decode
             decode = False
-        r = httpclient.HTTPRequest(self.couch_url + uri, method='GET',
-                                   headers=headers, use_gzip=False)
-        
+        req = httpclient.HTTPRequest(self.couch_url + uri, method='GET',
+                **req_args)
         cb = lambda resp: self._http_callback(resp, callback, decode=decode)
-        self.client.fetch(r, cb)
+        self.client.fetch(req, cb)
 
-    def _http_post(self, uri, body, doc=None, callback=None, **kwargs):
-        headers = {'Accept': 'application/json',
-                   'Content-Type': 'application/json'}
-        r = httpclient.HTTPRequest(self.couch_url + uri, method='POST',
-                                   headers=headers, body=body,
-                                   use_gzip=False, **kwargs)
-        self.client.fetch(r, lambda resp: self._http_callback(resp, callback,
-                                                              doc=doc))
+    def _http_post(self, uri, body, callback=None, **kwargs):
+        req_args = copy.deepcopy(self.request_args)
+        req_args.update(kwargs)
+        req_args.setdefault('headers', {}).update({
+                'Accept': 'application/json',
+                'Content-Type': 'application/json'})
+        req = httpclient.HTTPRequest(self.couch_url + uri, method='POST',
+                body=body, **req_args)
+        cb = lambda resp: self._http_callback(resp, callback)
+        self.client.fetch(req, cb)
 
-    def _http_put(self, uri, body, headers=None, callback=None, doc=None):
-        if not isinstance(headers, dict):
+    def _http_put(self, uri, body='', headers=None, callback=None):
+        if headers is None:
             headers = {}
-        if 'Content-Type' not in headers and len(body) > 0:
-            headers['Content-Type'] = 'application/json'
-        if 'Accept' not in headers:
-            headers['Accept'] = 'application/json'
-        r = httpclient.HTTPRequest(self.couch_url + uri, method='PUT',
-                                   headers=headers, body=body, use_gzip=False)
-        self.client.fetch(r, lambda resp: self._http_callback(resp, callback,
-                                                              doc=doc))
+        req_args = copy.deepcopy(self.request_args)
+        req_args.setdefault('headers', {}).update(headers)
+        if body and 'Content-Type' not in req_args['headers']:
+            req_args['headers']['Content-Type'] = 'application/json'
+        if 'Accept' not in req_args['headers']:
+            req_args['headers']['Accept'] = 'application/json'
+        req = httpclient.HTTPRequest(self.couch_url + uri, method='PUT',
+                body=body, **req_args)
+        cb = lambda resp: self._http_callback(resp, callback)
+        self.client.fetch(req, cb)
 
     def _http_delete(self, uri, callback=None):
-        r = httpclient.HTTPRequest(self.couch_url + uri, method='DELETE',
-                                   headers={'Accept': 'application/json'},
-                                   use_gzip=False)
-        self.client.fetch(r, lambda resp: self._http_callback(resp, callback))
+        req_args = copy.deepcopy(self.request_args)
+        req_args.setdefault('headers', {}).update({
+                'Accept': 'application/json'})
+        req = httpclient.HTTPRequest(self.couch_url + uri, method='DELETE',
+                **req_args)
+        cb = lambda resp: self._http_callback(resp, callback)
+        self.client.fetch(req, cb)
 
 
 class CouchException(httpclient.HTTPError):
     '''Base class for Couch specific exceptions'''
-    def __init__(self, HTTPError, msg):
-        httpclient.HTTPError.__init__(self, HTTPError.code, msg, HTTPError.response)
+    def __init__(self, HTTPError, msg=None):
+        httpclient.HTTPError.__init__(self, HTTPError.code,
+                msg or HTTPError.message, HTTPError.response)
+
 
 class NotModified(CouchException):
     '''HTTP Error 304 (Not Modified)'''
     def __init__(self, HTTPError):
-        CouchException.__init__(self, HTTPError, 'The document has not been modified since the last update.')
+        CouchException.__init__(self, HTTPError,
+                'The document has not been modified since the last update.')
+
 
 class BadRequest(CouchException):
     '''HTTP Error 400 (Bad Request)'''
     def __init__(self, HTTPError):
-        CouchException.__init__(self, HTTPError, 'The syntax of the request was invalid or could not be processed.')
+        CouchException.__init__(self, HTTPError, 'The syntax of the request '
+                'was invalid or could not be processed.')
+
 
 class NotFound(CouchException):
     '''HTTP Error 404 (Not Found)'''
     def __init__(self, HTTPError):
-        CouchException.__init__(self, HTTPError, 'The requested resource was not found.')
+        CouchException.__init__(self, HTTPError,
+                'The requested resource was not found.')
+
 
 class MethodNotAllowed(CouchException):
     '''HTTP Error 405 (Method Not Allowed)'''
     def __init__(self, HTTPError):
-        CouchException.__init__(self, HTTPError, 'The request was made using an incorrect request method; '
-                                'for example, a GET was used where a POST was required.')
+        CouchException.__init__(self, HTTPError, 'The request was made using '
+                'an incorrect request method; for example, a GET was used '
+                'where a POST was required.')
+
 
 class Conflict(CouchException):
     '''HTTP Error 409 (Conflict)'''
     def __init__(self, HTTPError):
-        CouchException.__init__(self, HTTPError, 'The request failed because of a database conflict.')
+        CouchException.__init__(self, HTTPError, 'The request failed because '
+                'of a database conflict.')
+
 
 class PreconditionFailed(CouchException):
     '''HTTP Error 412 (Precondition Failed)'''
     def __init__(self, HTTPError):
-        CouchException.__init__(self, HTTPError, 'Could not create database - a database with that name already exists.')
+        CouchException.__init__(self, HTTPError, 'Could not create database - '
+                'a database with that name already exists.')
+
 
 class InternalServerError(CouchException):
     '''HTTP Error 500 (Internal Server Error)'''
     def __init__(self, HTTPError):
-        CouchException.__init__(self, HTTPError, 'The request was invalid and failed, or an error '
-                                'occurred within the CouchDB server that prevented it from processing the request.')
+        CouchException.__init__(self, HTTPError, 'The request was invalid and '
+                'failed, or an error occurred within the CouchDB server that '
+                'prevented it from processing the request.')
+
 
 def relax_exception(e, callback=None):
     '''Convert HTTPError exception to a Couch specific exception, if possible,
         elif e.code == 500:
             ce = InternalServerError(e)
         else:
-            # unknown HTTP Error
-            ce = e
+            # other HTTP Error
+            ce = CouchException(e)
     else:
         # unknown exception
         ce = e
-        
+
     if callback:
         callback(ce)
     else:
         return ce
-
-
 
 import couch
 
+import tornado.ioloop
+
 def run_blocking_tests():
     # set up tests
     doc1 = {'msg': 'Test doc 1'}
     db = couch.BlockingCouch('testdb')
     db2 = couch.BlockingCouch('testdb2')
 
-    db.delete_db(raise_error=False)
-    db2.delete_db(raise_error=False)
+    try:
+        db.delete_db()
+    except couch.NotFound:
+        pass
+    try:
+        db2.delete_db()
+    except couch.NotFound:
+        pass
 
     # create database
     resp = db.create_db()
-    assert resp == {u'ok': True}, 'Failed to create database'
+    assert 'ok' in resp, 'Failed to create database'
 
     # list databases
     resp = db.list_dbs()
     assert db.db_name in resp, 'Database not in list of databases'
-    
+
     # info_db
     resp = db.info_db()
     assert ('db_name' in resp) and (resp['db_name'] == db.db_name), 'No database info'
     # uuids
     resp = db.uuids()
     assert re.match('[0-9a-f]{32}', resp[0]), 'Failed to get uuid'
-    
+
     # save doc
     resp = db.save_doc(doc1)
     assert ('rev' in resp) and ('id' in resp), 'Failed to save doc'
     doc1.update({'_id':resp['id'], '_rev':resp['rev']})
-    resp = db.save_doc({'_id': doc1['_id'], '_rev': 'a'}, raise_error=False)
-    assert 'error' in resp, 'No error when overwriting doc with wrong rev'
+
+    # save doc with wrong rev number
+    try:
+        resp = db.save_doc({'_id': doc1['_id'], '_rev': 'a'})
+    except couch.CouchException:
+        pass
+    else:
+        raise AssertionError('No error when overwriting doc with wrong rev')
 
     # get doc
     resp = db.get_doc(doc1['_id'])
     assert doc1 == resp, 'Failed to get doc'
-    resp = db.get_doc('a', raise_error=False)
-    assert 'error' in resp, 'No error on request for unexisting doc'
+
+    # get non-existing doc
+    try:
+        resp = db.get_doc('a')
+    except couch.NotFound:
+        pass
+    else:
+        raise AssertionError('No error on request for unexisting doc')
 
     # save docs
     doc1['msg2'] = 'Another message'
     resp = db.save_docs([doc1, doc2])
     assert all('rev' in item and 'id' in item for item in resp), 'Failed to save docs'
     doc1['_rev'] = resp[0]['rev']
-    doc2.update({'_id':resp[1]['id'], '_rev':resp[1]['rev']})
+    doc2.update({'_id': resp[1]['id'], '_rev': resp[1]['rev']})
 
     # get docs
     resp = db.get_docs([doc1['_id'], doc2['_id']])
     assert [doc1, doc2] == resp, 'Failed to get docs'
-    resp = db.get_docs(['a'], raise_error=False)
-    assert 'error' in resp[0], 'No error on request for unexisting doc'
-   
+
+    # get non-existing docs
+    try:
+        resp = db.get_docs(['a', 'b'])
+    except couch.NotFound:
+        pass
+    else:
+        raise AssertionError('No error on request for unexisting docs')
+
     # list docs
     resp = db.view_all_docs(include_docs=True)
-    assert {doc1['_id']:doc1['_rev'], doc2['_id']:doc2['_rev']} == \
-        dict((row['doc']['_id'], row['doc']['_rev']) for row in resp['rows']), \
-        'Failed listing all docs'
+    assert {doc1['_id']: doc1['_rev'], doc2['_id']: doc2['_rev']} == \
+        dict((row['doc']['_id'], row['doc']['_rev']) for row in resp['rows']), \
+        'Failed listing all docs'
 
     # pull database
     resp = db2.pull_db('testdb', create_target=True)
     # delete docs
     resp = db2.delete_docs([doc1, doc2])
     assert resp[0]['id']==doc1['_id'] and resp[1]['id']==doc2['_id'], 'Failed to delete docs'
-    assert not db2.view_all_docs()['rows'], 'Failed to delete docs, database not empty'
+    assert len(db2.view_all_docs()['rows']) == 0, \
+        'Failed to delete docs, database not empty'
 
     # delete database
     resp = db2.delete_db()
-    assert resp == {u'ok': True}
+    assert 'ok' in resp, 'Failed to delete database'
 
-    # view (and first upload design doc)
+    # upload design doc
     design = {
         '_id': '_design/test',
         'views': {
     resp = db.save_doc(design)
     assert 'ok' in resp, 'Failed to upload design doc'
     design['_rev'] = resp['rev']
+
+    # view
     resp = db.view('test', 'msg')
     assert [doc1['_id'], doc2['_id']] == [row['key'] for row in resp['rows']], 'Failed to get view results from design doc'
 
     # delete doc
     resp = db.delete_doc(doc2)
-    assert resp['id'] == doc2['_id']
+    assert resp['id'] == doc2['_id'], 'Failed to delete doc2'
 
     # save attachment
     data = {'msg3': 'This is a test'}
     doc1['_rev'] = resp['rev']
 
     # get attachment
-    resp = db.get_attachment(doc1, 'test attachment', attachment['mimetype'])
+    resp = db.get_attachment(doc1, attachment['name'], attachment['mimetype'])
     assert json.loads(resp) == data, 'Attachment not loaded'
 
     # delete attachment
-    resp = db.delete_attachment(doc1, 'test attachment')
+    resp = db.delete_attachment(doc1, attachment['name'])
     assert 'ok' in resp, 'Attachment not deleted'
     doc1['_rev'] = resp['rev']
-    
+
     db.delete_db()
 
+    print('All blocking tests passed')
+
+
+class AsyncTests(object):
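+    '''Run the non-blocking tests using AsyncCouch. The test steps form a
+    chain of callbacks: each callback checks the response of the previous
+    operation and then issues the next request, until the IOLoop is
+    stopped.'''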
+
+    def __init__(self):
+        # set up tests
+        self.doc1 = {'msg': 'Test doc 1'}
+        self.doc2 = {'msg': 'Test doc 2'}
+
+        self.db = couch.AsyncCouch('testdb')
+        self.db2 = couch.AsyncCouch('testdb2')
+
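+        # kick off the callback chain by deleting any databases left over
+        # from a previous test run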
+        self.db.delete_db(self.init_deleted_db)
+
+        self.error = None
+        self.ioloop = tornado.ioloop.IOLoop.instance()
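+        # run the IOLoop; it is stopped from a callback, either when all
+        # tests have passed or on the first failed check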
+        self.ioloop.start()
+
+        if self.error:
+            raise AssertionError(self.error)
+        else:
+            print('All async tests passed')
+
+    def check(self, value, msg):
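+        '''Record `msg` as the test error and stop the IOLoop if `value` is
+        falsy. Returns True when the check passed.'''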
+        if not value:
+            self.error = msg
+            self.ioloop.stop()
+            return False
+        return True
+
+    def init_deleted_db(self, resp):
+        self.db2.delete_db(self.init_deleted_db2)
+
+    def init_deleted_db2(self, resp):
+        # create database
+        self.db.create_db(self.created_db)
+
+    def created_db(self, resp):
+        if self.check('ok' in resp, 'Failed to create database'):
+            # list databases
+            self.db.list_dbs(self.listed_dbs)
+
+    def listed_dbs(self, resp):
+        if self.check(self.db.db_name in resp,
+                'Database not in list of databases'):
+            # info_db
+            self.db.info_db(self.info_db)
+
+    def info_db(self, resp):
+        if self.check(('db_name' in resp) and
+                (resp['db_name'] == self.db.db_name), 'No database info'):
+            # uuids
+            self.db.uuids(callback=self.uuids)
+
+    def uuids(self, resp):
+        if self.check(re.match('[0-9a-f]{32}', resp[0]), 'Failed to get uuid'):
+            # save doc
+            self.db.save_doc(self.doc1, self.saved_doc1)
+
+    def saved_doc1(self, resp):
+        if self.check(('rev' in resp) and ('id' in resp), 'Failed to save doc'):
+            self.doc1.update({'_id':resp['id'], '_rev':resp['rev']})
+
+            # save doc with wrong rev number
+            self.db.save_doc({'_id': self.doc1['_id'], '_rev': 'a'},
+                    self.saved_doc1_norev)
+
+    def saved_doc1_norev(self, resp):
+        if self.check(isinstance(resp, couch.CouchException),
+                'No error when overwriting doc with wrong rev'):
+            # get doc
+            self.db.get_doc(self.doc1['_id'], self.got_doc1)
+
+    def got_doc1(self, resp):
+        if self.check(self.doc1 == resp, 'Failed to get doc'):
+            # get non-existing doc
+            self.db.get_doc('a', self.got_nodoc)
+
+    def got_nodoc(self, resp):
+        if self.check(isinstance(resp, couch.NotFound),
+                'No error on request for unexisting doc'):
+            # save docs
+            self.doc1['msg2'] = 'Another message'
+            self.db.save_docs([self.doc1, self.doc2], self.saved_docs)
+
+    def saved_docs(self, resp):
+        if self.check(all('rev' in item and 'id' in item for item in resp),
+                'Failed to save docs'):
+            self.doc1['_rev'] = resp[0]['rev']
+            self.doc2.update({'_id': resp[1]['id'], '_rev': resp[1]['rev']})
+
+            # get docs
+            self.db.get_docs([self.doc1['_id'], self.doc2['_id']],
+                    self.got_docs)
+
+    def got_docs(self, resp):
+        if self.check([self.doc1, self.doc2] == resp, 'Failed to get docs'):
+            # get non-existing docs
+            self.db.get_docs(['a', 'b'], self.got_nodocs)
+
+    def got_nodocs(self, resp):
+        if self.check(isinstance(resp, couch.NotFound),
+                'No error on request for unexisting docs'):
+            # list docs
+            self.db.view_all_docs(self.list_docs, include_docs=True)
+
+    def list_docs(self, resp):
+        if self.check({self.doc1['_id']: self.doc1['_rev'],
+                self.doc2['_id']: self.doc2['_rev']} ==
+                dict((row['doc']['_id'], row['doc']['_rev'])
+                for row in resp['rows']), 'Failed listing all docs'):
+            # pull database
+            self.db2.pull_db('testdb', self.pulled_db, create_target=True)
+
+    def pulled_db(self, resp):
+        if self.check('ok' in resp, 'Replication failed'):
+            self.db2.list_dbs(self.pulled_db_verified)
+
+    def pulled_db_verified(self, resp):
+        if self.check('testdb2' in resp, 'Replication failed, replicated '
+                'database not found'):
+            # delete docs
+            self.db2.delete_docs([self.doc1, self.doc2], self.deleted_docs)
+
+    def deleted_docs(self, resp):
+        if self.check(resp[0]['id'] == self.doc1['_id'] and
+                resp[1]['id'] == self.doc2['_id'], 'Failed to delete docs'):
+            self.db2.view_all_docs(self.deleted_docs_verified)
+
+    def deleted_docs_verified(self, resp):
+        if self.check(len(resp['rows']) == 0,
+                'Failed to delete docs, database not empty'):
+            # delete database
+            self.db2.delete_db(self.deleted_db2)
+
+    def deleted_db2(self, resp):
+        if self.check('ok' in resp, 'Failed to delete database'):
+            # upload design doc
+            self.design = {
+                '_id': '_design/test',
+                'views': {
+                    'msg': {
+                        'map': 'function(doc) { if (doc.msg) { emit(doc._id, doc.msg); } }'
+                    }
+                }
+            }
+            self.db.save_doc(self.design, self.saved_design)
+
+    def saved_design(self, resp):
+        if self.check('ok' in resp, 'Failed to upload design doc'):
+            self.design['_rev'] = resp['rev']
+
+            # view
+            self.db.view('test', 'msg', self.viewed)
+
+    def viewed(self, resp):
+        if self.check([self.doc1['_id'], self.doc2['_id']] ==
+                [row['key'] for row in resp['rows']],
+                'Failed to get view results from design doc'):
+            # delete doc
+            self.db.delete_doc(self.doc2, self.deleted_doc2)
+
+    def deleted_doc2(self, resp):
+        if self.check(resp['id'] == self.doc2['_id'], 'Failed to delete doc2'):
+            # save attachment
+            self.data = {'msg3': 'This is a test'}
+            self.attachment = {'mimetype': 'application/json',
+                    'name': 'test attachment', 'data': json.dumps(self.data)}
+
+            self.db.save_attachment(self.doc1, self.attachment,
+                    self.saved_attachment)
+
+    def saved_attachment(self, resp):
+        if self.check('ok' in resp, 'Attachment not saved'):
+            self.doc1['_rev'] = resp['rev']
+
+            # get attachment
+            self.db.get_attachment(self.doc1, self.attachment['name'],
+                self.attachment['mimetype'], callback=self.got_attachment)
+
+    def got_attachment(self, resp):
+        if self.check(json.loads(resp) == self.data, 'Attachment not loaded'):
+            # delete attachment
+            self.db.delete_attachment(self.doc1, self.attachment['name'],
+                self.deleted_attachment)
+
+    def deleted_attachment(self, resp):
+        if self.check('ok' in resp, 'Attachment not deleted'):
+            self.doc1['_rev'] = resp['rev']
+
+            # done testing
+            cb = lambda r: self.ioloop.stop()
+            self.db.delete_db(cb)
+
+
 if __name__ == '__main__':
     run_blocking_tests()
-
+    AsyncTests()