Commits

Robert Brewer committed 1a663d7

Major refactor. Some tests pass; not done yet, but you can see the direction.


Files changed (153)

-include cherrypy/cherryd
-include cherrypy/favicon.ico
-include cherrypy/LICENSE.txt
-include cherrypy/scaffold/*.conf
-include cherrypy/scaffold/static/*.png
-include cherrypy/test/style.css
-include cherrypy/test/test.pem
-include cherrypy/test/static/*.html
-include cherrypy/test/static/*.jpg
-include cherrypy/tutorial/*.conf
-include cherrypy/tutorial/*.pdf
-include cherrypy/tutorial/*.html
-include cherrypy/tutorial/README.txt
+include cheroot/LICENSE.txt
+include cheroot/test/test.pem
+
 
     python setup.py install
 
-* To learn how to use it, look at the examples under cherrypy/tutorial/ or go to http://www.cherrypy.org for more info.
-
-* To run the regression tests, just go to the cherrypy/test/ directory and type:
+* To run the regression tests, just go to the cheroot/test/ directory and type:
 
     nosetests -s ./
 

cheroot/LICENSE.txt

+Copyright (c) 2004-2011, CherryPy Team (team@cherrypy.org)
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, 
+are permitted provided that the following conditions are met:
+
+    * Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright notice, 
+      this list of conditions and the following disclaimer in the documentation 
+      and/or other materials provided with the distribution.
+    * Neither the name of the CherryPy Team nor the names of its contributors 
+      may be used to endorse or promote products derived from this software 
+      without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE 
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

cheroot/__init__.py

+"""Cheroot is a highly-optimized, pure-python HTTP server."""
+
+__version__ = "3.3.0alpha"
+

cheroot/_compat.py

+"""Compatibility code for using Cheroot with various versions of Python.
+
+Cheroot 3.3 is compatible with Python versions 2.3+. This module provides a
+useful abstraction over the differences between Python versions, sometimes by
+preferring a newer idiom, sometimes an older one, and sometimes a custom one.
+
+In particular, Python 2 uses str and '' for byte strings, while Python 3
+uses str and '' for unicode strings. We will call each of these the 'native
+string' type for each version. Because of this major difference, this module
+provides new 'bytestr', 'unicodestr', and 'nativestr' attributes, as well as
+two functions: 'ntob', which translates native strings (of type 'str') into
+byte strings regardless of Python version, and 'ntou', which translates native
+strings to unicode strings. This also provides a 'BytesIO' name for dealing
+specifically with bytes, and a 'StringIO' name for dealing with native strings.
+It also provides a 'base64_decode' function with native strings as input and
+output.
+"""
+import os
+import re
+import sys
+
+if sys.version_info >= (3, 0):
+    py3k = True
+    bytestr = bytes
+    unicodestr = str
+    nativestr = unicodestr
+    basestring = (bytes, str)
+    def ntob(n, encoding='ISO-8859-1'):
+        """Return the given native string as a byte string in the given encoding."""
+        # In Python 3, the native string type is unicode
+        return n.encode(encoding)
+    def ntou(n, encoding='ISO-8859-1'):
+        """Return the given native string as a unicode string with the given encoding."""
+        # In Python 3, the native string type is unicode
+        return n
+    def tonative(n, encoding='ISO-8859-1'):
+        """Return the given string as a native string in the given encoding."""
+        # In Python 3, the native string type is unicode
+        if isinstance(n, bytes):
+            return n.decode(encoding)
+        return n
+    # type("")
+    from io import StringIO
+    # bytes:
+    from io import BytesIO as BytesIO
+else:
+    # Python 2
+    py3k = False
+    bytestr = str
+    unicodestr = unicode
+    nativestr = bytestr
+    basestring = basestring
+    def ntob(n, encoding='ISO-8859-1'):
+        """Return the given native string as a byte string in the given encoding."""
+        # In Python 2, the native string type is bytes. Assume it's already
+        # in the given encoding, which for ISO-8859-1 is almost always what
+        # was intended.
+        return n
+    def ntou(n, encoding='ISO-8859-1'):
+        """Return the given native string as a unicode string with the given encoding."""
+        # In Python 2, the native string type is bytes.
+        # First, check for the special encoding 'escape'. The test suite uses
+        # this to signal that it wants to pass a string with embedded \uXXXX
+        # escapes without having to use a u'' prefix on Python 2 (and no
+        # prefix on Python 3).
+        if encoding == 'escape':
+            return unicode(
+                re.sub(r'\\u([0-9a-fA-F]{4})',
+                       lambda m: unichr(int(m.group(1), 16)),
+                       n.decode('ISO-8859-1')))
+        # Assume it's already in the given encoding, which for ISO-8859-1 is almost
+        # always what was intended.
+        return n.decode(encoding)
+    def tonative(n, encoding='ISO-8859-1'):
+        """Return the given string as a native string in the given encoding."""
+        # In Python 2, the native string type is bytes.
+        if isinstance(n, unicode):
+            return n.encode(encoding)
+        return n
+    try:
+        # type("")
+        from cStringIO import StringIO
+    except ImportError:
+        # type("")
+        from StringIO import StringIO
+    # bytes:
+    BytesIO = StringIO
+
+try:
+    set = set
+except NameError:
+    from sets import Set as set
+
+try:
+    # Python 3.1+
+    from base64 import decodebytes as _base64_decodebytes
+except ImportError:
+    # Python 3.0 and earlier.
+    # Since Cheroot claims compatibility with Python 2.3, we must use
+    # the legacy base64 API.
+    from base64 import decodestring as _base64_decodebytes
+
+def base64_decode(n, encoding='ISO-8859-1'):
+    """Return the native string base64-decoded (as a native string)."""
+    if isinstance(n, unicodestr):
+        b = n.encode(encoding)
+    else:
+        b = n
+    b = _base64_decodebytes(b)
+    if nativestr is unicodestr:
+        return b.decode(encoding)
+    else:
+        return b
+
+try:
+    # Python 2.5+
+    from hashlib import md5
+except ImportError:
+    from md5 import new as md5
+
+try:
+    # Python 2.5+
+    from hashlib import sha1 as sha
+except ImportError:
+    from sha import new as sha
+
+try:
+    sorted = sorted
+except NameError:
+    def sorted(i):
+        i = i[:]
+        i.sort()
+        return i
+
+try:
+    reversed = reversed
+except NameError:
+    def reversed(x):
+        i = len(x)
+        while i > 0:
+            i -= 1
+            yield x[i]
+
+try:
+    # Python 3
+    from urllib.parse import urljoin, urlencode
+    from urllib.parse import quote, quote_plus
+    from urllib.parse import unquote
+    from urllib.request import urlopen
+    from urllib.request import parse_http_list, parse_keqv_list
+except ImportError:
+    # Python 2
+    from urlparse import urljoin
+    from urllib import urlencode, urlopen
+    from urllib import quote, quote_plus
+    from urllib import unquote
+    from urllib2 import parse_http_list, parse_keqv_list
+
+try:
+    dict.iteritems
+    # Python 2
+    iteritems = lambda d: d.iteritems()
+    copyitems = lambda d: d.items()
+except AttributeError:
+    # Python 3
+    iteritems = lambda d: d.items()
+    copyitems = lambda d: list(d.items())
+
+try:
+    dict.iterkeys
+    # Python 2
+    iterkeys = lambda d: d.iterkeys()
+    copykeys = lambda d: d.keys()
+except AttributeError:
+    # Python 3
+    iterkeys = lambda d: d.keys()
+    copykeys = lambda d: list(d.keys())
+
+try:
+    dict.itervalues
+    # Python 2
+    itervalues = lambda d: d.itervalues()
+    copyvalues = lambda d: d.values()
+except AttributeError:
+    # Python 3
+    itervalues = lambda d: d.values()
+    copyvalues = lambda d: list(d.values())
+
+try:
+    # Python 3
+    import builtins
+except ImportError:
+    # Python 2
+    import __builtin__ as builtins
+
+try:
+    # Python 2. HTTPSConnection is imported separately below, since
+    # platforms built without SSL do not expose it.
+    from httplib import BadStatusLine, HTTPConnection, IncompleteRead, NotConnected
+    from BaseHTTPServer import BaseHTTPRequestHandler
+except ImportError:
+    # Python 3
+    from http.client import BadStatusLine, HTTPConnection, IncompleteRead, NotConnected
+    from http.server import BaseHTTPRequestHandler
+
+try:
+    # Python 2.
+    from httplib import HTTPSConnection
+except ImportError:
+    try:
+        # Python 3
+        from http.client import HTTPSConnection
+    except ImportError:
+        # Some platforms which don't have SSL don't expose HTTPSConnection
+        HTTPSConnection = None
+
+try:
+    # Python 2
+    xrange = xrange
+except NameError:
+    # Python 3
+    xrange = range
+
+import threading
+if hasattr(threading.Thread, "daemon"):
+    # Python 2.6+
+    def get_daemon(t):
+        return t.daemon
+    def set_daemon(t, val):
+        t.daemon = val
+else:
+    def get_daemon(t):
+        return t.isDaemon()
+    def set_daemon(t, val):
+        t.setDaemon(val)
+
+try:
+    from email.utils import formatdate
+    def HTTPDate(timeval=None):
+        return formatdate(timeval, usegmt=True)
+except ImportError:
+    from rfc822 import formatdate as HTTPDate
+
+try:
+    # Python 3
+    from urllib.parse import unquote as parse_unquote
+    def unquote_qs(atom, encoding, errors='strict'):
+        return parse_unquote(atom.replace('+', ' '), encoding=encoding, errors=errors)
+except ImportError:
+    # Python 2
+    from urllib import unquote as parse_unquote
+    def unquote_qs(atom, encoding, errors='strict'):
+        return parse_unquote(atom.replace('+', ' ')).decode(encoding, errors)
+
+try:
+    # Prefer simplejson, which is usually more advanced than the builtin module.
+    import simplejson as json
+    json_decode = json.JSONDecoder().decode
+    json_encode = json.JSONEncoder().iterencode
+except ImportError:
+    if py3k:
+        # Python 3.0: json is part of the standard library,
+        # but outputs unicode. We need bytes.
+        import json
+        json_decode = json.JSONDecoder().decode
+        _json_encode = json.JSONEncoder().iterencode
+        def json_encode(value):
+            for chunk in _json_encode(value):
+                yield chunk.encode('utf8')
+    elif sys.version_info >= (2, 6):
+        # Python 2.6: json is part of the standard library
+        import json
+        json_decode = json.JSONDecoder().decode
+        json_encode = json.JSONEncoder().iterencode
+    else:
+        json = None
+        def json_decode(s):
+            raise ValueError('No JSON library is available')
+        def json_encode(s):
+            raise ValueError('No JSON library is available')
+
+try:
+    import cPickle as pickle
+except ImportError:
+    # In Python 2, cPickle is the sped-up C version; plain pickle is pure Python.
+    # In Python 3, pickle automatically uses the C accelerator when available.
+    import pickle
+
+try:
+    os.urandom(20)
+    import binascii
+    def random20():
+        return binascii.hexlify(os.urandom(20)).decode('ascii')
+except (AttributeError, NotImplementedError):
+    import random
+    # os.urandom not available until Python 2.4. Fall back to random.random.
+    def random20():
+        return sha('%s' % random.random()).hexdigest()
+
+try:
+    from _thread import get_ident as get_thread_ident
+except ImportError:
+    from thread import get_ident as get_thread_ident
+
+try:
+    # Python 3
+    next = next
+except NameError:
+    # Python 2
+    def next(i):
+        return i.next()
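
To make the intent of the new helpers concrete, here is a small usage sketch (illustrative only; the inline results are what Python 3 would show):

    from cheroot._compat import ntob, ntou, tonative, base64_decode

    ntob("abc")             # b'abc' on Python 3; plain 'abc' bytes on Python 2
    ntou("abc")             # 'abc' on Python 3; u'abc' on Python 2
    tonative(b"abc")        # 'abc' (the native str type) on either version
    base64_decode("YWJj")   # 'abc' as a native string on either version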

cheroot/errors.py

+"""Exception classes and platform-specific socket error helpers for Cheroot."""
+
+import errno
+
+
+class NoSSLError(Exception):
+    """Exception raised when a client speaks HTTP to an HTTPS socket."""
+    pass
+
+
+class FatalSSLAlert(Exception):
+    """Exception raised when the SSL implementation signals a fatal alert."""
+    pass
+
+
+class MaxSizeExceeded(Exception):
+    """Exception raised when a request entity or header exceeds its size limit."""
+
+
+def plat_specific_errors(*errnames):
+    """Return error numbers for all errors in errnames on this platform.
+    
+    The 'errno' module contains different global constants depending on
+    the specific platform (OS). This function will return the list of
+    numeric values for a given list of potential names.
+    """
+    errno_names = dir(errno)
+    nums = [getattr(errno, k) for k in errnames if k in errno_names]
+    # de-dupe the list
+    return list(dict.fromkeys(nums).keys())
+
+socket_error_eintr = plat_specific_errors("EINTR", "WSAEINTR")
+
+socket_errors_to_ignore = plat_specific_errors(
+    "EPIPE",
+    "EBADF", "WSAEBADF",
+    "ENOTSOCK", "WSAENOTSOCK",
+    "ETIMEDOUT", "WSAETIMEDOUT",
+    "ECONNREFUSED", "WSAECONNREFUSED",
+    "ECONNRESET", "WSAECONNRESET",
+    "ECONNABORTED", "WSAECONNABORTED",
+    "ENETRESET", "WSAENETRESET",
+    "EHOSTDOWN", "EHOSTUNREACH",
+    )
+socket_errors_to_ignore.append("timed out")
+socket_errors_to_ignore.append("The read operation timed out")
+
+socket_errors_nonblocking = plat_specific_errors(
+    'EAGAIN', 'EWOULDBLOCK', 'WSAEWOULDBLOCK')
+
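
As a quick illustration of plat_specific_errors, on a typical Linux host (which numbers come back, and which names exist at all, varies by platform):

    >>> from cheroot import errors
    >>> errors.plat_specific_errors("EPIPE")      # no Windows "WSA*" names here
    [32]
    >>> errors.plat_specific_errors("WSAEINTR")   # Windows-only name
    []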

cheroot/py2makefile.py

+import socket
+_fileobject_uses_str_type = isinstance(socket._fileobject(None)._rbuf, basestring)
+
+from cheroot._compat import StringIO
+from cheroot import errors
+
+
+class makefile(socket._fileobject):
+    """Faux file object attached to a socket object."""
+
+    def __init__(self, *args, **kwargs):
+        self.bytes_read = 0
+        self.bytes_written = 0
+        socket._fileobject.__init__(self, *args, **kwargs)
+    
+    def sendall(self, data):
+        """Sendall for non-blocking sockets."""
+        while data:
+            try:
+                bytes_sent = self.send(data)
+                data = data[bytes_sent:]
+            except socket.error, e:
+                if e.args[0] not in errors.socket_errors_nonblocking:
+                    raise
+
+    def send(self, data):
+        bytes_sent = self._sock.send(data)
+        self.bytes_written += bytes_sent
+        return bytes_sent
+
+    def flush(self):
+        if self._wbuf:
+            buffer = "".join(self._wbuf)
+            self._wbuf = []
+            self.sendall(buffer)
+
+    def recv(self, size):
+        while True:
+            try:
+                data = self._sock.recv(size)
+                self.bytes_read += len(data)
+                return data
+            except socket.error, e:
+                if (e.args[0] not in errors.socket_errors_nonblocking
+                    and e.args[0] not in errors.socket_error_eintr):
+                    raise
+
+    if not _fileobject_uses_str_type:
+        def read(self, size=-1):
+            # Use max, disallow tiny reads in a loop as they are very inefficient.
+            # We never leave read() with any leftover data from a new recv() call
+            # in our internal buffer.
+            rbufsize = max(self._rbufsize, self.default_bufsize)
+            # Our use of StringIO rather than lists of string objects returned by
+            # recv() minimizes memory usage and fragmentation that occurs when
+            # rbufsize is large compared to the typical return value of recv().
+            buf = self._rbuf
+            buf.seek(0, 2)  # seek end
+            if size < 0:
+                # Read until EOF
+                self._rbuf = StringIO()  # reset _rbuf.  we consume it via buf.
+                while True:
+                    data = self.recv(rbufsize)
+                    if not data:
+                        break
+                    buf.write(data)
+                return buf.getvalue()
+            else:
+                # Read until size bytes or EOF seen, whichever comes first
+                buf_len = buf.tell()
+                if buf_len >= size:
+                    # Already have size bytes in our buffer?  Extract and return.
+                    buf.seek(0)
+                    rv = buf.read(size)
+                    self._rbuf = StringIO()
+                    self._rbuf.write(buf.read())
+                    return rv
+
+                self._rbuf = StringIO()  # reset _rbuf.  we consume it via buf.
+                while True:
+                    left = size - buf_len
+                    # recv() will malloc the amount of memory given as its
+                    # parameter even though it often returns much less data
+                    # than that.  The returned data string is short lived
+                    # as we copy it into a StringIO and free it.  This avoids
+                    # fragmentation issues on many platforms.
+                    data = self.recv(left)
+                    if not data:
+                        break
+                    n = len(data)
+                    if n == size and not buf_len:
+                        # Shortcut.  Avoid buffer data copies when:
+                        # - We have no data in our buffer.
+                        # AND
+                        # - Our call to recv returned exactly the
+                        #   number of bytes we were asked to read.
+                        return data
+                    if n == left:
+                        buf.write(data)
+                        del data  # explicit free
+                        break
+                    assert n <= left, "recv(%d) returned %d bytes" % (left, n)
+                    buf.write(data)
+                    buf_len += n
+                    del data  # explicit free
+                    #assert buf_len == buf.tell()
+                return buf.getvalue()
+
+        def readline(self, size=-1):
+            buf = self._rbuf
+            buf.seek(0, 2)  # seek end
+            if buf.tell() > 0:
+                # check if we already have it in our buffer
+                buf.seek(0)
+                bline = buf.readline(size)
+                if bline.endswith('\n') or len(bline) == size:
+                    self._rbuf = StringIO()
+                    self._rbuf.write(buf.read())
+                    return bline
+                del bline
+            if size < 0:
+                # Read until \n or EOF, whichever comes first
+                if self._rbufsize <= 1:
+                    # Speed up unbuffered case
+                    buf.seek(0)
+                    buffers = [buf.read()]
+                    self._rbuf = StringIO()  # reset _rbuf.  we consume it via buf.
+                    data = None
+                    recv = self.recv
+                    while data != "\n":
+                        data = recv(1)
+                        if not data:
+                            break
+                        buffers.append(data)
+                    return "".join(buffers)
+
+                buf.seek(0, 2)  # seek end
+                self._rbuf = StringIO()  # reset _rbuf.  we consume it via buf.
+                while True:
+                    data = self.recv(self._rbufsize)
+                    if not data:
+                        break
+                    nl = data.find('\n')
+                    if nl >= 0:
+                        nl += 1
+                        buf.write(data[:nl])
+                        self._rbuf.write(data[nl:])
+                        del data
+                        break
+                    buf.write(data)
+                return buf.getvalue()
+            else:
+                # Read until size bytes or \n or EOF seen, whichever comes first
+                buf.seek(0, 2)  # seek end
+                buf_len = buf.tell()
+                if buf_len >= size:
+                    buf.seek(0)
+                    rv = buf.read(size)
+                    self._rbuf = StringIO()
+                    self._rbuf.write(buf.read())
+                    return rv
+                self._rbuf = StringIO()  # reset _rbuf.  we consume it via buf.
+                while True:
+                    data = self.recv(self._rbufsize)
+                    if not data:
+                        break
+                    left = size - buf_len
+                    # did we just receive a newline?
+                    nl = data.find('\n', 0, left)
+                    if nl >= 0:
+                        nl += 1
+                        # save the excess data to _rbuf
+                        self._rbuf.write(data[nl:])
+                        if buf_len:
+                            buf.write(data[:nl])
+                            break
+                        else:
+                            # Shortcut.  Avoid data copy through buf when returning
+                            # a substring of our first recv().
+                            return data[:nl]
+                    n = len(data)
+                    if n == size and not buf_len:
+                        # Shortcut.  Avoid data copy through buf when
+                        # returning exactly all of our first recv().
+                        return data
+                    if n >= left:
+                        buf.write(data[:left])
+                        self._rbuf.write(data[left:])
+                        break
+                    buf.write(data)
+                    buf_len += n
+                    #assert buf_len == buf.tell()
+                return buf.getvalue()
+    else:
+        def read(self, size=-1):
+            if size < 0:
+                # Read until EOF
+                buffers = [self._rbuf]
+                self._rbuf = ""
+                if self._rbufsize <= 1:
+                    recv_size = self.default_bufsize
+                else:
+                    recv_size = self._rbufsize
+
+                while True:
+                    data = self.recv(recv_size)
+                    if not data:
+                        break
+                    buffers.append(data)
+                return "".join(buffers)
+            else:
+                # Read until size bytes or EOF seen, whichever comes first
+                data = self._rbuf
+                buf_len = len(data)
+                if buf_len >= size:
+                    self._rbuf = data[size:]
+                    return data[:size]
+                buffers = []
+                if data:
+                    buffers.append(data)
+                self._rbuf = ""
+                while True:
+                    left = size - buf_len
+                    recv_size = max(self._rbufsize, left)
+                    data = self.recv(recv_size)
+                    if not data:
+                        break
+                    buffers.append(data)
+                    n = len(data)
+                    if n >= left:
+                        self._rbuf = data[left:]
+                        buffers[-1] = data[:left]
+                        break
+                    buf_len += n
+                return "".join(buffers)
+
+        def readline(self, size=-1):
+            data = self._rbuf
+            if size < 0:
+                # Read until \n or EOF, whichever comes first
+                if self._rbufsize <= 1:
+                    # Speed up unbuffered case
+                    assert data == ""
+                    buffers = []
+                    while data != "\n":
+                        data = self.recv(1)
+                        if not data:
+                            break
+                        buffers.append(data)
+                    return "".join(buffers)
+                nl = data.find('\n')
+                if nl >= 0:
+                    nl += 1
+                    self._rbuf = data[nl:]
+                    return data[:nl]
+                buffers = []
+                if data:
+                    buffers.append(data)
+                self._rbuf = ""
+                while True:
+                    data = self.recv(self._rbufsize)
+                    if not data:
+                        break
+                    buffers.append(data)
+                    nl = data.find('\n')
+                    if nl >= 0:
+                        nl += 1
+                        self._rbuf = data[nl:]
+                        buffers[-1] = data[:nl]
+                        break
+                return "".join(buffers)
+            else:
+                # Read until size bytes or \n or EOF seen, whichever comes first
+                nl = data.find('\n', 0, size)
+                if nl >= 0:
+                    nl += 1
+                    self._rbuf = data[nl:]
+                    return data[:nl]
+                buf_len = len(data)
+                if buf_len >= size:
+                    self._rbuf = data[size:]
+                    return data[:size]
+                buffers = []
+                if data:
+                    buffers.append(data)
+                self._rbuf = ""
+                while True:
+                    data = self.recv(self._rbufsize)
+                    if not data:
+                        break
+                    buffers.append(data)
+                    left = size - buf_len
+                    nl = data.find('\n', 0, left)
+                    if nl >= 0:
+                        nl += 1
+                        self._rbuf = data[nl:]
+                        buffers[-1] = data[:nl]
+                        break
+                    n = len(data)
+                    if n >= left:
+                        self._rbuf = data[left:]
+                        buffers[-1] = data[:left]
+                        break
+                    buf_len += n
+                return "".join(buffers)
+
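
Despite the lowercase class name, this is used as a drop-in for socket.makefile() that also tracks byte counts and retries non-blocking socket errors. A hypothetical Python 2 sketch (host and port are placeholders):

    import socket
    from cheroot.py2makefile import makefile

    s = socket.socket()
    s.connect(("localhost", 8070))
    rfile = makefile(s, "rb", -1)   # buffered reader; see rfile.bytes_read
    request_line = rfile.readline()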

cheroot/py3makefile.py

+import socket
+import sys
+
+if sys.version_info < (3,1):
+    import io
+else:
+    import _pyio as io
+DEFAULT_BUFFER_SIZE = io.DEFAULT_BUFFER_SIZE
+
+
+class BufferedWriter(io.BufferedWriter):
+    """Faux file object attached to a socket object."""
+
+    def write(self, b):
+        self._checkClosed()
+        if isinstance(b, str):
+            raise TypeError("can't write str to binary stream")
+        
+        with self._write_lock:
+            self._write_buf.extend(b)
+            self._flush_unlocked()
+            return len(b)
+    
+    def _flush_unlocked(self):
+        self._checkClosed("flush of closed file")
+        while self._write_buf:
+            try:
+                # SSL sockets only accept 'bytes', not bytearrays,
+                # so perhaps we should conditionally wrap this for perf?
+                n = self.raw.write(bytes(self._write_buf))
+            except io.BlockingIOError as e:
+                n = e.characters_written
+            del self._write_buf[:n]
+
+
+def makefile(sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE):
+    if 'r' in mode:
+        return io.BufferedReader(socket.SocketIO(sock, mode), bufsize)
+    else:
+        return BufferedWriter(socket.SocketIO(sock, mode), bufsize)
+
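
The Python 3 variant leans on the standard io stack instead: a minimal sketch of how the factory is meant to be called (assuming a POSIX socket pair):

    import socket
    from cheroot.py3makefile import makefile

    a, b = socket.socketpair()
    wfile = makefile(a, 'wb')   # BufferedWriter that retries on BlockingIOError
    wfile.write(b'ping')
    rfile = makefile(b, 'rb')   # plain io.BufferedReader
    rfile.read(4)               # b'ping'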

cheroot/server.py

+"""A high-speed, production ready, thread pooled, generic HTTP server.
+
+Simplest example on how to use this module directly::
+
+    from cheroot import wsgi
+    
+    def my_crazy_app(environ, start_response):
+        status = '200 OK'
+        response_headers = [('Content-type','text/plain')]
+        start_response(status, response_headers)
+        return ['Hello world!']
+    
+    server = wsgi.WSGIServer(
+                ('0.0.0.0', 8070),
+                server_name='www.cheroot.example',
+                wsgi_app=my_crazy_app)
+    server.start()
+
+The Cheroot HTTP server can serve as many WSGI applications 
+as you want in one instance by using a WSGIPathInfoDispatcher::
+    
+    d = WSGIPathInfoDispatcher({'/': my_crazy_app, '/blog': my_blog_app})
+    server = WSGIServer(('0.0.0.0', 80), wsgi_app=d)
+
+Want SSL support? Just set server.ssl_adapter to an SSLAdapter instance.
+
+For those of you wanting to understand internals of this module, here's the
+basic call flow. The server's listening thread runs a very tight loop,
+sticking incoming connections onto a Queue::
+
+    server = HTTPServer(...)
+    server.start()
+    while True:
+        tick()
+        # This blocks until a request comes in:
+        child = socket.accept()
+        conn = HTTPConnection(child, ...)
+        server.requests.put(conn)
+
+Worker threads are kept in a pool and poll the Queue, popping off and then
+handling each connection in turn. Each connection can consist of an arbitrary
+number of requests and their responses, so we run a nested loop::
+
+    while True:
+        conn = server.requests.get()
+        conn.communicate()
+        ->  while True:
+                req = HTTPRequest(...)
+                req.parse_request()
+                ->  # Read the Request-Line, e.g. "GET /page HTTP/1.1"
+                    req.rfile.readline()
+                    read_headers(req.rfile, req.inheaders)
+                req.respond()
+                ->  response = app(...)
+                    try:
+                        for chunk in response:
+                            if chunk:
+                                req.write(chunk)
+                    finally:
+                        if hasattr(response, "close"):
+                            response.close()
+                if req.close_connection:
+                    return
+"""
+
+__all__ = ['HTTPRequest', 'HTTPConnection', 'HTTPServer',
+           'SizeCheckWrapper', 'KnownLengthRFile', 'ChunkedRFile',
+           'WorkerThread', 'ThreadPool', 'Gateway',]
+
+from cheroot._compat import bytestr, unicodestr, basestring, ntob, py3k
+
+LF = ntob('\n')
+CRLF = ntob('\r\n')
+TAB = ntob('\t')
+SPACE = ntob(' ')
+COLON = ntob(':')
+SEMICOLON = ntob(';')
+COMMA = ntob(',')
+PERCENT = ntob('%')
+EMPTY = ntob('')
+NUMBER_SIGN = ntob('#')
+QUESTION_MARK = ntob('?')
+ASTERISK = ntob('*')
+FORWARD_SLASH = ntob('/')
+
+import os
+try:
+    import queue
+except ImportError:
+    import Queue as queue
+import re
+if py3k:
+    import email.utils
+    def formatdate():
+        return email.utils.formatdate(usegmt=True).encode('ISO-8859-1')
+else:
+    from rfc822 import formatdate
+import socket
+import sys
+if sys.platform.startswith('win') and not hasattr(socket, 'IPPROTO_IPV6'):
+    socket.IPPROTO_IPV6 = 41
+
+if py3k:
+    if sys.version_info < (3,1):
+        import io
+    else:
+        import _pyio as io
+    DEFAULT_BUFFER_SIZE = io.DEFAULT_BUFFER_SIZE
+else:
+    DEFAULT_BUFFER_SIZE = -1
+
+import threading
+import time
+try:
+    from traceback import format_exc
+except ImportError:
+    def format_exc(limit=None):
+        """Like print_exc() but return a string. Backport for Python 2.3."""
+        try:
+            etype, value, tb = sys.exc_info()
+            return ''.join(traceback.format_exception(etype, value, tb, limit))
+        finally:
+            etype = value = tb = None
+
+if py3k:
+    from urllib.parse import urlparse
+    def unquote(path):
+        """takes quoted string and unquotes % encoded values""" 
+        res = path.split(PERCENT)
+        for i in range(1, len(res)):
+            item = res[i]
+            res[i] = bytes([int(item[:2], 16)]) + item[2:]
+        return EMPTY.join(res)
+else:
+    from urllib import unquote
+    from urlparse import urlparse
+import warnings
+
+from cheroot import errors
+if py3k:
+    from cheroot.py3makefile import makefile
+else:
+    from cheroot.py2makefile import makefile
+def write(wfile, output):
+    if hasattr(wfile, 'sendall'):
+        wfile.sendall(output)
+    else:
+        wfile.write(output)
+
+quoted_slash = re.compile(ntob("(?i)%2F"))
+
+comma_separated_headers = [ntob(h) for h in
+    ['Accept', 'Accept-Charset', 'Accept-Encoding',
+     'Accept-Language', 'Accept-Ranges', 'Allow', 'Cache-Control',
+     'Connection', 'Content-Encoding', 'Content-Language', 'Expect',
+     'If-Match', 'If-None-Match', 'Pragma', 'Proxy-Authenticate', 'TE',
+     'Trailer', 'Transfer-Encoding', 'Upgrade', 'Vary', 'Via', 'Warning',
+     'WWW-Authenticate']]
+
+
+import logging
+if not hasattr(logging, 'statistics'):
+    logging.statistics = {}
+
+
+def read_headers(rfile, hdict=None):
+    """Read headers from the given stream into the given header dict.
+    
+    If hdict is None, a new header dict is created. Returns the populated
+    header dict.
+    
+    Headers which are repeated are folded together using a comma if their
+    specification so dictates.
+    
+    This function raises ValueError when the read bytes violate the HTTP spec.
+    You should probably return "400 Bad Request" if this happens.
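+    
+    Illustrative example (Python 3; header names are title-cased byte strings)::
+    
+        >>> import io
+        >>> read_headers(io.BytesIO(b"Allow: GET\r\nAllow: HEAD\r\n\r\n"))
+        {b'Allow': b'GET, HEAD'}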
+    """
+    if hdict is None:
+        hdict = {}
+    
+    while True:
+        line = rfile.readline()
+        if not line:
+            # No more data--illegal end of headers
+            raise ValueError("Illegal end of headers.")
+        
+        if line == CRLF:
+            # Normal end of headers
+            break
+        if not line.endswith(CRLF):
+            raise ValueError("HTTP requires CRLF terminators")
+        
+        # Slice (don't index) so this works for bytes on py3k and str on py2.
+        if line[:1] in (SPACE, TAB):
+            # It's a continuation line.
+            v = line.strip()
+        else:
+            try:
+                k, v = line.split(COLON, 1)
+            except ValueError:
+                raise ValueError("Illegal header line.")
+            # TODO: what about TE and WWW-Authenticate?
+            k = k.strip().title()
+            v = v.strip()
+            hname = k
+        
+        if k in comma_separated_headers:
+            existing = hdict.get(hname)
+            if existing:
+                v = ntob(", ").join((existing, v))
+        hdict[hname] = v
+    
+    return hdict
+
+
+class SizeCheckWrapper(object):
+    """Wraps a file-like object, raising MaxSizeExceeded if too large."""
+    
+    def __init__(self, rfile, maxlen):
+        self.rfile = rfile
+        self.maxlen = maxlen
+        self.bytes_read = 0
+    
+    def _check_length(self):
+        if self.maxlen and self.bytes_read > self.maxlen:
+            raise errors.MaxSizeExceeded()
+    
+    def read(self, size=None):
+        data = self.rfile.read(size)
+        self.bytes_read += len(data)
+        self._check_length()
+        return data
+    
+    def readline(self, size=None):
+        if size is not None:
+            data = self.rfile.readline(size)
+            self.bytes_read += len(data)
+            self._check_length()
+            return data
+        
+        # User didn't specify a size ...
+        # We read the line in chunks to make sure it's not a 100MB line !
+        res = []
+        while True:
+            data = self.rfile.readline(256)
+            self.bytes_read += len(data)
+            self._check_length()
+            res.append(data)
+            # See http://www.cherrypy.org/ticket/421
+            if len(data) < 256 or data[-1:] == "\n":
+                return EMPTY.join(res)
+    
+    def readlines(self, sizehint=0):
+        # Shamelessly stolen from StringIO
+        total = 0
+        lines = []
+        line = self.readline()
+        while line:
+            lines.append(line)
+            total += len(line)
+            if 0 < sizehint <= total:
+                break
+            line = self.readline()
+        return lines
+    
+    def close(self):
+        self.rfile.close()
+    
+    def __iter__(self):
+        return self
+    
+    def __next__(self):
+        data = next(self.rfile)
+        self.bytes_read += len(data)
+        self._check_length()
+        return data
+    
+    def next(self):
+        data = self.rfile.next()
+        self.bytes_read += len(data)
+        self._check_length()
+        return data
+
+
+class KnownLengthRFile(object):
+    """Wraps a file-like object, returning an empty string when exhausted."""
+    
+    def __init__(self, rfile, content_length):
+        self.rfile = rfile
+        self.remaining = content_length
+    
+    def read(self, size=None):
+        if self.remaining == 0:
+            return EMPTY
+        if size is None:
+            size = self.remaining
+        else:
+            size = min(size, self.remaining)
+        
+        data = self.rfile.read(size)
+        self.remaining -= len(data)
+        return data
+    
+    def readline(self, size=None):
+        if self.remaining == 0:
+            return EMPTY
+        if size is None:
+            size = self.remaining
+        else:
+            size = min(size, self.remaining)
+        
+        data = self.rfile.readline(size)
+        self.remaining -= len(data)
+        return data
+    
+    def readlines(self, sizehint=0):
+        # Shamelessly stolen from StringIO
+        total = 0
+        lines = []
+        line = self.readline()
+        while line:
+            lines.append(line)
+            total += len(line)
+            if 0 < sizehint <= total:
+                break
+            line = self.readline()
+        return lines
+    
+    def close(self):
+        self.rfile.close()
+    
+    def __iter__(self):
+        return self
+    
+    def __next__(self):
+        data = next(self.rfile)
+        self.remaining -= len(data)
+        return data
+
+
+class ChunkedRFile(object):
+    """Wraps a file-like object, returning an empty string when exhausted.
+    
+    This class is intended to provide a conforming wsgi.input value for
+    request entities that have been encoded with the 'chunked' transfer
+    encoding.
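+    
+    For reference, the wire format being decoded looks like::
+    
+        5\r\n          (chunk-size in hex, optionally followed by ";extensions")
+        hello\r\n      (chunk-size bytes of data, then CRLF)
+        0\r\n          (a zero-size chunk ends the body)
+        \r\n           (optional trailer lines, then a final CRLF)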
+    """
+    
+    def __init__(self, rfile, maxlen, bufsize=8192):
+        self.rfile = rfile
+        self.maxlen = maxlen
+        self.bytes_read = 0
+        self.buffer = EMPTY
+        self.bufsize = bufsize
+        self.closed = False
+    
+    def _fetch(self):
+        if self.closed:
+            return
+        
+        line = self.rfile.readline()
+        self.bytes_read += len(line)
+        
+        if self.maxlen and self.bytes_read > self.maxlen:
+            raise errors.MaxSizeExceeded("Request Entity Too Large", self.maxlen)
+        
+        line = line.strip().split(SEMICOLON, 1)
+        
+        try:
+            chunk_size = line.pop(0)
+            chunk_size = int(chunk_size, 16)
+        except ValueError:
+            raise ValueError("Bad chunked transfer size: " + repr(chunk_size))
+        
+        if chunk_size <= 0:
+            self.closed = True
+            return
+        
+        # if line: chunk_extension = line[0]
+        
+        if self.maxlen and self.bytes_read + chunk_size > self.maxlen:
+            raise IOError("Request Entity Too Large")
+        
+        chunk = self.rfile.read(chunk_size)
+        self.bytes_read += len(chunk)
+        self.buffer += chunk
+        
+        crlf = self.rfile.read(2)
+        if crlf != CRLF:
+            raise ValueError(
+                 "Bad chunked transfer coding (expected '\\r\\n', "
+                 "got " + repr(crlf) + ")")
+    
+    def read(self, size=None):
+        data = EMPTY
+        while True:
+            if size and len(data) >= size:
+                return data
+            
+            if not self.buffer:
+                self._fetch()
+                if not self.buffer:
+                    # EOF
+                    return data
+            
+            if size:
+                remaining = size - len(data)
+                data += self.buffer[:remaining]
+                self.buffer = self.buffer[remaining:]
+            else:
+                data += self.buffer
+                self.buffer = EMPTY
+    
+    def readline(self, size=None):
+        data = EMPTY
+        while True:
+            if size and len(data) >= size:
+                return data
+            
+            if not self.buffer:
+                self._fetch()
+                if not self.buffer:
+                    # EOF
+                    return data
+            
+            newline_pos = self.buffer.find(LF)
+            if size:
+                if newline_pos == -1:
+                    remaining = size - len(data)
+                    data += self.buffer[:remaining]
+                    self.buffer = self.buffer[remaining:]
+                else:
+                    # Consume through the LF, then return the completed line.
+                    remaining = min(size - len(data), newline_pos + 1)
+                    data += self.buffer[:remaining]
+                    self.buffer = self.buffer[remaining:]
+                    if data[-1:] == LF:
+                        return data
+            else:
+                if newline_pos == -1:
+                    data += self.buffer
+                    self.buffer = EMPTY
+                else:
+                    # Consume through the LF, then return the completed line.
+                    data += self.buffer[:newline_pos + 1]
+                    self.buffer = self.buffer[newline_pos + 1:]
+                    return data
+    
+    def readlines(self, sizehint=0):
+        # Shamelessly stolen from StringIO
+        total = 0
+        lines = []
+        line = self.readline()
+        while line:
+            lines.append(line)
+            total += len(line)
+            if 0 < sizehint <= total:
+                break
+            line = self.readline()
+        return lines
+    
+    def read_trailer_lines(self):
+        if not self.closed:
+            raise ValueError(
+                "Cannot read trailers until the request body has been read.")
+        
+        while True:
+            line = self.rfile.readline()
+            if not line:
+                # No more data--illegal end of headers
+                raise ValueError("Illegal end of headers.")
+            
+            self.bytes_read += len(line)
+            if self.maxlen and self.bytes_read > self.maxlen:
+                raise IOError("Request Entity Too Large")
+            
+            if line == CRLF:
+                # Normal end of headers
+                break
+            if not line.endswith(CRLF):
+                raise ValueError("HTTP requires CRLF terminators")
+            
+            yield line
+    
+    def close(self):
+        self.rfile.close()
+    
+    def __iter__(self):
+        # Yield one line at a time until the decoded entity is exhausted.
+        line = self.readline()
+        while line:
+            yield line
+            line = self.readline()
+
+
+class HTTPRequest(object):
+    """An HTTP Request (and response).
+    
+    A single HTTP connection may consist of multiple request/response pairs.
+    """
+    
+    server = None
+    """The HTTPServer object which is receiving this request."""
+    
+    conn = None
+    """The HTTPConnection object on which this request connected."""
+    
+    inheaders = {}
+    """A dict of request headers."""
+    
+    outheaders = []
+    """A list of header tuples to write in the response."""
+    
+    ready = False
+    """When True, the request has been parsed and is ready to begin generating
+    the response. When False, signals the calling Connection that the response
+    should not be generated and the connection should close."""
+    
+    close_connection = False
+    """Signals the calling Connection that the request should close. This does
+    not imply an error! The client and/or server may each request that the
+    connection be closed."""
+    
+    chunked_write = False
+    """If True, output will be encoded with the "chunked" transfer-coding.
+    
+    This value is set automatically inside send_headers."""
+    
+    def __init__(self, server, conn):
+        self.server = server
+        self.conn = conn
+        
+        self.ready = False
+        self.started_request = False
+        self.scheme = ntob("http")
+        if self.server.ssl_adapter is not None:
+            self.scheme = ntob("https")
+        # Use the lowest-common protocol in case read_request_line errors.
+        self.response_protocol = 'HTTP/1.0'
+        self.inheaders = {}
+        
+        self.status = ""
+        self.outheaders = []
+        self.sent_headers = False
+        self.close_connection = self.__class__.close_connection
+        self.chunked_read = False
+        self.chunked_write = self.__class__.chunked_write
+    
+    def parse_request(self):
+        """Parse the next HTTP request start-line and message-headers."""
+        self.rfile = SizeCheckWrapper(self.conn.rfile,
+                                      self.server.max_request_header_size)
+        try:
+            success = self.read_request_line()
+        except errors.MaxSizeExceeded:
+            self.simple_response("414 Request-URI Too Long",
+                "The Request-URI sent with the request exceeds the maximum "
+                "allowed bytes.")
+            return
+        else:
+            if not success:
+                return
+        
+        try:
+            success = self.read_request_headers()
+        except errors.MaxSizeExceeded:
+            self.simple_response("413 Request Entity Too Large",
+                "The headers sent with the request exceed the maximum "
+                "allowed bytes.")
+            return
+        else:
+            if not success:
+                return
+        
+        self.ready = True
+    
+    def read_request_line(self):
+        # HTTP/1.1 connections are persistent by default. If a client
+        # requests a page, then idles (leaves the connection open),
+        # then rfile.readline() will raise socket.error("timed out").
+        # Note that it does this based on the value given to settimeout(),
+        # and doesn't need the client to request or acknowledge the close
+        # (although your TCP stack might suffer for it: cf Apache's history
+        # with FIN_WAIT_2).
+        request_line = self.rfile.readline()
+        
+        # Set started_request to True so communicate() knows to send 408
+        # from here on out.
+        self.started_request = True
+        if not request_line:
+            return False
+        
+        if request_line == CRLF:
+            # RFC 2616 sec 4.1: "...if the server is reading the protocol
+            # stream at the beginning of a message and receives a CRLF
+            # first, it should ignore the CRLF."
+            # But only ignore one leading line! else we enable a DoS.
+            request_line = self.rfile.readline()
+            if not request_line:
+                return False
+        
+        if not request_line.endswith(CRLF):
+            self.simple_response("400 Bad Request", "HTTP requires CRLF terminators")
+            return False
+        
+        try:
+            method, uri, req_protocol = request_line.strip().split(SPACE, 2)
+            if py3k:
+                # The [x:y] slicing is necessary: indexing a byte string yields ints
+                rp = int(req_protocol[5:6]), int(req_protocol[7:8])
+            else:
+                rp = int(req_protocol[5]), int(req_protocol[7])
+        except (ValueError, IndexError):
+            self.simple_response("400 Bad Request", "Malformed Request-Line")
+            return False
+        
+        self.uri = uri
+        self.method = method
+        
+        # uri may be an abs_path (including "http://host.domain.tld");
+        scheme, authority, path = self.parse_request_uri(uri)
+        if NUMBER_SIGN in path:
+            self.simple_response("400 Bad Request",
+                                 "Illegal #fragment in Request-URI.")
+            return False
+        
+        if scheme:
+            self.scheme = scheme
+        
+        qs = EMPTY
+        if QUESTION_MARK in path:
+            path, qs = path.split(QUESTION_MARK, 1)
+        
+        # Unquote the path+params (e.g. "/this%20path" -> "/this path").
+        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2
+        #
+        # But note that "...a URI must be separated into its components
+        # before the escaped characters within those components can be
+        # safely decoded." http://www.ietf.org/rfc/rfc2396.txt, sec 2.4.2
+        # Therefore, "/this%2Fpath" becomes "/this%2Fpath", not "/this/path".
+        try:
+            atoms = [unquote(x) for x in quoted_slash.split(path)]
+        except ValueError:
+            ex = sys.exc_info()[1]
+            self.simple_response("400 Bad Request", ex.args[0])
+            return False
+        path = ntob("%2F").join(atoms)
+        self.path = path
+        
+        # Note that, like wsgiref and most other HTTP servers,
+        # we "% HEX HEX"-unquote the path but not the query string.
+        self.qs = qs
+        
+        # Compare request and server HTTP protocol versions, in case our
+        # server does not support the requested protocol. Limit our output
+        # to min(req, server). We want the following output:
+        #     request    server     actual written   supported response
+        #     protocol   protocol  response protocol    feature set
+        # a     1.0        1.0           1.0                1.0
+        # b     1.0        1.1           1.1                1.0
+        # c     1.1        1.0           1.0                1.0
+        # d     1.1        1.1           1.1                1.1
+        # Notice that, in (b), the response will be "HTTP/1.1" even though
+        # the client only understands 1.0. RFC 2616 10.5.6 says we should
+        # only return 505 if the _major_ version is different.
+        if py3k:
+            # The [x:y] slicing is necessary: indexing a byte string yields ints
+            sp = int(self.server.protocol[5:6]), int(self.server.protocol[7:8])
+        else:
+            sp = int(self.server.protocol[5]), int(self.server.protocol[7])
+        
+        if sp[0] != rp[0]:
+            self.simple_response("505 HTTP Version Not Supported")
+            return False
+
+        self.request_protocol = req_protocol
+        self.response_protocol = "HTTP/%s.%s" % min(rp, sp)
+
+        return True
+
+    def read_request_headers(self):
+        """Read self.rfile into self.inheaders. Return success."""
+        
+        # then all the http headers
+        try:
+            read_headers(self.rfile, self.inheaders)
+        except ValueError:
+            ex = sys.exc_info()[1]
+            self.simple_response("400 Bad Request", ex.args[0])
+            return False
+        
+        mrbs = self.server.max_request_body_size
+        if mrbs and int(self.inheaders.get(ntob("Content-Length"), 0)) > mrbs:
+            self.simple_response("413 Request Entity Too Large",
+                "The entity sent with the request exceeds the maximum "
+                "allowed bytes.")
+            return False
+        
+        # Persistent connection support
+        if self.response_protocol == "HTTP/1.1":
+            # Both server and client are HTTP/1.1
+            if self.inheaders.get(ntob("Connection"), EMPTY) == ntob("close"):
+                self.close_connection = True
+        else:
+            # Either the server or client (or both) are HTTP/1.0
+            if self.inheaders.get(ntob("Connection"), EMPTY) != ntob("Keep-Alive"):
+                self.close_connection = True
+        
+        # Transfer-Encoding support
+        te = None
+        if self.response_protocol == "HTTP/1.1":
+            te = self.inheaders.get(ntob("Transfer-Encoding"))
+            if te:
+                te = [x.strip().lower() for x in te.split(COMMA) if x.strip()]
+        
+        self.chunked_read = False
+        
+        if te:
+            for enc in te:
+                if enc == ntob("chunked"):
+                    self.chunked_read = True
+                else:
+                    # Note that, even if we see "chunked", we must reject
+                    # if there is an extension we don't recognize.
+                    self.simple_response("501 Unimplemented")
+                    self.close_connection = True
+                    return False
+        
+        # From PEP 333:
+        # "Servers and gateways that implement HTTP 1.1 must provide
+        # transparent support for HTTP 1.1's "expect/continue" mechanism.
+        # This may be done in any of several ways:
+        #   1. Respond to requests containing an Expect: 100-continue request
+        #      with an immediate "100 Continue" response, and proceed normally.
+        #   2. Proceed with the request normally, but provide the application
+        #      with a wsgi.input stream that will send the "100 Continue"
+        #      response if/when the application first attempts to read from
+        #      the input stream. The read request must then remain blocked
+        #      until the client responds.
+        #   3. Wait until the client decides that the server does not support
+        #      expect/continue, and sends the request body on its own.
+        #      (This is suboptimal, and is not recommended.)
+        #
+        # We used to do 3, but are now doing 1. Maybe we'll do 2 someday,
+        # but it seems like it would be a big slowdown for such a rare case.
+        if self.inheaders.get(ntob("Expect"), EMPTY) == ntob("100-continue"):
+            # Don't use simple_response here, because it emits headers
+            # we don't want. See http://www.cherrypy.org/ticket/951
+            msg = ntob(self.server.protocol, 'ascii') + ntob(" 100 Continue\r\n\r\n")
+            try:
+                write(self.conn.wfile, msg)
+            except socket.error:
+                x = sys.exc_info()[1]
+                if x.args[0] not in errors.socket_errors_to_ignore:
+                    raise
+        return True
+    
+    def parse_request_uri(self, uri):
+        """Parse a Request-URI into (scheme, authority, path).
+        
+        Note that Request-URI's must be one of::
+            
+            Request-URI    = "*" | absoluteURI | abs_path | authority
+        
+        Therefore, a Request-URI which starts with a double forward-slash
+        cannot be a "net_path"::
+        
+            net_path      = "//" authority [ abs_path ]
+        
+        Instead, it must be interpreted as an "abs_path" with an empty first
+        path segment::
+        
+            abs_path      = "/"  path_segments
+            path_segments = segment *( "/" segment )
+            segment       = *pchar *( ";" param )
+            param         = *pchar
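+        
+        For example (illustrative)::
+        
+            >>> req.parse_request_uri(ntob("http://example.com/page?id=1"))
+            (b'http', b'example.com', b'/page?id=1')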
+        """
+        if uri == ASTERISK:
+            return None, None, uri
+        
+        i = uri.find(ntob('://'))
+        if i > 0 and QUESTION_MARK not in uri[:i]:
+            # An absoluteURI.
+            # If there's a scheme (and it must be http or https), then:
+            # http_URL = "http:" "//" host [ ":" port ] [ abs_path [ "?" query ]]
+            scheme, remainder = uri[:i].lower(), uri[i + 3:]
+            authority, path = remainder.split(FORWARD_SLASH, 1)
+            path = FORWARD_SLASH + path
+            return scheme, authority, path
+        
+        if uri.startswith(FORWARD_SLASH):
+            # An abs_path.
+            return None, None, uri
+        else:
+            # An authority.
+            return None, uri, None
+
+    def respond(self):
+        """Call the gateway and write its iterable output."""
+        mrbs = self.server.max_request_body_size
+        if self.chunked_read:
+            self.rfile = ChunkedRFile(self.conn.rfile, mrbs)
+        else:
+            cl = int(self.inheaders.get(ntob("Content-Length"), 0))
+            if mrbs and mrbs < cl:
+                if not self.sent_headers:
+                    self.simple_response("413 Request Entity Too Large",
+                        "The entity sent with the request exceeds the maximum "
+                        "allowed bytes.")
+                return
+            self.rfile = KnownLengthRFile(self.conn.rfile, cl)
+        
+        self.server.gateway(self).respond()
+        
+        if (self.ready and not self.sent_headers):
+            self.sent_headers = True
+            self.send_headers()
+        if self.chunked_write:
+            write(self.conn.wfile, ntob("0\r\n\r\n"))
+    
+    def simple_response(self, status, msg=""):
+        """Write a simple response back to the client."""
+        status = str(status)
+        conn_close_header = False
+
+        if status[:3] in ("413", "414"):
+            # Request Entity Too Large / Request-URI Too Long
+            self.close_connection = True
+            if self.response_protocol == 'HTTP/1.1':
+                # This will not be true for 414, since read_request_line
+                # usually raises 414 before reading the whole line, and we
+                # therefore cannot know the proper response_protocol.
+                conn_close_header = True
+            else:
+                # HTTP/1.0 had no 413/414 status nor Connection header.
+                # Emit 400 instead and trust the message body is enough.
+                # (Rewrite status *before* building the status line below,
+                # so the substitution actually reaches the client.)
+                status = "400 Bad Request"
+
+        buf = [ntob(self.server.protocol, "ascii") + SPACE +
+               ntob(status, "ISO-8859-1") + CRLF,
+               ntob("Content-Length: %s\r\n" % len(msg), "ISO-8859-1"),
+               ntob("Content-Type: text/plain\r\n")]
+        if conn_close_header:
+            buf.append(ntob("Connection: close\r\n"))
+
+        buf.append(CRLF)
+        if msg:
+            if isinstance(msg, unicodestr):
+                msg = msg.encode("ISO-8859-1")
+            buf.append(msg)
+        
+        try:
+            write(self.conn.wfile, EMPTY.join(buf))
+        except socket.error:
+            x = sys.exc_info()[1]
+            if x.args[0] not in errors.socket_errors_to_ignore:
+                raise
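+    # E.g. simple_response("413 Request Entity Too Large", "Too big") on an
+    # HTTP/1.1 connection writes the following octets (illustrative):
+    #
+    #   HTTP/1.1 413 Request Entity Too Large\r\n
+    #   Content-Length: 7\r\n
+    #   Content-Type: text/plain\r\n
+    #   Connection: close\r\n
+    #   \r\n
+    #   Too big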
+    
+    def write(self, chunk):
+        """Write unbuffered data to the client."""
+        if self.chunked_write and chunk:
+            buf = [ntob(hex(len(chunk)), 'ASCII')[2:], CRLF, chunk, CRLF]
+            write(self.conn.wfile, EMPTY.join(buf))
+        else:
+            write(self.conn.wfile, chunk)
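+    # E.g. with chunked_write set, self.write(ntob("hello")) emits
+    # "5\r\nhello\r\n"; the terminating "0\r\n\r\n" frame is written by
+    # respond() once the gateway's output is exhausted.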
+    
+    def send_headers(self):
+        """Assert, process, and send the HTTP response message-headers.
+        
+        You must set self.status and self.outheaders before calling this.
+        """
+        hkeys = [key.lower() for key, value in self.outheaders]
+        status = int(self.status[:3])
+        
+        if status == 413:
+            # Request Entity Too Large. Close conn to avoid garbage.
+            self.close_connection = True
+        elif ntob("content-length") not in hkeys:
+            # "All 1xx (informational), 204 (no content),
+            # and 304 (not modified) responses MUST NOT
+            # include a message-body." So no point chunking.
+            if status < 200 or status in (204, 205, 304):
+                pass
+            else:
+                if (self.response_protocol == 'HTTP/1.1'
+                    and self.method != ntob('HEAD')):
+                    # Use the chunked transfer-coding
+                    self.chunked_write = True
+                    self.outheaders.append((ntob("Transfer-Encoding"), ntob("chunked")))
+                else:
+                    # Closing the conn is the only way to determine len.
+                    self.close_connection = True
+        
+        if ntob("connection") not in hkeys:
+            if self.response_protocol == 'HTTP/1.1':
+                # Both server and client are HTTP/1.1 or better
+                if self.close_connection:
+                    self.outheaders.append((ntob("Connection"), ntob("close")))
+            else:
+                # Server and/or client are HTTP/1.0
+                if not self.close_connection:
+                    self.outheaders.append((ntob("Connection"), ntob("Keep-Alive")))
+        
+        if (not self.close_connection) and (not self.chunked_read):
+            # Read any remaining request body data on the socket.
+            # "If an origin server receives a request that does not include an
+            # Expect request-header field with the "100-continue" expectation,
+            # the request includes a request body, and the server responds
+            # with a final status code before reading the entire request body
+            # from the transport connection, then the server SHOULD NOT close
+            # the transport connection until it has read the entire request,
+            # or until the client closes the connection. Otherwise, the client
+            # might not reliably receive the response message. However, this
+            # requirement is not to be construed as preventing a server from
+            # defending itself against denial-of-service attacks, or from
+            # badly broken client implementations."
+            remaining = getattr(self.rfile, 'remaining', 0)
+            if remaining > 0:
+                self.rfile.read(remaining)
+        
+        if ntob("date") not in hkeys:
+            self.outheaders.append((ntob("Date"), formatdate()))
+        
+        if ntob("server") not in hkeys:
+            self.outheaders.append(
+                (ntob("Server"), ntob(self.server.server_name)))
+        
+        buf = [ntob(self.server.protocol, 'ascii') + SPACE + self.status + CRLF]
+        for k, v in self.outheaders:
+            buf.append(k + COLON + SPACE + v + CRLF)
+        buf.append(CRLF)
+        write(self.conn.wfile, EMPTY.join(buf))
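+    # Summary of the framing decision made above (illustrative):
+    #
+    #   Content-Length header present        -> send the body as-is
+    #   no body allowed (1xx/204/205/304)    -> nothing to frame
+    #   else, HTTP/1.1 and not a HEAD reply  -> Transfer-Encoding: chunked
+    #   else (HTTP/1.0, or HEAD)             -> close the conn to mark EOF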
+
+
+class HTTPConnection(object):
+    """An HTTP connection (active socket).
+    
+    server: the Server object which received this connection.
+    socket: the raw socket object (usually TCP) for this connection.
+    makefile: a class for reading from the socket.
+    """
+    
+    remote_addr = None
+    remote_port = None
+    ssl_env = None
+    rbufsize = DEFAULT_BUFFER_SIZE
+    wbufsize = DEFAULT_BUFFER_SIZE
+    RequestHandlerClass = HTTPRequest
+    
+    def __init__(self, server, sock, makefile=makefile):
+        self.server = server
+        self.socket = sock
+        self.rfile = makefile(sock, "rb", self.rbufsize)
+        self.wfile = makefile(sock, "wb", self.wbufsize)
+        self.requests_seen = 0
+    
+    def communicate(self):
+        """Read each request and respond appropriately."""
+        request_seen = False
+        try:
+            while True:
+                # (re)set req to None so that if something goes wrong in
+                # the RequestHandlerClass constructor, the error doesn't
+                # get written to the previous request.
+                req = None
+                req = self.RequestHandlerClass(self.server, self)
+                
+                # This order of operations should guarantee correct pipelining.
+                req.parse_request()
+                if self.server.stats['Enabled']:
+                    self.requests_seen += 1
+                if not req.ready:
+                    # Something went wrong in the parsing (and the server has
+                    # probably already made a simple_response). Return and
+                    # let the conn close.
+                    return
+                
+                request_seen = True
+                req.respond()
+                if req.close_connection:
+                    return
+        except socket.error:
+            e = sys.exc_info()[1]
+            errnum = e.args[0]
+            # SSL sockets report timeouts with a different (longer) string
+            # than plain sockets, so check for both messages.
+            if errnum == 'timed out' or errnum == 'The read operation timed out':
+                # Don't error if we're between requests; only error
+                # if 1) no request has been started at all, or 2) we're
+                # in the middle of a request.
+                # See http://www.cherrypy.org/ticket/853
+                if (not request_seen) or (req and req.started_request):
+                    # Don't bother writing the 408 if the response
+                    # has already started being written.
+                    if req and not req.sent_headers:
+                        try:
+                            req.simple_response("408 Request Timeout")
+                        except errors.FatalSSLAlert:
+                            # Close the connection.
+                            return
+            elif errnum not in errors.socket_errors_to_ignore:
+                self.server.error_log("socket.error %s" % repr(errnum),
+                                      level=logging.WARNING, traceback=True)
+                if req and not req.sent_headers:
+                    try:
+                        req.simple_response("500 Internal Server Error")
+                    except errors.FatalSSLAlert:
+                        # Close the connection.
+                        return
+            return
+        except (KeyboardInterrupt, SystemExit):
+            raise
+        except errors.FatalSSLAlert:
+            # Close the connection.
+            return
+        except errors.NoSSLError:
+            if req and not req.sent_headers:
+                # Unwrap our wfile
+                self.wfile = makefile(self.socket._sock, "wb", self.wbufsize)
+                req.simple_response("400 Bad Request",
+                    "The client sent a plain HTTP request, but "
+                    "this server only speaks HTTPS on this port.")
+                self.linger = True
+        except Exception:
+            e = sys.exc_info()[1]
+            self.server.error_log(repr(e), level=logging.ERROR, traceback=True)
+            if req and not req.sent_headers:
+                try:
+                    req.simple_response("500 Internal Server Error")
+                except errors.FatalSSLAlert:
+                    # Close the connection.
+                    return
+    
+    linger = False
+    
+    def close(self):
+        """Close the socket underlying this connection."""
+        self.rfile.close()
+        
+        if not self.linger:
+            # Python's socket module does NOT call close on the kernel socket
+            # when you call socket.close(). We do so manually here because we
+            # want this server to send a FIN TCP segment immediately. Note this
+            # must be called *before* calling socket.close(), because the latter
+            # drops its reference to the kernel socket.
+            # Python 3 *probably* fixed this with socket._real_close; hard to tell.
+            if not py3k:
+                if hasattr(self.socket, '_sock'):
+                    self.socket._sock.close()
+            self.socket.close()
+        else:
+            # On the other hand, sometimes we want to hang around for a bit
+            # to make sure the client has a chance to read our entire
+            # response. Skipping the close() calls here delays the FIN
+            # packet until the socket object is garbage-collected later.
+            # Someday, perhaps, we'll do the full lingering_close that
+            # Apache does, but not today.
+            pass
+
+
+class TrueyZero(object):
+    """An object which equals and does math like the integer '0' but evals True."""
+    def __add__(self, other):
+        return other
+    def __radd__(self, other):
+        return other
+trueyzero = TrueyZero()
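+# Why not plain 0? WorkerThread.stats (below) uses the pre-Python-2.5
+# ternary idiom "cond and X or Y", which falls through to Y whenever X is
+# falsy. Because trueyzero is truthy,
+#
+#   (self.start_time is None) and trueyzero or self.conn.requests_seen
+#
+# correctly yields trueyzero (adding nothing) while no request is in
+# flight; a literal 0 would be falsy and wrongly fall through to
+# self.conn.requests_seen (and self.conn may be None at that point).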
+
+
+_SHUTDOWNREQUEST = None
+
+class WorkerThread(threading.Thread):
+    """Thread which continuously polls a Queue for Connection objects.
+    
+    Due to the timing issues of polling a Queue, a WorkerThread does not
+    check its own 'ready' flag after it has started. To stop the thread,
+    it is necessary to stick a _SHUTDOWNREQUEST object onto the Queue
+    (one for each running WorkerThread).
+    """
+    
+    conn = None
+    """The current connection pulled off the Queue, or None."""
+    
+    server = None
+    """The HTTP Server which spawned this thread, and which owns the
+    Queue and is placing active connections into it."""
+    
+    ready = False
+    """A simple flag for the calling server to know when this thread
+    has begun polling the Queue."""
+    
+    def __init__(self, server):
+        self.ready = False
+        self.server = server
+        
+        self.requests_seen = 0
+        self.bytes_read = 0
+        self.bytes_written = 0
+        self.start_time = None
+        self.work_time = 0
+        self.stats = {
+            'Requests': lambda s: self.requests_seen + (
+                (self.start_time is None)
+                and trueyzero or self.conn.requests_seen),
+            'Bytes Read': lambda s: self.bytes_read + (
+                (self.start_time is None)
+                and trueyzero or self.conn.rfile.bytes_read),
+            'Bytes Written': lambda s: self.bytes_written + (
+                (self.start_time is None)
+                and trueyzero or self.conn.wfile.bytes_written),
+            'Work Time': lambda s: self.work_time + (
+                (self.start_time is None)
+                and trueyzero or time.time() - self.start_time),
+            'Read Throughput': lambda s: s['Bytes Read'](s) / (
+                s['Work Time'](s) or 1e-6),
+            'Write Throughput': lambda s: s['Bytes Written'](s) / (
+                s['Work Time'](s) or 1e-6),
+        }
+        threading.Thread.__init__(self)
+    
+    def run(self):
+        self.server.stats['Worker Threads'][self.getName()] = self.stats
+        try:
+            self.ready = True
+            while True:
+                conn = self.server.requests.get()
+                if conn is _SHUTDOWNREQUEST:
+                    return
+                
+                self.conn = conn
+                if self.server.stats['Enabled']:
+                    self.start_time = time.time()
+                try:
+                    conn.communicate()
+                finally:
+                    conn.close()
+                    if self.server.stats['Enabled']:
+                        self.requests_seen += self.conn.requests_seen
+                        self.bytes_read += self.conn.rfile.bytes_read
+                        self.bytes_written += self.conn.wfile.bytes_written
+                        self.work_time += time.time() - self.start_time
+                        self.start_time = None
+                    self.conn = None
+        except (KeyboardInterrupt, SystemExit):
+            exc = sys.exc_info()[1]
+            self.server.interrupt = exc
+
+
+class ThreadPool(object):
+    """A Request Queue for an HTTPServer which pools threads.
+    
+    ThreadPool objects must provide min, get(), put(obj), start()
+    and stop(timeout) attributes.
+    """
+    
+    def __init__(self, server, min=10, max=-1):
+        self.server = server
+        self.min = min
+        self.max = max
+        self._threads = []
+        self._queue = queue.Queue()
+        self.get = self._queue.get
+    
+    def start(self):
+        """Start the pool of threads."""
+        for i in range(self.min):
+            self._threads.append(WorkerThread(self.server))
+        for worker in self._threads:
+            worker.setName("CP Server " + worker.getName())
+            worker.start()
+        for worker in self._threads:
+            while not worker.ready:
+                time.sleep(.1)
+    
+    def _get_idle(self):
+        """Number of worker threads which are idle. Read-only."""
+        return len([t for t in self._threads if t.conn is None])
+    idle = property(_get_idle, doc=_get_idle.__doc__)
+    
+    def put(self, obj):
+        """Put a connection (or the _SHUTDOWNREQUEST sentinel) on the queue."""
+        self._queue.put(obj)
+    
+    def grow(self, amount):
+        """Spawn new worker threads (not above self.max)."""
+        for i in range(amount):
+            if self.max > 0 and len(self._threads) >= self.max:
+                break
+            worker = WorkerThread(self.server)
+            worker.setName("CP Server " + worker.getName())
+            self._threads.append(worker)
+            worker.start()
+    
+    def shrink(self, amount):
+        """Kill off worker threads (not below self.min)."""
+        # Remove any dead threads from our list. Iterate over a copy:
+        # removing items from a list while iterating over it skips the
+        # element after each removal.
+        for t in self._threads[:]:
+            if not t.isAlive():
+                self._threads.remove(t)
+                amount -= 1
+        
+        if amount > 0:
+            # Put up to 'amount' shutdown requests on the queue, but never
+            # so many that the pool would drop below self.min. Each worker
+            # that pulls one off the queue terminates (see WorkerThread.run).
+            for i in range(min(amount, len(self._threads) - self.min)):
+                self._queue.put(_SHUTDOWNREQUEST)
+    
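+    # Hypothetical usage sketch (the server object and connections are
+    # elided; all names are as defined in this module):
+    #
+    #   pool = ThreadPool(server, min=10, max=30)
+    #   pool.start()       # spawn 10 workers; block until each is ready
+    #   pool.put(conn)     # hand an HTTPConnection to an idle worker
+    #   pool.grow(5)       # add workers, never exceeding self.max
+    #   pool.shrink(3)     # queue shutdown sentinels for surplus workers
+    #   pool.stop()        # shut everything down (defined below)
+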
+    def stop(self, timeout=5):
+        # Must shut down threads here so the code that calls
+        # this method can know when all threads are stopped.