Commits

__russ__ committed fd245c6

Separated out basic TCP handling from HTTP handling (via TCP/HTTPRequest, TCP/HTTPConnection, TCP/HTTPServer)
Cleaned up and clarified (somewhat) server stop handling
All unit tests pass (except for intentional skips)
Areas with "TODO" need attention
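
With this split, a raw TCP service can be assembled without any HTTP machinery by subclassing the three new classes, as the new test_rawtcp.py helpers do. A minimal sketch, assuming only what this diff shows (the EchoRequest/EchoConnection/EchoServer names are illustrative, and the parse_request()/ready/respond() handler contract is inferred from TCPConnection.communicate and the Int32Echo test handler):

    from cheroot import server

    class EchoRequest(server.TCPRequest):
        def parse_request(self):
            #read one chunk from the raw socket; mark the request ready
            self.data = self.conn.socket.recv(4096)
            self.ready = bool(self.data)
        def respond(self):
            self.conn.socket.sendall(self.data)

    class EchoConnection(server.TCPConnection):
        RequestHandlerClass = EchoRequest

    class EchoServer(server.TCPServer):
        ConnectionClass = EchoConnection

    srv = EchoServer(bind_addr=("127.0.0.1", 9000), gateway=None,
                     recycle_threads=True)
    #srv.safe_start() on a thread / srv.stop() to shut down, as in the tests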

Comments (0)

Files changed (5)

cheroot/dprint.py

-if 1: #flip this between 1 and 0 (true/false) to enable/disable dprinting
+if 0: #flip this between 1 and 0 (true/false) to enable/disable dprinting
     def dprint(msg, skipnewline = False):
         if skipnewline:
             print msg,
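
The hunk above only shows the enabled branch of the switch. Presumably (the else branch falls outside the hunk) the module defines a no-op dprint when the flag is 0, so callers can leave their dprint() calls in place; roughly:

    if 0: #flip this between 1 and 0 (true/false) to enable/disable dprinting
        def dprint(msg, skipnewline = False):
            if skipnewline:
                print msg,
            else:
                print msg
    else:
        def dprint(msg, skipnewline = False):
            #debug output disabled; calls become no-ops
            pass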

cheroot/server.py

     
     """
     ssl_env = None
-    rbufsize = DEFAULT_BUFFER_SIZE
-    wbufsize = DEFAULT_BUFFER_SIZE
     RequestHandlerClass = TCPRequest
     
-    def __init__(self, server, sock):
+    def __init__(self, server, sock, makefile):
         self.server = server
         self.socket = sock
         self.requests_seen = 0
         self.remote_addr = None
         self.remote_port = None
         self.linger = False
-    
+        self.makefile = makefile #file-like socket wrapper, if needed
     
     def communicate(self, recycle_threads):
         """Read each request and respond appropriately.
                 
                 req.respond()
                 if recycle_threads:
-                    dprint("** communicate RETURNING %d from fd %d" % \
+                    dprint("** communicate RETURNING %r from fd %d" % \
                            (req.close_connection, self.socket.fileno()))
                     return req.close_connection
                 else:
             e = sys.exc_info()[1]
             return self.handle_exc_Exception(e, req)
     
-    def handle_exc_KeyboardInterupt(self, exc, req):
-        #Generically, this is managed by the server for proper shutdown.
-        raise exc
-    
-    def handle_exc_SystemExit(self, exc, req):
-        #Generically, this is managed by the server for proper shutdown.
-        raise exc
+    def _notify_server_of_exception(self, exc, req):
+        self.server._interrupt_from_worker = exc
+        
+    #KeyboardInterrupt and SystemExit should be passed back to the server.
+    handle_exc_KeyboardInterrupt = _notify_server_of_exception
+    handle_exc_SystemExit = _notify_server_of_exception
     
     def _generic_exc_handler(self, exc, req):
         self.server.error_log(repr(exc), level=logging.ERROR, traceback=True)
     
     def close(self):
         """Close the socket underlying this connection."""
-        if not self.linger:
+        if self.linger == False:
             # Python's socket module does NOT call close on the kernel socket
-            # when you call socket.close(). We do so manually here because we
+            # when you call socket.close(). We do so manually here because we
             # want this server to send a FIN TCP segment immediately. Note this
             # must be called *before* calling socket.close(), because the latter
             # drops its reference to the kernel socket.
             if not py3k:
                 if hasattr(self.socket, '_sock'):
                     self.socket._sock.close()
-            self.socket.close()
+            try:
+                self.socket.close()
+            except:
+                #Can't do much about it, so eat it.
+                #TODO: log it?
+                #TODO: figure out why old code seemingly called close() after _close() without error
+                pass
         else:
             # On the other hand, sometimes we want to hang around for a bit
             # to make sure the client has a chance to read our entire
         self.rfile.close()
         TCPConnection.close(self)
 
-class old_HTTPConnection(object):
-    """An HTTP connection (active socket).
-    
-    server: the Server object which received this connection.
-    socket: the raw socket object (usually TCP) for this connection.
-    makefile: a class for reading from the socket.
-    """
-    
-    ssl_env = None
-    rbufsize = DEFAULT_BUFFER_SIZE
-    wbufsize = DEFAULT_BUFFER_SIZE
-    RequestHandlerClass = HTTPRequest
-    
-    def __init__(self, server, sock, makefile=makefile):
-        self.server = server
-        self.socket = sock
-        self.rfile = makefile(sock, "rb", self.rbufsize)
-        self.wfile = makefile(sock, "wb", self.wbufsize)
-        self.requests_seen = 0
-        self.remote_addr = None
-        self.remote_port = None
-    
-    def communicate(self, recycle_threads):
-        """Read each request and respond appropriately.
-        
-        Returns true if the connection should be closed when done.
-        
-        When recycle_threads==False, the return value is meaningless since the
-        calling logic will always close the connection in this mode. In this
-        mode, this method continually processes incoming requests, not
-        returning until the client closes the connection or an error occurs.
-        During this time, the current Worker thread can do nothing else
-        (idling between requests).
-        
-        If recycle_threads==True, the boolean return value is very important.
-        Determination of this return value is via the close_connection
-        property of the RequestHandlerClass in use, or is True in the event of
-        error. If the connection is to be kept alive, once the request is
-        completely handled the socket is returned to the main server to
-        monitor for future incoming requests (at which time the request will
-        be given to any available Worker).
-        
-        """
-        dprint("COMMUNICATE STARTED!!!")
-        #NOTE: the loop in this routine has been kept in order to replicate
-        #legacy operation where recycle_threads=False.
-        request_seen = False
-        try:
-            while True:
-                req = None #this is pointless if recycle_threads=True
-                req = self.RequestHandlerClass(self.server, self)
-                
-                # This order of operations should guarantee correct pipelining.
-                req.parse_request()
-                if self.server.stats['Enabled']:
-                    self.requests_seen += 1
-                if not req.ready:
-                    # Something went wrong in the parsing (and the server has
-                    # probably already made a simple_response). Return and
-                    # let the conn close.
-                    return True #indicates to force socket close
-                
-                request_seen = True
-                req.respond()
-                if recycle_threads:
-                    dprint("** communicate RETURNING %d from fd %d" % (req.close_connection, self.socket.fileno()))
-                    return req.close_connection
-                else:
-                    if req.close_connection:
-                        return True #indicates to force socket close
-        
-        except socket.error:
-            e = sys.exc_info()[1]
-            errnum = e.args[0]
-            # sadly SSL sockets return a different (longer) time out string
-            if errnum == 'timed out' or errnum == 'The read operation timed out':
-                # Don't error if we're between requests; only error
-                # if 1) no request has been started at all, or 2) we're
-                # in the middle of a request.
-                # See http://www.cherrypy.org/ticket/853
-                if (not request_seen) or (req and req.started_request):
-                    # Don't bother writing the 408 if the response
-                    # has already started being written.
-                    if req and not req.sent_headers:
-                        try:
-                            req.simple_response("408 Request Timeout")
-                        except errors.FatalSSLAlert:
-                            # Close the connection.
-                            return True
-            elif errnum not in errors.socket_errors_to_ignore:
-                self.server.error_log("socket.error %s" % repr(errnum),
-                                      level=logging.WARNING, traceback=True)
-                if req and not req.sent_headers:
-                    try:
-                        req.simple_response("500 Internal Server Error")
-                    except errors.FatalSSLAlert:
-                        # Close the connection.
-                        return True
-            return True #close the connection
-        except (KeyboardInterrupt, SystemExit):
-            raise
-        except errors.FatalSSLAlert:
-            # Close the connection.
-            return True
-        except errors.NoSSLError:
-            msg = ("The client sent a plain HTTP request, but "
-                   "this server only speaks HTTPS on this port.")
-            self.server.error_log(msg)
-
-            if req and not req.sent_headers:
-                # Unwrap our wfile
-                self.wfile = makefile(self.socket._sock, "wb", self.wbufsize)
-                req.simple_response("400 Bad Request", msg)
-                self.linger = True
-        except Exception:
-            e = sys.exc_info()[1]
-            self.server.error_log(repr(e), level=logging.ERROR, traceback=True)
-            if req and not req.sent_headers:
-                try:
-                    req.simple_response("500 Internal Server Error")
-                except errors.FatalSSLAlert:
-                    # Close the connection.
-                    return True
-    
-    linger = False
-    
-    def close(self):
-        """Close the socket underlying this connection."""
-        self.rfile.close()
-        
-        if not self.linger:
-            # Python's socket module does NOT call close on the kernel socket
-            # when you call socket.close(). We do so manually here because we
-            # want this server to send a FIN TCP segment immediately. Note this
-            # must be called *before* calling socket.close(), because the latter
-            # drops its reference to the kernel socket.
-            # Python 3 *probably* fixed this with socket._real_close; hard to tell.
-            if not py3k:
-                if hasattr(self.socket, '_sock'):
-                    self.socket._sock.close()
-            self.socket.close()
-        else:
-            # On the other hand, sometimes we want to hang around for a bit
-            # to make sure the client has a chance to read our entire
-            # response. Skipping the close() calls here delays the FIN
-            # packet until the socket object is garbage-collected later.
-            # Someday, perhaps, we'll do the full lingering_close that
-            # Apache does, but not today.
-            pass
-
 try:
     import fcntl
 except ImportError:
         buf += sock.recv(INT32_LEN - len(buf))
     return struct.unpack("@I", buf)[0] #native byte order (same machine!)
 
-class HTTPServer(object):
-    """An HTTP server."""
+
+
+
+
+
+
+class TCPServer(object):
+    """A threaded TCP Server."""
     
     _bind_addr = "127.0.0.1"
-    _interrupt = None
     
     gateway = None
     """A Gateway instance."""
     server_name = None
     """The name of the server; defaults to socket.gethostname()."""
     
-    protocol = "HTTP/1.1"
-    """The version string to write in the Status-Line of all HTTP responses.
-    
-    For example, "HTTP/1.1" is the default. This also limits the supported
-    features used in the response."""
-    
     request_queue_size = 5
     """The 'backlog' arg to socket.listen(); max queued connections (default 5)."""
     
     ready = False
     """An internal flag which marks whether the socket is accepting connections."""
     
-    max_request_header_size = 0
-    """The maximum size, in bytes, for request headers, or 0 for no limit."""
-    
-    max_request_body_size = 0
-    """The maximum size, in bytes, for request bodies, or 0 for no limit."""
-    
     nodelay = True
     """If True (the default since 3.1), sets the TCP_NODELAY socket option."""
     
-    ConnectionClass = HTTPConnection
-    """The class to use for handling HTTP connections."""
+    ConnectionClass = TCPConnection
+    """The class to use for handling basic TCP connections."""
     
     ssl_adapter = None
     """An instance of SSLAdapter (or a subclass).
     You must have the corresponding SSL driver library installed."""
     
     def __init__(self, bind_addr, gateway, minthreads=10, maxthreads=-1,
-                 server_name=None, protocol='HTTP/1.1', ssl_adapter=None,
+                 server_name=None, protocol='', ssl_adapter=None,
                  recycle_threads=False):
         self.bind_addr = bind_addr
         self.gateway = gateway
         
+        self._interrupt_from_worker = None
+        
         self.requests = threadpool.ThreadPool(
             self, min=minthreads or 1, max=maxthreads)
         
 
         self.clear_stats()
         
-        self._socket_checking_complete = threading.Event()
-        self._socket_checking_complete.set() #in case of exit prior to starting!
+        self._listening_loop_complete = threading.Event()
+        self._listening_loop_complete.set() #in case of exit prior to starting!
         
         self._recycle_threads = recycle_threads
         if recycle_threads == False:
             self._active_socket_info = {}
             self._needy_conn_info = collections.deque()
     
-    def clear_stats(self):
+    def _clear_stats(self):
         self._start_time = None
         self._run_time = 0
         self.stats = {
                  for w in s['Worker Threads'].values()], 0),
             'Worker Threads': {},
             }
-        logging.statistics["Cheroot HTTPServer %d" % id(self)] = self.stats
+    
+    def clear_stats(self):
+        self._clear_stats()
+        logging.statistics["Cheroot TCPServer %d" % id(self)] = self.stats
     
     def runtime(self):
         if self._start_time is None:
         self.ready = True
         self._start_time = time.time()
         try:
-            self._socket_checking_complete.clear()
+            self._listening_loop_complete.clear()
             while self.ready:
                 try:
                     self.tick()
                 except (KeyboardInterrupt, SystemExit):
-                    raise
+                    raise #TODO: not sure why we do this.  When would safe_start not happen?
                 except:
-                    self.error_log("Error in HTTPServer.tick",
+                    self.error_log("Error in server tick()",
                                    level=logging.ERROR,
                                    traceback=True)
                 
-                if self.interrupt:
-                    #self.interrupt setter calls stop internally, which needs the
-                    #_socket_checking_complete event set (and we're done).
-                    self._socket_checking_complete.set()
-                    while self.interrupt is True:
-                        # Wait for self.stop() to complete. See _set_interrupt.
-                        time.sleep(0.1)
-                    if self.interrupt:
-                        raise self.interrupt
+                if self._interrupt_from_worker:
+                    exc = self._interrupt_from_worker
+                    if isinstance(exc, (KeyboardInterrupt, SystemExit)):
+                        #Would prefer to just break out of the loop here, but
+                        #older code raised the exception so that is being kept
+                        #as-is.
+                        #TODO: is this required?  Why not always have safe_start-like behaviour?
+                        raise exc
+                    else:
+                        break
         finally:
-            self._socket_checking_complete.set()
+            self._listening_loop_complete.set()
+            #Would like to do a clean stop, but legacy code harshly exited
+            #(sometimes to safestart wrapper), so this will too.
+            #TODO: determine if it is required to harshly exit here
+            #self.stop()
 
     def error_log(self, msg="", level=20, traceback=False):
         # Override this in subclasses as desired
                 sock.close()
                 return
             
-            dprint("Creating connection")
+            dprint("Creating connection of type %r" % self.ConnectionClass)
             conn = self.ConnectionClass(self, sock, mf)
-            dprint("Connection created")
+            dprint("Connection created: %r" % conn)
             
             if not isinstance(self.bind_addr, basestring):
                 # optional values
                 return
             raise
     
-    def _get_interrupt(self):
-        return self._interrupt
-    def _set_interrupt(self, interrupt):
-        self._interrupt = True
-        self.stop()
-        self._interrupt = interrupt
-    interrupt = property(_get_interrupt, _set_interrupt,
+    
+    def _get_interrupt_from_worker(self):
+        return self._interrupt_from_worker
+    def _set_interrupt_from_worker(self, interrupt):
+        self._interrupt_from_worker = interrupt
+        #Now we need to break the listening loop for it to be able to see the
+        #interrupt and exit ASAP (or we have to wait for the relevant timeout)
+        self._interrupt_listening_loop()
+    #Leaving this interrupt property in place in case of docs and legacy usage
+    interrupt = property(_get_interrupt_from_worker,
+                         _set_interrupt_from_worker,
                          doc="Set this to an Exception instance to "
-                             "interrupt the server.")
+                         "interrupt the server.")
     
+    def _interrupt_listening_loop(self):
+        """Break out of any blocking waits in the listening loop."""
+        if self._listening_loop_complete.isSet():
+            #Already done!
+            return
+        elif self._recycle_threads:
+            #Need to break the select.poll timeout.
+            #Sending 0 down the worker->main pipe signals the loop to exit
+            threadpool.send_int32(self._thread_comm_pipe__threadsock, 0)
+        else:
+            #Need to break the socket.accept() timeout
+            self._break_listener_accept()
+        self._listening_loop_complete.wait(10) #TODO: set non-arbitrary wait
+        if self._listening_loop_complete.isSet() == False:
+            #This should not happen!  That loop should be easy to exit.
+            dprint("WARNING!!!  LISTENING LOOP DID NOT EXIT!!?")
+            #In the worst case, we let the program fall out. No sense raising
+            #an exception at this point.
+            pass
     
-    def _stop_polling_loop(self):
-        """exit the polling loop as soon as possible."""
-        #Sending 0 is a signal down the worker->main pipe signals to leave
-        threadpool.send_int32(self._thread_comm_pipe__threadsock, 0)
-        
-    def stop(self):
-        """Gracefully shutdown a server that is serving forever."""
-        dprint("************* SERVER STOP REQUESTED *******************")
-        self.ready = False
-        if self._start_time is not None:
-            self._run_time += (time.time() - self._start_time)
-        self._start_time = None
-        
-        sock = getattr(self, "socket", None)
-        if sock:
-            if self._recycle_threads:
-                #Exit the polling loop
-                self._stop_polling_loop()
-            
+    def _break_listener_accept(self):
+        """Cause socket.accept() to exit immediately on listening socket."""
+        if self._listening_loop_complete.isSet():
+            #Already done!
+            return
+        listener = getattr(self, "socket", None)
+        if listener:
             if not isinstance(self.bind_addr, basestring):
                 # Touch our own socket to make accept() return immediately.
                 try:
-                    host, port = sock.getsockname()[:2]
+                    host, port = listener.getsockname()[:2]
                 except socket.error:
                     x = sys.exc_info()[1]
                     if x.args[0] not in errors.socket_errors_to_ignore:
                         except socket.error:
                             if s:
                                 s.close()
-            if hasattr(sock, "close"):
-                sock.close()
+            if hasattr(listener, "close"):
+                listener.close()
             self.socket = None
+            
+    
+    
         
-        #shut down any active worker threads
+    def stop(self):
+        """Gracefully shutdown a server that is serving forever."""
+        dprint("************* SERVER STOP REQUESTED *******************")
+        self.ready = False #interrupts parse_request->respond connection (TODO: is this what we want?)
+        self._interrupt_listening_loop() #possibly already done, but ok
+        
+        if self._start_time is not None:
+            self._run_time += (time.time() - self._start_time)
+        self._start_time = None
+        
+        #Terminate the thread pool...
         self.requests.stop(self.shutdown_timeout)
         
-        #Ensure that we're definitely out of the main socket checking loop...
-        self._socket_checking_complete.wait(2)
-        if not self._socket_checking_complete.isSet():
-            print "GIVING UP ON EVENT!!"
-        
+        #Ensure any sockets we know about are explicitly closed (rather than
+        #relying on gc)
         if self._recycle_threads:
             #Close all active sockets, now that we're sure that the workers will
             #not alter the dict.
             for fd, ci in self._active_socket_info.iteritems():
                 ci[0].close()
+            #And the worker side of the socket pair...
+            self._thread_comm_pipe__threadsock.close()
         
-        #dprint("************* SERVER SHUT DOWN *******************")
+        dprint("************* SERVER SHUT DOWN *******************")
+        
 
-
+class HTTPServer(TCPServer):
+    """An HTTP server."""
+    protocol = "HTTP/1.1"
+    """The version string to write in the Status-Line of all HTTP responses.
+    
+    For example, "HTTP/1.1" is the default. This also limits the supported
+    features used in the response."""
+    
+    
+    max_request_header_size = 0
+    """The maximum size, in bytes, for request headers, or 0 for no limit."""
+    
+    max_request_body_size = 0
+    """The maximum size, in bytes, for request bodies, or 0 for no limit."""
+    
+    ConnectionClass = HTTPConnection
+    """The class to use for handling HTTP connections."""
+    
+    def __init__(self, bind_addr, gateway, minthreads=10, maxthreads=-1,
+                 server_name=None, protocol='HTTP/1.1', ssl_adapter=None,
+                 recycle_threads=False):
+        TCPServer.__init__(self,
+                           bind_addr = bind_addr,
+                           gateway = gateway,
+                           minthreads = minthreads,
+                           maxthreads = maxthreads,
+                           server_name = server_name,
+                           protocol = protocol,
+                           ssl_adapter = ssl_adapter,
+                           recycle_threads = recycle_threads,
+                           )
+    
+    def clear_stats(self):
+        self._clear_stats()
+        logging.statistics["Cheroot HTTPServer %d" % id(self)] = self.stats
+    
 class Gateway(object):
     """A base class to interface HTTPServer with other systems, such as WSGI."""
     
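
The reworked interrupt plumbing above means a worker that catches KeyboardInterrupt/SystemExit stores the exception on the server via _set_interrupt_from_worker(), which then pokes the listening loop awake (send_int32(..., 0) over the worker->main pipe when recycle_threads is on, otherwise a throwaway connect to break accept()) and waits on _listening_loop_complete. A small self-contained sketch of just the Event/exception hand-off pattern (illustrative only; the real wake-up, tick() and stop() logic live in TCPServer and threadpool and are omitted here):

    import threading, time

    class MiniServer(object):
        def __init__(self):
            self.ready = True
            self._interrupt_from_worker = None
            self._listening_loop_complete = threading.Event()
            self._listening_loop_complete.set() #in case of exit prior to starting

        def _set_interrupt_from_worker(self, exc):
            #called from a worker thread; the real code also breaks the
            #poll/accept wait so the loop notices immediately
            self._interrupt_from_worker = exc

        def start(self):
            self._listening_loop_complete.clear()
            try:
                while self.ready:
                    time.sleep(0.05) #stand-in for tick()/poll/accept
                    exc = self._interrupt_from_worker
                    if exc is not None:
                        if isinstance(exc, (KeyboardInterrupt, SystemExit)):
                            raise exc
                        break
            finally:
                self._listening_loop_complete.set()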

cheroot/test/test_conn.py

     def test_HTTP11_Timeout(self):
         if self.httpserver._recycle_threads:
             #HACK!!!  Timeouts don't happen the same way. Skip for now.
-            self.skip("Test invalid when recycle_trheads == True.")
+            self.skip("Test invalid when recycle_threads == True.")
         # If we timeout without sending any data,
         # the server will close the conn with a 408.
         self.httpserver.protocol = "HTTP/1.1"
     def test_HTTP11_Timeout_after_request(self):
         if self.httpserver._recycle_threads:
             #HACK!!!  Timeouts don't happen the same way. Skip for now.
-            self.skip("Test invalid when recycle_trheads == True.")
+            self.skip("Test invalid when recycle_threads == True.")
         
         # If we timeout after at least one request has succeeded,
         # the server will close the conn without 408.

cheroot/test/test_rawtcp.py

         buf += sock.recv(INT32_LEN - len(buf))
     return struct.unpack(PACK_FMT, buf)[0] #native byte order (same machine!)
 
+
 class Int32Echo_RequestHandler(server.TCPRequest):
     """A simple TCP server handler for testing known-length raw TCP messages.
     
     def respond(self):
         self.conn.socket.sendall(struct.pack(PACK_FMT, self._rx))
 
+class Int32Echo_Connection(server.TCPConnection):
+    RequestHandlerClass = Int32Echo_RequestHandler
+
+class Int32Echo_Server(server.TCPServer):
+    ConnectionClass = Int32Echo_Connection
+
+#TODO: simplify custom server generation... having to define 3 classes is excessive.
+# - it would be nice to just specify a stock TCPServer with a defined
+#   RequestHandler (then again, custom error handlers may be needed on the
+#   connection, too)
+
 def CreateAndStartInt32EchoServer(bind_addr = ECHO_SERVER_ADDR,
                                   minthreads = 10,
                                   maxthreads = -1,
                                   ssl_adapter = None,
                                   recycle_threads = True,
                                   ):
-    #Use HTTPServer, even though we're going to have nothing to do with
-    #HTTP (this stems from cheroot's HTTP heritage).
-    #TODO: make a TCPServer calss (TCPRequest and TCPConnection done already... one more to go)
-    srv = server.HTTPServer(bind_addr = bind_addr,
-                            gateway = None, #needed?
-                            minthreads = minthreads,
-                            maxthreads = maxthreads,
-                            ssl_adapter = ssl_adapter,
-                            recycle_threads = recycle_threads,
+    srv = Int32Echo_Server(bind_addr = bind_addr,
+                           gateway = None, #needed?
+                           minthreads = minthreads,
+                           maxthreads = maxthreads,
+                           ssl_adapter = ssl_adapter,
+                           recycle_threads = recycle_threads,
                            )
     #Jack up the listening request queue size because we are going to hit it
     #hard. If left at the default size of 5, large numbers of simultaneous
     #connections stutter badly.
     srv.request_queue_size = 500
-    #Set the request handler to our Int32Echo handler...
-    # - some more HTTP legacy stuff here with "HTTPConnection"
-    oldReqHandler = srv.ConnectionClass.RequestHandlerClass
-    try:
-        srv.ConnectionClass.RequestHandlerClass = Int32Echo_RequestHandler
-        #Launch the server on a thread...
-        slt = threading.Thread(target = srv.safe_start)
-        slt.setDaemon(1)
-        slt.start()
-        #Give it a chance to bind...
-        time.sleep(0.1)
-        #print "Server started"
-    finally:
-        #Restore the old connection class to the HTTPServer (this is a
-        #tenmporary hack until HTTPServer is moved to a higher level above a
-        #base TCPServer)
-        srv.ConnectionClass.RequestHandlerClass = oldReqHandler
+    
+    #Launch the server on a thread...
+    slt = threading.Thread(target = srv.safe_start)
+    slt.setDaemon(1)
+    slt.start()
+    #Give it a chance to bind...
+    time.sleep(0.1)
     return srv
 
 class Int32EchoClient(object):
         #Set the number of connections to test...
         # - default is 500 since many systems limit to 1024
         # - Watch your max file descriptors here or get "too many files"
-        #   - ('ulimit -n' on linux, increase with 'ulimit -Sn <value>' up to
+        #   - 'ulimit -n' on linux, increase with 'ulimit -Sn <value>' up to
         #     hard limit of 'ulimit -Hn', at least without hoop jumping.
         # - also make sure request_queue_size is large, or it stutters on
         #   the connection slam we're about to do (but still works).
         #   - seems to be a max here for stutter-free connections, at least on
         #     the box this was coded on.
         # - I have tested up to 30000 connections
-        n = 1 #client count.
+        n = 1500 #client count.
         clients = {} #k=index; v=client
+        st = time.time()
         for i in xrange(n):
             clients[i + 1] = Int32EchoClient(ECHO_SERVER_ADDR, i)
-            time.sleep(0.002) #try and give the listener a break
-            #print "Connected client %d" % i
+            #time.sleep(0.002) #try and give the listener a break
+            print "%.3f -- Connected client %d" % (time.time() - st, i)
         
         #time.sleep(1)
         #x=raw_input("Press enter!!")
         #at this point we will have n open connections.  Now exercise them.
+        st = time.time()
         for i, client in clients.iteritems():
             echo = client.get_echo(i)
             self.assertEqual(echo, i)
-            if i % 100 == 0:
-                print "Echo 1 complete for %d/%d clients" % (i, n)
+            print "%.3f -- Echo 1 complete for %d clients" % (time.time() - st, i)
+            #if i % 100 == 0:
+                #print "Echo 1 complete for %d/%d clients" % (i, n)
             #print "client %d worked!" % i
 
         #Exercise them again to prove that our persistent connections still work...
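
The fd-limit notes in the connection-slam test above ('ulimit -n' and friends) can also be handled from inside the test process. A hedged aside, not something this commit does: the stdlib resource module exposes the same soft/hard limits programmatically (Unix only):

    import resource

    #Check, and within the hard limit raise, the per-process fd ceiling before
    #opening a few thousand client sockets.
    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    wanted = 4096
    if soft < wanted:
        #cannot exceed the hard limit without privileges; RLIM_INFINITY means
        #the hard limit is unbounded
        if hard == resource.RLIM_INFINITY or hard >= wanted:
            resource.setrlimit(resource.RLIMIT_NOFILE, (wanted, hard))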

cheroot/workers/threadpool.py

                 conn = server.requests.get()
                 if conn is _SHUTDOWNREQUEST:
                     return
-                dprint("REQUEST RECEIVED ON fd %d" % conn.socket.fileno())
+                fd = conn.socket.fileno()
+                dprint("REQUEST RECEIVED ON fd %d" % fd)
                 
                 self.conn = conn
                 if server.stats['Enabled']:
                         #let the server loop know it needs to look for
                         #activity on this socket, since this worker is done
                         #with it (the current request was completely handled)
-                        dprint("SENDING fd %d BACK TO SERVER" % conn.socket.fileno(), True)
+                        dprint("SENDING fd %d BACK TO SERVER" % fd, True)
                         send_int32(server._thread_comm_pipe__threadsock,
                                    conn.socket.fileno())
-                        dprint(" -- fd %d SENT" % conn.socket.fileno())
+                        dprint(" -- fd %d SENT" % fd)
                 finally:
-                    dprint("COMMUNICATE FINISHED")
+                    dprint("COMMUNICATE FINISHED FOR fd %d" % fd)
                     if recycle_threads == False:
                         #always close it (per legacy implementation in which
                         #communicate() dealt with all requests in the single
                         conn.close()
                     else:
                         if close_connection:
-                            fd = conn.socket.fileno()
-                            #Remove the socket from the "active" socket list...
+                            #Remove the socket from the "active" socket list
+                            #since we will not be giving it back to the
+                            #listening loop...
                             # - Note: dict del is atomic for int keys
                             #    - see http://goo.gl/yBpT7
                             # - this also must be done before closing the
                     self.conn = None
         except (KeyboardInterrupt, SystemExit):
             exc = sys.exc_info()[1]
-            self.server.interrupt = exc
+            #Inform the server that a thread caught an interrupt
+            self.server._set_interrupt_from_worker(exc)
 
 
 class ThreadPool(object):
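
The worker code above hands a finished-but-alive connection back to the listening loop by writing its file descriptor down the worker->main socketpair as a native-order int32 (and the server writes 0 through the same pipe to ask the loop to exit, per _interrupt_listening_loop). The read side of that framing is visible at the top of the server.py and test_rawtcp.py hunks; a minimal sketch of both halves, with send_int32 reconstructed by assumption since its body is not part of this diff:

    import struct

    INT32_LEN = struct.calcsize("@I")

    def send_int32(sock, value):
        #assumed counterpart to the read loop below; one native-order uint32
        sock.sendall(struct.pack("@I", value))

    def read_int32(sock):
        #mirrors the read loop shown in the server.py/test_rawtcp.py hunks
        buf = b""
        while len(buf) < INT32_LEN:
            buf += sock.recv(INT32_LEN - len(buf))
        return struct.unpack("@I", buf)[0] #native byte order (same machine!)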