Commits

Matt Joiner committed 2cd534c

Preparations for a greened standard library

Comments (0)

Files changed (7)

examples/http_server.py

 #!/usr/bin/env python3.3
 
-import socket
-#
+import green380.socket as socket
 import green380
 
 def handler(sock, addr):
     print('Handling connection from', addr)
-    header = yield from sock.until_recv_term(b'\r\n\r\n')
+    header = yield from sock.recv_term(b'\r\n\r\n')
     print('Received header:', header)
-    yield from sock.until_sendall('Hello {}!\n'.format(addr).encode())
+    yield from sock.sendall('Hello {}!\n'.format(addr).encode())
     sock.close()
 
 @green380.spawn
 def server():
-    sock = green380.socket()
+    sock = socket.socket()
     sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
     sock.bind(('', 8080))
     sock.listen(socket.SOMAXCONN)
     while True:
-        new_sock, addr = yield from sock.until_accept()
+        new_sock, addr = yield from sock.accept()
         green380.spawn(handler, new_sock, addr)
 
 green380.run()

green380/__init__.py

+#~ from .socket import socket
+from .threading import RLock, Event
+from .queue import Queue
 from ._core import *
+
+class Hook:
+
+    def __init__(self):
+        self._path = __path__
+
+    def find_module(self, module_name, package_path):
+        if package_path == self._path:
+            pass  # TODO: unfinished — decide what restricting to this package's path should do
+        return self
+
+    def load_module(self, module_name):
+        print(module_name)
+
+import sys
+sys.meta_path.append(Hook())

green380/_core.py

-__all__ = 'schedule', 'spawn', 'run', 'Channel', 'socket'
+__all__ = 'schedule', 'spawn', 'run', 'Channel',
 
 import collections
 import errno
 
 from .fileno import fileno
 
-#~ logging.root.setLevel(logging.NOTSET)
+logging.root.setLevel(logging.NOTSET)
 
 
-class Lock:
-
-    def __init__(self):
-        self._locked = False
-
-    def __enter__(self):
-        self.acquire()
-        return self
-
-    def __exit__(self, *args):
-        self.release()
-
-    def acquire(self, timeout=None):
-        if not self._locked:
-            self._locked = True
-            return True
-        self._waiters.add(current)
-        acquired = yield from timeout_multiplex(None, timeout)
-        yield
-        return acquired
-
-    def release(self):
-        if not self._locked:
-            raise RuntimeError("can't released unlocked lock")
-        if self._waiters:
-            w = self._waiter.pop()
-            w.send(True)
-            schedule(w)
-        else:
-            self._locked = False
-
-
-class RLock:
-
-    def __init__(self):
-        self._lock = Lock()
-        self._owner = None
-        self._depth = 0
-
-    def __enter__(self):
-        self.acquire()
-        return self
-
-    def __exit__(self, *args):
-        self.release()
-
-    def acquire(self, timeout=None):
-        if current == self._owner:
-            self._depth += 1
-            return True
-        if not self._lock.acquire(timeout):
-            return False
-        self._owner = current
-        return True
-
-    def release(self):
-        if current != self._owner:
-            raise RuntimeError('not the owner')
-        if not self._depth:
-            self._lock.release()
-            self._owner = None
-        self._depth -= 1
-
-
-class Event:
-
-    def __init__(self):
-        self._value = False
-        self._waiters = set()
-
-    def is_set(self):
-        return self._value
-
-    def set(self):
-        for w in self._waiters:
-            w.send(True)
-            schedule(w)
-        self._waiters.clear()
-
-    def clear(self):
-        self._value = False
-
-    def wait(self, timeout=None):
-        if self._value:
-            return True
-        waiter = current
-        self._waiters.add(waiter)
-        @spawn
-        def timeout_():
-            yield 'timeout', timeout
-            try:
-                self._waiters.remove(waiter)
-            except KeyError:
-                return
-            waiter.send(False)
-            schedule(waiter)
-        value = yield
-        yield
-        return value
-
-
-class Condition:
-
-    def __init__(self, lock=None):
-        if lock is None:
-            lock = RLock()
-        self._lock = lock
-        self._waiters = []
-
-    def notify(self, n=1):
-        for w in self._waiters[:n]:
-            w.send(True)
-            schedule(w)
-        self._waiters = self._waiters[n:]
-
-    def notify_all(self):
-        self.notify(len(self._waiters))
-
-    def wait(self, timeout):
-        notified = yield from timeout_multiplex(None, timeout)
-        yield
-        return notified
-
-
-class Queue:
-
-    class Timeout(Exception): pass
-
-    def __init__(self, capacity=None):
-        self._capacity = capacity
-        self._items = collections.deque()
-        self._lock = Lock()
-        self._not_full = Condition(self._lock)
-        self._not_empty = Condition(self._lock)
-
-    def put(self, item, timeout=None):
-        with self._lock:
-            if self._capacity:
-                while len(self._items) >= self._capacity:
-                    if not self._not_full.wait(timeout):
-                        raise self.Timeout
-            self._items.append(item)
-            self._not_empty.notify()
-
-    def get(self, timeout=None):
-        with self._lock:
-            while not self._items:
-                if not self._not_empty.wait(timeout):
-                    raise self.Timeout
-            item = self._items.popleft()
-            if len(self._items) < self._capacity:
-                self._not_full.notify()
-            return item
-
-
-class socket(_socket.socket):
-
-    def __init__(self, *args, **kwargs):
-        super().__init__(*args, **kwargs)
-        super().setblocking(False)
-        self.__timeout = None
-
-    def until_recv_term(self, term):
-        buf = b''
-        while not buf.endswith(term):
-            yield 'read', self
-            buf1 = self.recv(0x10000, _socket.MSG_PEEK)
-            if not buf1:
-                break
-            index = (buf + buf1).find(term)
-            buf2 = self.recv(len(buf1) if index == -1 else index - len(buf) + len(term))
-            buf += buf2
-        return buf
-
-    def until_recv(self, count):
-        yield
-        return self.recv(term)
-
-    def until_sendall(self, buf):
-        while buf:
-            sent = yield from self.until_send(buf)
-            if not sent:
-                break
-            buf = buf[sent:]
-
-    def until_send(self, buf):
-        yield 'write', self
-        return self.send(buf)
-
-    def until_accept(self):
-        yield 'read', self
-        return self.accept()
-
-    def accept(self):
-        yield 'read', self
-        sock, addr = super().accept()
-        return self.__class__(fileno=sock.detach()), addr
-
-    def settimeout(self, timeout):
-        self.__timeout = timeout
-
-    def setblocking(self, blocking):
-        self.settimeout(None if blocking else 0)
-
-    def connect(self, addr):
-        # TODO switch this to use connect_ex, should be faster
-        try:
-            return super().connect(addr)
-        except _socket.error as exc:
-            if exc.errno != errno.EINPROGRESS:
-                raise
-        @spawn
-        def timeout_fiber():
-            yield 'timeout', self.__timeout
-            main_fiber.send(False)
-            event_fiber.close()
-            schedule(main_fiber)
-        @spawn
-        def event_fiber():
-            yield 'write', self
-            err = self.getsockopt(_socket.SOL_SOCKET, _socket.SO_ERROR)
-            if err == 0:
-                main_fiber.send(True)
-            else:
-                main_fiber.throw(_socket.error(err, os.strerror(err)))
-            timeout_fiber.close()
-            schedule(main_fiber)
-        main_fiber = current
-        try:
-            yield
-        finally:
-            yield
-
 def timeout_multiplex(event, timeout):
     @spawn
     def timeout_fiber():
         yield 'timeout', timeout
         main_fiber.send(False)
-    @spawn
-    def event_fiber():
-        yield event
-        main_fiber.send(True)
     main_fiber = current
-    try:
-        return (yield)
-    finally:
+    ready = yield
+    assert isinstance(ready, bool), ready
+    if ready:
         timeout_fiber.close()
-        event_fiber.close()
+    schedule(main_fiber)
+    yield
+    return ready
 
 
 class Channel:
 schedule = scheduler.schedule
 spawn = scheduler.spawn
 run = scheduler.run
+
+def current_fiber():
+    return current

green380/_thread.py

+from _thread import error, TIMEOUT_MAX
+from . import _core
+
+def start_new_thread(fiber, args=(), kwargs=None):
+    if kwargs is None:
+        kwargs = {}
+    return _core.spawn(fiber, *args, **kwargs)
+
+def get_ident():
+    assert _core.current is not None
+    return _core.current
+
+
+class allocate_lock:
+
+    def __init__(self):
+        self._locked = False
+        self._waiters = []
+
+    def __enter__(self):
+        self.acquire()
+        return self
+
+    def __exit__(self, *args):
+        self.release()
+
+    def acquire(self, blocking=True, timeout=-1):
+        if not self._locked:
+            self._locked = True
+            return True
+        if blocking:
+            if timeout == -1:
+                timeout = None
+            elif timeout < 0:
+                raise ValueError("timeout value must be strictly positive")
+            elif timeout > TIMEOUT_MAX:
+                raise OverflowError("timeout value is too large")
+        else:
+            if timeout != -1:
+                raise ValueError("can't specify a timeout for a non-blocking call")
+            timeout = 0
+        if timeout == 0:
+            return False
+        self._waiters.append(get_ident())
+        acquired = yield from _core.timeout_multiplex(None, timeout)
+        assert self._locked
+        return acquired
+
+    def release(self):
+        if not self._locked:
+            raise RuntimeError("can't release unlocked lock")
+        if self._waiters:
+            w = self._waiters.pop()
+            w.send(True)
+        else:
+            self._locked = False

green380/socket.py

+# Wrapper module for _socket, providing some additional facilities
+# implemented in Python.
+
+"""\
+This module provides socket operations and some related functions.
+On Unix, it supports IP (Internet Protocol) and Unix domain sockets.
+On other systems, it only supports IP. Functions specific for a
+socket are available as methods of the socket object.
+
+Functions:
+
+socket() -- create a new socket object
+socketpair() -- create a pair of new socket objects [*]
+fromfd() -- create a socket object from an open file descriptor [*]
+gethostname() -- return the current hostname
+gethostbyname() -- map a hostname to its IP number
+gethostbyaddr() -- map an IP number or hostname to DNS info
+getservbyname() -- map a service name and a protocol name to a port number
+getprotobyname() -- map a protocol name (e.g. 'tcp') to a number
+ntohs(), ntohl() -- convert 16, 32 bit int from network to host byte order
+htons(), htonl() -- convert 16, 32 bit int from host to network byte order
+inet_aton() -- convert IP addr string (123.45.67.89) to 32-bit packed format
+inet_ntoa() -- convert 32-bit packed format IP to string (123.45.67.89)
+socket.getdefaulttimeout() -- get the default timeout value
+socket.setdefaulttimeout() -- set the default timeout value
+create_connection() -- connects to an address, with an optional timeout and
+                       optional source address.
+
+ [*] not available on all platforms!
+
+Special objects:
+
+SocketType -- type object for socket objects
+error -- exception raised for I/O errors
+has_ipv6 -- boolean value indicating if IPv6 is supported
+
+Integer constants:
+
+AF_INET, AF_UNIX -- socket domains (first argument to socket() call)
+SOCK_STREAM, SOCK_DGRAM, SOCK_RAW -- socket types (second argument)
+
+Many other constants may be defined; these may be used in calls to
+the setsockopt() and getsockopt() methods.
+"""
+
+from ._core import spawn, current_fiber, schedule
+
+import _socket
+from _socket import *
+
+import os, sys, io
+
+try:
+    import errno
+except ImportError:
+    errno = None
+EBADF = getattr(errno, 'EBADF', 9)
+EAGAIN = getattr(errno, 'EAGAIN', 11)
+EWOULDBLOCK = getattr(errno, 'EWOULDBLOCK', 11)
+
+__all__ = ["getfqdn", "create_connection"]
+__all__.extend(os._get_exports_list(_socket))
+
+
+_realsocket = socket
+
+# WSA error codes
+if sys.platform.lower().startswith("win"):
+    errorTab = {}
+    errorTab[10004] = "The operation was interrupted."
+    errorTab[10009] = "A bad file handle was passed."
+    errorTab[10013] = "Permission denied."
+    errorTab[10014] = "A fault occurred on the network??" # WSAEFAULT
+    errorTab[10022] = "An invalid operation was attempted."
+    errorTab[10035] = "The socket operation would block"
+    errorTab[10036] = "A blocking operation is already in progress."
+    errorTab[10048] = "The network address is in use."
+    errorTab[10054] = "The connection has been reset."
+    errorTab[10058] = "The network has been shut down."
+    errorTab[10060] = "The operation timed out."
+    errorTab[10061] = "Connection refused."
+    errorTab[10063] = "The name is too long."
+    errorTab[10064] = "The host is down."
+    errorTab[10065] = "The host is unreachable."
+    __all__.append("errorTab")
+
+
+class socket(_socket.socket):
+
+    """A subclass of _socket.socket adding the makefile() method."""
+
+    __slots__ = ["__weakref__", "_io_refs", "_closed", "__timeout"]
+
+    def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, fileno=None):
+        _socket.socket.__init__(self, family, type, proto, fileno)
+        self._io_refs = 0
+        self._closed = False
+        super().setblocking(False)
+        self.__timeout = None
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *args):
+        if not self._closed:
+            self.close()
+
+    def __repr__(self):
+        """Wrap __repr__() to reveal the real class name."""
+        s = _socket.socket.__repr__(self)
+        if s.startswith("<socket object"):
+            s = "<%s.%s%s%s" % (self.__class__.__module__,
+                                self.__class__.__name__,
+                                getattr(self, '_closed', False) and " [closed] " or "",
+                                s[7:])
+        return s
+
+    def __getstate__(self):
+        raise TypeError("Cannot serialize socket object")
+
+    def dup(self):
+        """dup() -> socket object
+
+        Return a new socket object connected to the same system resource.
+        """
+        fd = dup(self.fileno())
+        sock = self.__class__(self.family, self.type, self.proto, fileno=fd)
+        sock.settimeout(self.gettimeout())
+        return sock
+
+    def accept(self):
+        """accept() -> (socket object, address info)
+
+        Wait for an incoming connection.  Return a new socket
+        representing the connection, and the address of the client.
+        For IP sockets, the address info is a pair (hostaddr, port).
+        """
+        yield 'read', self
+        fd, addr = self._accept()
+        sock = socket(self.family, self.type, self.proto, fileno=fd)
+        # Issue #7995: if no default timeout is set and the listening
+        # socket had a (non-zero) timeout, force the new socket in blocking
+        # mode to override platform-specific socket flags inheritance.
+        if getdefaulttimeout() is None and self.gettimeout():
+            sock.setblocking(True)
+        return sock, addr
+
+    def makefile(self, mode="r", buffering=None, *,
+                 encoding=None, errors=None, newline=None):
+        """makefile(...) -> an I/O stream connected to the socket
+
+        The arguments are as for io.open() after the filename,
+        except the only mode characters supported are 'r', 'w' and 'b'.
+        The semantics are similar too.  (XXX refactor to share code?)
+        """
+        for c in mode:
+            if c not in {"r", "w", "b"}:
+                raise ValueError("invalid mode %r (only r, w, b allowed)" % mode)
+        writing = "w" in mode
+        reading = "r" in mode or not writing
+        assert reading or writing
+        binary = "b" in mode
+        rawmode = ""
+        if reading:
+            rawmode += "r"
+        if writing:
+            rawmode += "w"
+        raw = SocketIO(self, rawmode)
+        self._io_refs += 1
+        if buffering is None:
+            buffering = -1
+        if buffering < 0:
+            buffering = io.DEFAULT_BUFFER_SIZE
+        if buffering == 0:
+            if not binary:
+                raise ValueError("unbuffered streams must be binary")
+            return raw
+        if reading and writing:
+            buffer = io.BufferedRWPair(raw, raw, buffering)
+        elif reading:
+            buffer = io.BufferedReader(raw, buffering)
+        else:
+            assert writing
+            buffer = io.BufferedWriter(raw, buffering)
+        if binary:
+            return buffer
+        text = io.TextIOWrapper(buffer, encoding, errors, newline)
+        text.mode = mode
+        return text
+
+    def _decref_socketios(self):
+        if self._io_refs > 0:
+            self._io_refs -= 1
+        if self._closed:
+            self.close()
+
+    def _real_close(self, _ss=_socket.socket):
+        # This function should not reference any globals. See issue #808164.
+        _ss.close(self)
+
+    def close(self):
+        # This function should not reference any globals. See issue #808164.
+        self._closed = True
+        if self._io_refs <= 0:
+            self._real_close()
+
+    def recv_term(self, term):
+        buf = b''
+        while not buf.endswith(term):
+            yield 'read', self
+            buf1 = super().recv(0x10000, _socket.MSG_PEEK)
+            if not buf1:
+                break
+            index = (buf + buf1).find(term)
+            buf2 = yield from self.recv(len(buf1) if index == -1 else index - len(buf) + len(term))
+            buf += buf2
+        return buf
+
+    def recv(self, count):
+        yield 'read', self
+        return super().recv(count)
+
+    def sendall(self, buf):
+        while buf:
+            sent = yield from self.send(buf)
+            if not sent:
+                break
+            buf = buf[sent:]
+
+    def send(self, buf):
+        yield 'write', self
+        return super().send(buf)
+
+    def settimeout(self, timeout):
+        self.__timeout = timeout
+
+    def setblocking(self, blocking):
+        self.settimeout(None if blocking else 0)
+
+    def connect(self, addr):
+        # TODO switch this to use connect_ex, should be faster
+        try:
+            return super().connect(addr)
+        except _socket.error as exc:
+            if exc.errno != errno.EINPROGRESS:
+                raise
+        @spawn
+        def timeout_fiber():
+            yield 'timeout', self.__timeout
+            main_fiber.send(False)
+            event_fiber.close()
+            schedule(main_fiber)
+        @spawn
+        def event_fiber():
+            yield 'write', self
+            err = self.getsockopt(_socket.SOL_SOCKET, _socket.SO_ERROR)
+            if err == 0:
+                main_fiber.send(True)
+            else:
+                main_fiber.throw(_socket.error(err, os.strerror(err)))
+            timeout_fiber.close()
+            schedule(main_fiber)
+        main_fiber = current_fiber()
+        try:
+            yield
+        finally:
+            yield
+
+
+def fromfd(fd, family, type, proto=0):
+    """ fromfd(fd, family, type[, proto]) -> socket object
+
+    Create a socket object from a duplicate of the given file
+    descriptor.  The remaining arguments are the same as for socket().
+    """
+    nfd = dup(fd)
+    return socket(family, type, proto, nfd)
+
+
+if hasattr(_socket, "socketpair"):
+
+    def socketpair(family=None, type=SOCK_STREAM, proto=0):
+        """socketpair([family[, type[, proto]]]) -> (socket object, socket object)
+
+        Create a pair of socket objects from the sockets returned by the platform
+        socketpair() function.
+        The arguments are the same as for socket() except the default family is
+        AF_UNIX if defined on the platform; otherwise, the default is AF_INET.
+        """
+        if family is None:
+            try:
+                family = AF_UNIX
+            except NameError:
+                family = AF_INET
+        a, b = _socket.socketpair(family, type, proto)
+        a = socket(family, type, proto, a.detach())
+        b = socket(family, type, proto, b.detach())
+        return a, b
+
+
+_blocking_errnos = { EAGAIN, EWOULDBLOCK }
+
+class SocketIO(io.RawIOBase):
+
+    """Raw I/O implementation for stream sockets.
+
+    This class supports the makefile() method on sockets.  It provides
+    the raw I/O interface on top of a socket object.
+    """
+
+    # One might wonder why not let FileIO do the job instead.  There are two
+    # main reasons why FileIO is not adapted:
+    # - it wouldn't work under Windows (where you can't used read() and
+    #   write() on a socket handle)
+    # - it wouldn't work with socket timeouts (FileIO would ignore the
+    #   timeout and consider the socket non-blocking)
+
+    # XXX More docs
+
+    def __init__(self, sock, mode):
+        if mode not in ("r", "w", "rw", "rb", "wb", "rwb"):
+            raise ValueError("invalid mode: %r" % mode)
+        io.RawIOBase.__init__(self)
+        self._sock = sock
+        if "b" not in mode:
+            mode += "b"
+        self._mode = mode
+        self._reading = "r" in mode
+        self._writing = "w" in mode
+        self._timeout_occurred = False
+
+    def readinto(self, b):
+        """Read up to len(b) bytes into the writable buffer *b* and return
+        the number of bytes read.  If the socket is non-blocking and no bytes
+        are available, None is returned.
+
+        If *b* is non-empty, a 0 return value indicates that the connection
+        was shutdown at the other end.
+        """
+        self._checkClosed()
+        self._checkReadable()
+        if self._timeout_occurred:
+            raise IOError("cannot read from timed out object")
+        while True:
+            try:
+                return self._sock.recv_into(b)
+            except timeout:
+                self._timeout_occurred = True
+                raise
+            except InterruptedError:
+                continue
+            except error as e:
+                if e.args[0] in _blocking_errnos:
+                    return None
+                raise
+
+    def write(self, b):
+        """Write the given bytes or bytearray object *b* to the socket
+        and return the number of bytes written.  This can be less than
+        len(b) if not all data could be written.  If the socket is
+        non-blocking and no bytes could be written None is returned.
+        """
+        self._checkClosed()
+        self._checkWritable()
+        try:
+            return self._sock.send(b)
+        except error as e:
+            # XXX what about EINTR?
+            if e.args[0] in _blocking_errnos:
+                return None
+            raise
+
+    def readable(self):
+        """True if the SocketIO is open for reading.
+        """
+        return self._reading and not self.closed
+
+    def writable(self):
+        """True if the SocketIO is open for writing.
+        """
+        return self._writing and not self.closed
+
+    def fileno(self):
+        """Return the file descriptor of the underlying socket.
+        """
+        self._checkClosed()
+        return self._sock.fileno()
+
+    @property
+    def name(self):
+        if not self.closed:
+            return self.fileno()
+        else:
+            return -1
+
+    @property
+    def mode(self):
+        return self._mode
+
+    def close(self):
+        """Close the SocketIO object.  This doesn't close the underlying
+        socket, except if all references to it have disappeared.
+        """
+        if self.closed:
+            return
+        io.RawIOBase.close(self)
+        self._sock._decref_socketios()
+        self._sock = None
+
+
+def getfqdn(name=''):
+    """Get fully qualified domain name from name.
+
+    An empty argument is interpreted as meaning the local host.
+
+    First the hostname returned by gethostbyaddr() is checked, then
+    possibly existing aliases. In case no FQDN is available, hostname
+    from gethostname() is returned.
+    """
+    name = name.strip()
+    if not name or name == '0.0.0.0':
+        name = gethostname()
+    try:
+        hostname, aliases, ipaddrs = gethostbyaddr(name)
+    except error:
+        pass
+    else:
+        aliases.insert(0, hostname)
+        for name in aliases:
+            if '.' in name:
+                break
+        else:
+            name = hostname
+    return name
+
+
+_GLOBAL_DEFAULT_TIMEOUT = object()
+
+def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT,
+                      source_address=None):
+    """Connect to *address* and return the socket object.
+
+    Convenience function.  Connect to *address* (a 2-tuple ``(host,
+    port)``) and return the socket object.  Passing the optional
+    *timeout* parameter will set the timeout on the socket instance
+    before attempting to connect.  If no *timeout* is supplied, the
+    global default timeout setting returned by :func:`getdefaulttimeout`
+    is used.  If *source_address* is set it must be a tuple of (host, port)
+    for the socket to bind as a source address before making the connection.
+    An host of '' or port 0 tells the OS to use the default.
+    """
+
+    host, port = address
+    err = None
+    for res in getaddrinfo(host, port, 0, SOCK_STREAM):
+        af, socktype, proto, canonname, sa = res
+        sock = None
+        try:
+            sock = socket(af, socktype, proto)
+            if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
+                sock.settimeout(timeout)
+            if source_address:
+                sock.bind(source_address)
+            sock.connect(sa)
+            return sock
+
+        except error as _:
+            err = _
+            if sock is not None:
+                sock.close()
+
+    if err is not None:
+        raise err
+    else:
+        raise error("getaddrinfo returns an empty list")

green380/threading.py

+"""Thread module emulating a subset of Java's threading model."""
+
+import sys as _sys
+import _thread
+
+from time import time as _time, sleep as _sleep
+from traceback import format_exc as _format_exc
+from collections import deque
+from _weakrefset import WeakSet
+
+# Note regarding PEP 8 compliant names
+#  This threading model was originally inspired by Java, and inherited
+# the convention of camelCase function and method names from that
+# language. Those originaly names are not in any imminent danger of
+# being deprecated (even for Py3k),so this module provides them as an
+# alias for the PEP 8 compliant names
+# Note that using the new PEP 8 compliant names facilitates substitution
+# with the multiprocessing module, which doesn't provide the old
+# Java inspired names.
+
+__all__ = ['active_count', 'Condition', 'current_thread', 'enumerate', 'Event',
+           'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Thread', 'Barrier',
+           'Timer', 'ThreadError', 'setprofile', 'settrace', 'local', 'stack_size']
+
+# Rename some stuff so "from threading import *" is safe
+_start_new_thread = _thread.start_new_thread
+_allocate_lock = _thread.allocate_lock
+get_ident = _thread.get_ident
+ThreadError = _thread.error
+try:
+    _CRLock = _thread.RLock
+except AttributeError:
+    _CRLock = None
+TIMEOUT_MAX = _thread.TIMEOUT_MAX
+del _thread
+
+
+# Debug support (adapted from ihooks.py).
+
+_VERBOSE = True
+
+if __debug__:
+
+    class _Verbose(object):
+
+        def __init__(self, verbose=None):
+            if verbose is None:
+                verbose = _VERBOSE
+            self._verbose = verbose
+
+        def _note(self, format, *args):
+            if self._verbose:
+                format = format % args
+                # Issue #4188: calling current_thread() can incur an infinite
+                # recursion if it has to create a DummyThread on the fly.
+                ident = get_ident()
+                try:
+                    name = _active[ident].name
+                except KeyError:
+                    name = "<Fiber %s>" % ident
+                format = "%s: %s\n" % (name, format)
+                _sys.stderr.write(format)
+
+else:
+    # Disable this when using "python -O"
+    class _Verbose(object):
+        def __init__(self, verbose=None):
+            pass
+        def _note(self, *args):
+            pass
+
+# Support for profile and trace hooks
+
+_profile_hook = None
+_trace_hook = None
+
+def setprofile(func):
+    global _profile_hook
+    _profile_hook = func
+
+def settrace(func):
+    global _trace_hook
+    _trace_hook = func
+
+# Synchronization classes
+
+class _DummyLock:
+
+    def __init__(self):
+        self._locked = False
+
+    def acquire(self):
+        assert not self._locked
+        self._locked = True
+
+    __enter__ = acquire
+
+    def release(self):
+        assert self._locked
+        self._locked = False
+
+    def __exit__(self, *args):
+        self.release()
+
+Lock = _allocate_lock
+
+def RLock(verbose=None, *args, **kwargs):
+    if verbose is None:
+        verbose = _VERBOSE
+    if (__debug__ and verbose) or _CRLock is None:
+        return _PyRLock(verbose, *args, **kwargs)
+    return _CRLock(*args, **kwargs)
+
+class _RLock(_Verbose):
+
+    def __init__(self, verbose=None):
+        _Verbose.__init__(self, verbose)
+        self._block = _allocate_lock()
+        self._owner = None
+        self._count = 0
+
+    def __repr__(self):
+        owner = self._owner
+        try:
+            owner = _active[owner].name
+        except KeyError:
+            pass
+        return "<%s owner=%r count=%d>" % (
+                self.__class__.__name__, owner, self._count)
+
+    def acquire(self, blocking=True, timeout=-1):
+        me = get_ident()
+        if self._owner == me:
+            self._count = self._count + 1
+            if __debug__:
+                self._note("%s.acquire(%s): recursive success", self, blocking)
+            return 1
+        rc = yield from self._block.acquire(blocking, timeout)
+        if rc:
+            self._owner = me
+            self._count = 1
+            if __debug__:
+                self._note("%s.acquire(%s): initial success", self, blocking)
+        else:
+            if __debug__:
+                self._note("%s.acquire(%s): failure", self, blocking)
+        return rc
+
+    __enter__ = acquire
+
+    def release(self):
+        if self._owner != get_ident():
+            raise RuntimeError("cannot release un-acquired lock")
+        self._count = count = self._count - 1
+        if not count:
+            self._owner = None
+            self._block.release()
+            if __debug__:
+                self._note("%s.release(): final release", self)
+        else:
+            if __debug__:
+                self._note("%s.release(): non-final release", self)
+
+    def __exit__(self, t, v, tb):
+        self.release()
+
+    # Internal methods used by condition variables
+
+    def _acquire_restore(self, state):
+        yield from self._block.acquire()
+        self._count, self._owner = state
+        if __debug__:
+            self._note("%s._acquire_restore()", self)
+
    def _release_save(self):
        """Fully release the lock, however many times it was recursively
        acquired, and return the saved (count, owner) state for a later
        _acquire_restore().  Used by Condition.wait()."""
        if __debug__:
            self._note("%s._release_save()", self)
        if self._count == 0:
            raise RuntimeError("cannot release un-acquired lock")
        count = self._count
        self._count = 0
        owner = self._owner
        self._owner = None
        self._block.release()
        return (count, owner)
+
    def _is_owned(self):
        """Return True if the calling thread owns the lock.

        The unreachable ``yield`` makes this a generator function so that
        callers can uniformly drive it with ``yield from``, matching the
        other green methods in this module.
        """
        if False:
            yield
        return self._owner == get_ident()
+
+_PyRLock = _RLock
+
+
class Condition(_Verbose):
    """Green condition variable.

    wait(), wait_for(), notify(), notify_all() and the fallback
    _is_owned()/_acquire_restore() are generator methods and must be
    driven with ``yield from``.  acquire()/release() are re-exported
    from the underlying lock (acquire is also a generator in this port).
    """

    def __init__(self, lock=None, verbose=None):
        _Verbose.__init__(self, verbose)
        if lock is None:
            lock = RLock()
        self._lock = lock
        # Export the lock's acquire() and release() methods
        self.acquire = lock.acquire
        self.release = lock.release
        # If the lock defines _release_save() and/or _acquire_restore(),
        # these override the default implementations (which just call
        # release() and acquire() on the lock).  Ditto for _is_owned().
        try:
            self._release_save = lock._release_save
        except AttributeError:
            pass
        try:
            self._acquire_restore = lock._acquire_restore
        except AttributeError:
            pass
        try:
            self._is_owned = lock._is_owned
        except AttributeError:
            pass
        self._waiters = []

    def __enter__(self):
        # NOTE(review): for a green lock, __enter__ returns acquire()'s
        # generator object; a plain with-statement will not drive it.
        return self._lock.__enter__()

    def __exit__(self, *args):
        return self._lock.__exit__(*args)

    def __repr__(self):
        return "<Condition(%s, %d)>" % (self._lock, len(self._waiters))

    def _release_save(self):
        # Fallback used when the lock provides no _release_save of its own.
        self._lock.release()           # No state to save

    def _acquire_restore(self, x):
        # Fallback generator; drive with ``yield from``.
        yield from self._lock.acquire()           # Ignore saved state

    def _is_owned(self):
        # Return True if lock is owned by current_thread.
        # This method is called only if _lock doesn't have _is_owned().
        if (yield from self._lock.acquire(0)):
            self._lock.release()
            return False
        else:
            return True

    def wait(self, timeout=None):
        """Release the lock, wait to be notified or time out, reacquire.

        Returns True unless the wait timed out.  Generator method.
        """
        if not (yield from self._is_owned()):
            raise RuntimeError("cannot wait on un-acquired lock")
        # Each waiter parks on its own freshly-acquired lock; notify()
        # releases it to wake us.
        waiter = _allocate_lock()
        yield from waiter.acquire()
        self._waiters.append(waiter)
        saved_state = self._release_save()
        try:    # restore state no matter what (e.g., KeyboardInterrupt)
            if timeout is None:
                yield from waiter.acquire()
                gotit = True
                if __debug__:
                    self._note("%s.wait(): got it", self)
            else:
                if timeout > 0:
                    gotit = yield from waiter.acquire(True, timeout)
                else:
                    gotit = yield from waiter.acquire(False)
                if not gotit:
                    if __debug__:
                        self._note("%s.wait(%s): timed out", self, timeout)
                    try:
                        self._waiters.remove(waiter)
                    except ValueError:
                        pass
                else:
                    if __debug__:
                        self._note("%s.wait(%s): got it", self, timeout)
            return gotit
        finally:
            yield from self._acquire_restore(saved_state)

    def wait_for(self, predicate, timeout=None):
        """Wait until *predicate* is true or *timeout* expires; return the
        last predicate value.  Generator method."""
        endtime = None
        waittime = timeout
        result = predicate()
        while not result:
            if waittime is not None:
                if endtime is None:
                    endtime = _time() + waittime
                else:
                    waittime = endtime - _time()
                    if waittime <= 0:
                        if __debug__:
                            self._note("%s.wait_for(%r, %r): Timed out.",
                                       self, predicate, timeout)
                        break
            if __debug__:
                self._note("%s.wait_for(%r, %r): Waiting with timeout=%s.",
                           self, predicate, timeout, waittime)
            yield from self.wait(waittime)
            result = predicate()
        else:
            if __debug__:
                self._note("%s.wait_for(%r, %r): Success.",
                           self, predicate, timeout)
        return result

    def notify(self, n=1):
        """Wake up to *n* waiters.  Generator method."""
        if not (yield from self._is_owned()):
            raise RuntimeError("cannot notify on un-acquired lock")
        __waiters = self._waiters
        waiters = __waiters[:n]
        if not waiters:
            if __debug__:
                self._note("%s.notify(): no waiters", self)
            return
        # NOTE(review): unlike every other logging call in this class, this
        # _note() is not guarded by __debug__ -- confirm that is intended.
        self._note("%s.notify(): notifying %d waiter%s", self, n,
                   n!=1 and "s" or "")
        for waiter in waiters:
            waiter.release()
            try:
                __waiters.remove(waiter)
            except ValueError:
                pass

    def notify_all(self):
        """Wake all waiters.  Generator method."""
        yield from self.notify(len(self._waiters))

    notifyAll = notify_all
+
+
class Semaphore(_Verbose):
    """A counting semaphore for green threads.

    After Tim Peters' semaphore class, but not quite the same (no maximum).

    acquire() and release() are generator methods and must be driven with
    ``yield from``, matching the greened _RLock/Condition/Event API in
    this module.
    """

    def __init__(self, value=1, verbose=None):
        """Create a semaphore whose internal counter starts at *value* (>= 0)."""
        if value < 0:
            raise ValueError("semaphore initial value must be >= 0")
        _Verbose.__init__(self, verbose)
        self._cond = Condition(Lock())
        self._value = value

    def acquire(self, blocking=True, timeout=None):
        """Decrement the counter, blocking while it is zero.

        Returns True on success, False if non-blocking found the counter
        at zero or the wait timed out.  Generator method.
        """
        if not blocking and timeout is not None:
            raise ValueError("can't specify timeout for non-blocking acquire")
        rc = False
        endtime = None
        # Condition.acquire/wait are generators in this greened port; the
        # previous plain calls merely created generator objects and threw
        # them away, so the semaphore never actually synchronized.
        yield from self._cond.acquire()
        while self._value == 0:
            if not blocking:
                break
            if __debug__:
                self._note("%s.acquire(%s): blocked waiting, value=%s",
                           self, blocking, self._value)
            if timeout is not None:
                if endtime is None:
                    endtime = _time() + timeout
                else:
                    # Recompute the remaining budget after a spurious wakeup.
                    timeout = endtime - _time()
                    if timeout <= 0:
                        break
            yield from self._cond.wait(timeout)
        else:
            # Loop exited because value > 0: take one unit.
            self._value = self._value - 1
            if __debug__:
                self._note("%s.acquire: success, value=%s",
                           self, self._value)
            rc = True
        self._cond.release()
        return rc

    __enter__ = acquire

    def release(self):
        """Increment the counter and wake one waiter, if any.

        Generator method: drive with ``yield from``.
        """
        yield from self._cond.acquire()
        self._value = self._value + 1
        if __debug__:
            self._note("%s.release: success, value=%s",
                       self, self._value)
        # Condition.notify() is also a generator and must be driven.
        yield from self._cond.notify()
        self._cond.release()

    def __exit__(self, t, v, tb):
        # NOTE(review): __exit__ cannot drive the release() generator itself;
        # with-statement use of a green semaphore needs scheduler support
        # (__enter__ has the same limitation, since acquire is a generator).
        self.release()
+
+
class BoundedSemaphore(Semaphore):
    """Semaphore that checks that # releases is <= # acquires"""
    def __init__(self, value=1, verbose=None):
        Semaphore.__init__(self, value, verbose)
        # Remember the ceiling so release() can detect over-release.
        self._initial_value = value

    def release(self):
        """Release as Semaphore.release, but raise ValueError when the
        counter would exceed its initial value."""
        # NOTE(review): this bounds check reads _value without holding the
        # condition's lock -- confirm that is acceptable in the cooperative
        # green-thread model.
        if self._value >= self._initial_value:
            raise ValueError("Semaphore released too many times")
        return Semaphore.release(self)
+
+
class Event(_Verbose):
    """Green event: a boolean flag that green threads can wait on.

    set(), clear() and wait() are generator methods; drive them with
    ``yield from``.  is_set() is a plain, non-blocking query.
    """

    # After Tim Peters' event class (without is_posted())

    def __init__(self, verbose=None):
        _Verbose.__init__(self, verbose)
        self._cond = Condition(Lock())
        self._flag = False

    def _reset_internal_locks(self):
        # private!  called by Thread._reset_internal_locks by _after_fork()
        self._cond.__init__()

    def is_set(self):
        """Return True if the internal flag is set (never blocks)."""
        return self._flag

    isSet = is_set

    def set(self):
        """Set the flag and wake all waiters.  Generator method."""
        yield from self._cond.acquire()
        try:
            self._flag = True
            yield from self._cond.notify_all()
        finally:
            #~ import pdb
            #~ pdb.set_trace()
            self._cond.release()

    def clear(self):
        """Reset the internal flag to false.  Generator method."""
        yield from self._cond.acquire()
        try:
            self._flag = False
        finally:
            self._cond.release()

    def wait(self, timeout=None):
        """Block until the flag is set, or *timeout* elapses; return the
        flag's value.  Generator method."""
        yield from self._cond.acquire()
        try:
            signaled = self._flag
            if not signaled:
                signaled = yield from self._cond.wait(timeout)
            return signaled
        finally:
            self._cond.release()
+
+
+# A barrier class.  Inspired in part by the pthread_barrier_* api and
+# the CyclicBarrier class from Java.  See
+# http://sourceware.org/pthreads-win32/manual/pthread_barrier_init.html and
+# http://java.sun.com/j2se/1.5.0/docs/api/java/util/concurrent/
+#        CyclicBarrier.html
+# for information.
+# We maintain two main states, 'filling' and 'draining' enabling the barrier
+# to be cyclic.  Threads are not allowed into it until it has fully drained
+# since the previous cycle.  In addition, a 'resetting' state exists which is
+# similar to 'draining' except that threads leave with a BrokenBarrierError,
+# and a 'broken' state in which all threads get the exception.
class Barrier(_Verbose):
    """
    Barrier.  Useful for synchronizing a fixed number of threads
    at known synchronization points.  Threads block on 'wait()' and are
    simultaneously awoken once they have all made that call.

    NOTE(review): not yet greened.  Condition.wait()/notify_all()/wait_for()
    and the underlying lock's acquire() are generator methods in this port,
    but this class invokes them with plain calls and ``with self._cond:``,
    which merely creates and discards the generators -- no locking, blocking
    or wakeups actually happen.  Needs conversion to ``yield from`` style.
    """
    def __init__(self, parties, action=None, timeout=None, verbose=None):
        """
        Create a barrier, initialised to 'parties' threads.
        'action' is a callable which, when supplied, will be called
        by one of the threads after they have all entered the
        barrier and just prior to releasing them all.
        If a 'timeout' is provided, it is used as the default for
        all subsequent 'wait()' calls.
        """
        _Verbose.__init__(self, verbose)
        self._cond = Condition(Lock())
        self._action = action
        self._timeout = timeout
        self._parties = parties
        self._state = 0 #0 filling, 1, draining, -1 resetting, -2 broken
        self._count = 0

    def wait(self, timeout=None):
        """
        Wait for the barrier.  When the specified number of threads have
        started waiting, they are all simultaneously awoken. If an 'action'
        was provided for the barrier, one of the threads will have executed
        that callback prior to returning.
        Returns an individual index number from 0 to 'parties-1'.
        """
        if timeout is None:
            timeout = self._timeout
        with self._cond:
            self._enter() # Block while the barrier drains.
            index = self._count
            self._count += 1
            try:
                if index + 1 == self._parties:
                    # We release the barrier
                    self._release()
                else:
                    # We wait until someone releases us
                    self._wait(timeout)
                return index
            finally:
                self._count -= 1
                # Wake up any threads waiting for barrier to drain.
                self._exit()

    # Block until the barrier is ready for us, or raise an exception
    # if it is broken.
    def _enter(self):
        while self._state in (-1, 1):
            # It is draining or resetting, wait until done
            self._cond.wait()
        #see if the barrier is in a broken state
        if self._state < 0:
            raise BrokenBarrierError
        assert self._state == 0

    # Optionally run the 'action' and release the threads waiting
    # in the barrier.
    def _release(self):
        try:
            if self._action:
                self._action()
            # enter draining state
            self._state = 1
            self._cond.notify_all()
        except:
            #an exception during the _action handler.  Break and reraise
            self._break()
            raise

    # Wait in the barrier until we are released.  Raise an exception
    # if the barrier is reset or broken.
    def _wait(self, timeout):
        if not self._cond.wait_for(lambda : self._state != 0, timeout):
            #timed out.  Break the barrier
            self._break()
            raise BrokenBarrierError
        if self._state < 0:
            raise BrokenBarrierError
        assert self._state == 1

    # If we are the last thread to exit the barrier, signal any threads
    # waiting for the barrier to drain.
    def _exit(self):
        if self._count == 0:
            if self._state in (-1, 1):
                #resetting or draining
                self._state = 0
                self._cond.notify_all()

    def reset(self):
        """
        Reset the barrier to the initial state.
        Any threads currently waiting will get the BrokenBarrier exception
        raised.
        """
        with self._cond:
            if self._count > 0:
                if self._state == 0:
                    #reset the barrier, waking up threads
                    self._state = -1
                elif self._state == -2:
                    #was broken, set it to reset state
                    #which clears when the last thread exits
                    self._state = -1
            else:
                self._state = 0
            self._cond.notify_all()

    def abort(self):
        """
        Place the barrier into a 'broken' state.
        Useful in case of error.  Any currently waiting threads and
        threads attempting to 'wait()' will have BrokenBarrierError
        raised.
        """
        with self._cond:
            self._break()

    def _break(self):
        # An internal error was detected.  The barrier is set to
        # a broken state all parties awakened.
        self._state = -2
        self._cond.notify_all()

    @property
    def parties(self):
        """
        Return the number of threads required to trip the barrier.
        """
        return self._parties

    @property
    def n_waiting(self):
        """
        Return the number of threads that are currently waiting at the barrier.
        """
        # We don't need synchronization here since this is an ephemeral result
        # anyway.  It returns the correct value in the steady state.
        if self._state == 0:
            return self._count
        return 0

    @property
    def broken(self):
        """
        Return True if the barrier is in a broken state
        """
        return self._state == -2
+
# Raised when a Barrier is broken, reset or times out while a thread waits.
class BrokenBarrierError(RuntimeError):
    """Exception raised by the Barrier class."""
+
+
# Helper to generate new thread names
_counter = 0
def _newname(template="Thread-%d"):
    """Return a fresh default thread name such as 'Thread-1'."""
    global _counter
    _counter += 1
    return template % _counter
+
# Active thread administration
# NOTE(review): _active_limbo_lock starts out as a _DummyLock() here, but
# _after_fork() rebinds it to _allocate_lock() -- confirm the two are meant
# to be interchangeable.
_active_limbo_lock = _DummyLock()
_active = {}    # maps thread id to Thread object
_limbo = {}     # threads that have been started but not yet bootstrapped

# For debug and leak testing
_dangling = WeakSet()
+
+# Main class for threads
+
class Thread(_Verbose):
    """A green thread whose activity is a generator driven by the scheduler.

    Mirrors threading.Thread, except that start(), run(), _bootstrap(),
    _bootstrap_inner() and _stop() are generator methods to be driven
    with ``yield from``, and *target* is expected to be a generator
    function (run() delegates to it with ``yield from``).
    """

    # NOTE(review): vestigial -- __init__ sets self._initialized (single
    # underscore), which is what the rest of the class checks.
    __initialized = False
    # Need to store a reference to sys.exc_info for printing
    # out exceptions when a thread tries to use a global var. during interp.
    # shutdown and thus raises an exception about trying to perform some
    # operation on/with a NoneType
    # NOTE(review): this name-mangles to _Thread__exc_info, yet the shutdown
    # path in _bootstrap_inner() reads self._exc_info -- that lookup would
    # raise AttributeError; confirm which spelling is intended.
    __exc_info = _sys.exc_info
    # Keep sys.exc_clear too to clear the exception just before
    # allowing .join() to return.
    #XXX __exc_clear = _sys.exc_clear

    def __init__(self, group=None, target=None, name=None,
                 args=(), kwargs=None, verbose=None, *, daemon=None):
        """Set up the thread; *target* should be a generator function."""
        assert group is None, "group argument must be None for now"
        _Verbose.__init__(self, verbose)
        if kwargs is None:
            kwargs = {}
        self._target = target
        self._name = str(name or _newname())
        self._args = args
        self._kwargs = kwargs
        if daemon is not None:
            self._daemonic = daemon
        else:
            # Daemon status is inherited from the creating thread.
            self._daemonic = current_thread().daemon
        self._ident = None
        self._started = Event()
        self._stopped = False
        self._block = Condition(Lock())
        self._initialized = True
        # sys.stderr is not stored in the class like
        # sys.exc_info since it can be changed between instances
        self._stderr = _sys.stderr
        _dangling.add(self)

    def _reset_internal_locks(self):
        # private!  Called by _after_fork() to reset our internal locks as
        # they may be in an invalid state leading to a deadlock or crash.
        if hasattr(self, '_block'):  # DummyThread deletes _block
            self._block.__init__()
        self._started._reset_internal_locks()

    def __repr__(self):
        assert self._initialized, "Thread.__init__() was not called"
        status = "initial"
        if self._started.is_set():
            status = "started"
        if self._stopped:
            status = "stopped"
        if self._daemonic:
            status += " daemon"
        if self._ident is not None:
            status += " %s" % self._ident
        return "<%s(%s, %s)>" % (self.__class__.__name__, self._name, status)

    def start(self):
        """Start the thread's activity.

        Generator method: drive with ``yield from``; it resumes once the
        new thread has actually begun running (signalled via _started).
        """
        if not self._initialized:
            raise RuntimeError("thread.__init__() not called")

        if self._started.is_set():
            raise RuntimeError("threads can only be started once")
        if __debug__:
            self._note("%s.start(): starting thread", self)
        with _active_limbo_lock:
            _limbo[self] = self
        try:
            _start_new_thread(self._bootstrap, ())
        except Exception:
            with _active_limbo_lock:
                del _limbo[self]
            raise
        #~ import pdb
        #~ pdb.set_trace()
        yield from self._started.wait()

    def run(self):
        """Invoke the target generator.  Generator method."""
        try:
            if self._target:
                yield from self._target(*self._args, **self._kwargs)
        finally:
            # Avoid a refcycle if the thread is running a function with
            # an argument that has a member that points to the thread.
            del self._target, self._args, self._kwargs

    def _bootstrap(self):
        # Wrapper around the real bootstrap code that ignores
        # exceptions during interpreter cleanup.  Those typically
        # happen when a daemon thread wakes up at an unfortunate
        # moment, finds the world around it destroyed, and raises some
        # random exception *** while trying to report the exception in
        # _bootstrap_inner() below ***.  Those random exceptions
        # don't help anybody, and they confuse users, so we suppress
        # them.  We suppress them only when it appears that the world
        # indeed has already been destroyed, so that exceptions in
        # _bootstrap_inner() during normal business hours are properly
        # reported.  Also, we only suppress them for daemonic threads;
        # if a non-daemonic encounters this, something else is wrong.
        try:
            yield from self._bootstrap_inner()
        except:
            if self._daemonic and _sys is None:
                return
            raise

    def _set_ident(self):
        # Record the identity of the thread actually running us.
        self._ident = get_ident()

    def _bootstrap_inner(self):
        """Run the thread body: register in _active, delegate to run(),
        report stray exceptions, then deregister.  Generator method."""
        try:
            self._set_ident()
            #~ import pdb
            #~ pdb.set_trace()
            yield from self._started.set()
            with _active_limbo_lock:
                _active[self._ident] = self
                del _limbo[self]
            if __debug__:
                self._note("%s._bootstrap(): thread started", self)

            if _trace_hook:
                self._note("%s._bootstrap(): registering trace hook", self)
                _sys.settrace(_trace_hook)
            if _profile_hook:
                self._note("%s._bootstrap(): registering profile hook", self)
                _sys.setprofile(_profile_hook)

            try:
                yield from self.run()
            except SystemExit:
                if __debug__:
                    self._note("%s._bootstrap(): raised SystemExit", self)
            except:
                if __debug__:
                    self._note("%s._bootstrap(): unhandled exception", self)
                # If sys.stderr is no more (most likely from interpreter
                # shutdown) use self._stderr.  Otherwise still use sys (as in
                # _sys) in case sys.stderr was redefined since the creation of
                # self.
                if _sys:
                    _sys.stderr.write("Exception in thread %s:\n%s\n" %
                                      (self.name, _format_exc()))
                else:
                    # Do the best job possible w/o a huge amt. of code to
                    # approximate a traceback (code ideas from
                    # Lib/traceback.py)
                    exc_type, exc_value, exc_tb = self._exc_info()
                    try:
                        print((
                            "Exception in thread " + self.name +
                            " (most likely raised during interpreter shutdown):"), file=self._stderr)
                        print((
                            "Traceback (most recent call last):"), file=self._stderr)
                        while exc_tb:
                            print((
                                '  File "%s", line %s, in %s' %
                                (exc_tb.tb_frame.f_code.co_filename,
                                    exc_tb.tb_lineno,
                                    exc_tb.tb_frame.f_code.co_name)), file=self._stderr)
                            exc_tb = exc_tb.tb_next
                        print(("%s: %s" % (exc_type, exc_value)), file=self._stderr)
                    # Make sure that exc_tb gets deleted since it is a memory
                    # hog; deleting everything else is just for thoroughness
                    finally:
                        del exc_type, exc_value, exc_tb
            else:
                if __debug__:
                    self._note("%s._bootstrap(): normal return", self)
            finally:
                # Prevent a race in
                # test_threading.test_no_refcycle_through_target when
                # the exception keeps the target alive past when we
                # assert that it's dead.
                #XXX self.__exc_clear()
                pass
        finally:
            with _active_limbo_lock:
                yield from self._stop()
                try:
                    # We don't call self._delete() because it also
                    # grabs _active_limbo_lock.
                    del _active[get_ident()]
                except:
                    pass

    def _stop(self):
        """Mark the thread stopped and wake joiners.  Generator method."""
        yield from self._block.acquire()
        self._stopped = True
        # NOTE(review): Condition.notify_all() is a generator in this port;
        # this plain call creates and discards it, so joiners are never
        # actually woken -- confirm.
        self._block.notify_all()
        self._block.release()

    def _delete(self):
        "Remove current thread from the dict of currently running threads."

        # Notes about running with _dummy_thread:
        #
        # Must take care to not raise an exception if _dummy_thread is being
        # used (and thus this module is being used as an instance of
        # dummy_threading).  _dummy_thread.get_ident() always returns -1 since
        # there is only one thread if _dummy_thread is being used.  Thus
        # len(_active) is always <= 1 here, and any Thread instance created
        # overwrites the (if any) thread currently registered in _active.
        #
        # An instance of _MainThread is always created by 'threading'.  This
        # gets overwritten the instant an instance of Thread is created; both
        # threads return -1 from _dummy_thread.get_ident() and thus have the
        # same key in the dict.  So when the _MainThread instance created by
        # 'threading' tries to clean itself up when atexit calls this method
        # it gets a KeyError if another Thread instance was created.
        #
        # This all means that KeyError from trying to delete something from
        # _active if dummy_threading is being used is a red herring.  But
        # since it isn't if dummy_threading is *not* being used then don't
        # hide the exception.

        try:
            with _active_limbo_lock:
                del _active[get_ident()]
                # There must not be any python code between the previous line
                # and after the lock is released.  Otherwise a tracing function
                # could try to acquire the lock again in the same thread, (in
                # current_thread()), and would block.
        except KeyError:
            if 'dummy_threading' not in _sys.modules:
                raise

    def join(self, timeout=None):
        """Wait until the thread terminates or *timeout* elapses.

        NOTE(review): not yet greened -- self._block.acquire() and
        self._block.wait() below are generator methods in this port and
        are created then discarded, so this join neither locks nor
        blocks.  Needs conversion to ``yield from`` style.
        """
        if not self._initialized:
            raise RuntimeError("Thread.__init__() not called")
        if not self._started.is_set():
            raise RuntimeError("cannot join thread before it is started")
        if self is current_thread():
            raise RuntimeError("cannot join current thread")

        if __debug__:
            if not self._stopped:
                self._note("%s.join(): waiting until thread stops", self)

        self._block.acquire()
        try:
            if timeout is None:
                while not self._stopped:
                    self._block.wait()
                if __debug__:
                    self._note("%s.join(): thread stopped", self)
            else:
                deadline = _time() + timeout
                while not self._stopped:
                    delay = deadline - _time()
                    if delay <= 0:
                        if __debug__:
                            self._note("%s.join(): timed out", self)
                        break
                    self._block.wait(delay)
                else:
                    if __debug__:
                        self._note("%s.join(): thread stopped", self)
        finally:
            self._block.release()

    @property
    def name(self):
        """The thread's name (arbitrary string, defaults to 'Thread-N')."""
        assert self._initialized, "Thread.__init__() not called"
        return self._name

    @name.setter
    def name(self, name):
        assert self._initialized, "Thread.__init__() not called"
        self._name = str(name)

    @property
    def ident(self):
        """Thread identifier, or None if the thread has not been started."""
        assert self._initialized, "Thread.__init__() not called"
        return self._ident

    def is_alive(self):
        """Return True if the thread has started and not yet stopped."""
        assert self._initialized, "Thread.__init__() not called"
        return self._started.is_set() and not self._stopped

    isAlive = is_alive

    @property
    def daemon(self):
        """True if this thread should not keep the process alive at exit."""
        assert self._initialized, "Thread.__init__() not called"
        return self._daemonic

    @daemon.setter
    def daemon(self, daemonic):
        if not self._initialized:
            raise RuntimeError("Thread.__init__() not called")
        if self._started.is_set():
            raise RuntimeError("cannot set daemon status of active thread");
        self._daemonic = daemonic

    def isDaemon(self):
        # Legacy camelCase alias.
        return self.daemon

    def setDaemon(self, daemonic):
        # Legacy camelCase alias.
        self.daemon = daemonic

    def getName(self):
        # Legacy camelCase alias.
        return self.name

    def setName(self, name):
        # Legacy camelCase alias.
        self.name = name
+
+# The timer class was contributed by Itamar Shtull-Trauring
+
class Timer(Thread):
    """Call a green function after a specified number of seconds:

        t = Timer(30.0, f, args=[], kwargs={})
        yield from t.start()
        yield from t.cancel() # stop the timer's action if it's still waiting

    run() and cancel() are generator methods, consistent with the greened
    Thread/Event API in this module; *function* is expected to be a
    generator function, like any Thread target.
    """

    def __init__(self, interval, function, args=None, kwargs=None):
        """*interval* is seconds to wait; *function* is invoked unless
        cancel() fires first.

        args/kwargs default to fresh []/{} per instance -- None sentinels
        avoid the shared-mutable-default pitfall of the old signature
        while remaining call-compatible.
        """
        Thread.__init__(self)
        self.interval = interval
        self.function = function
        self.args = args if args is not None else []
        self.kwargs = kwargs if kwargs is not None else {}
        self.finished = Event()

    def cancel(self):
        """Stop the timer if it hasn't finished yet.  Generator method.

        Event.set() is a generator in this port; the previous plain call
        merely built a generator object and discarded it.
        """
        yield from self.finished.set()

    def run(self):
        # Thread._bootstrap_inner() drives this with ``yield from
        # self.run()``, so run() must itself be a generator and must drive
        # Event's green wait()/set() methods.
        yield from self.finished.wait(self.interval)
        if not self.finished.is_set():
            yield from self.function(*self.args, **self.kwargs)
        yield from self.finished.set()
+
+# Special thread class to represent the main thread
+# This is garbage collected through an exit handler
+
class _MainThread(Thread):
    """Thread object representing the main thread of control."""

    def __init__(self):
        Thread.__init__(self, name="MainThread", daemon=False)
        # NOTE(review): Event.set() is a generator in this port; this plain
        # call builds and discards it, so _started is never actually set.
        # _DummyThread sidesteps the same problem by assigning
        # ``self._started._flag = True`` directly -- confirm which is intended.
        self._started.set()
        self._set_ident()
        with _active_limbo_lock:
            _active[self._ident] = self

    def _exitfunc(self):
        # Shutdown path: wait for all remaining non-daemon threads, then
        # deregister the main thread.
        # NOTE(review): Thread._stop() is a generator here and is discarded
        # undriven; t.join() below also contains undriven green waits.
        # Confirm this path has been greened before relying on it.
        self._stop()
        t = _pickSomeNonDaemonThread()
        if t:
            if __debug__:
                self._note("%s: waiting for other threads", self)
        while t:
            t.join()
            t = _pickSomeNonDaemonThread()
        if __debug__:
            self._note("%s: exiting", self)
        self._delete()
+
def _pickSomeNonDaemonThread():
    """Return some live non-daemon thread, or None when none remain."""
    return next(
        (t for t in enumerate() if not t.daemon and t.is_alive()),
        None)
+
+
+# Dummy thread class to represent threads not started here.
+# These aren't garbage collected when they die, nor can they be waited for.
+# If they invoke anything in threading.py that calls current_thread(), they
+# leave an entry in the _active dict forever after.
+# Their purpose is to return *something* from current_thread().
+# They are marked as daemon threads so we won't wait for them
+# when we exit (conform previous semantics).
+
class _DummyThread(Thread):
    """Placeholder Thread for threads not started through this module."""

    def __init__(self):
        Thread.__init__(self, name=_newname("Dummy-%d"), daemon=True)

        # Thread._block consumes an OS-level locking primitive, which
        # can never be used by a _DummyThread.  Since a _DummyThread
        # instance is immortal, that's bad, so release this resource.
        del self._block

        # Mark started by poking the Event's flag directly, avoiding the
        # green (generator) Event.set() which would need a scheduler.
        self._started._flag = True
        self._set_ident()
        with _active_limbo_lock:
            _active[self._ident] = self

    def join(self, timeout=None):
        # Dummy threads were not started here, so there is nothing to join.
        assert False, "cannot join a dummy thread"
+
+
+# Global API functions
+
def current_thread():
    """Return the Thread registered for the caller's ident, creating a
    _DummyThread for threads that were not started through this module."""
    thread = _active.get(get_ident())
    return thread if thread is not None else _DummyThread()
+
+currentThread = current_thread
+
def active_count():
    """Return the number of Thread objects currently alive or starting
    (registered in _active plus those still in _limbo)."""
    with _active_limbo_lock:
        return len(_active) + len(_limbo)
+
+activeCount = active_count
+
def _enumerate():
    # Same as enumerate(), but without the lock. Internal use only.
    return [*_active.values(), *_limbo.values()]
+
def enumerate():
    """Return a list of every live Thread (active plus limbo).

    Shadows the builtin ``enumerate`` within this module, mirroring the
    stdlib threading API.
    """
    with _active_limbo_lock:
        return [*_active.values(), *_limbo.values()]
+
+from _thread import stack_size
+
+# get thread-local implementation, either from the thread
+# module, or from the python fallback
+
+try:
+    from _thread import _local as local
+except ImportError:
+    from _threading_local import local
+
+
def _after_fork():
    # This function is called by Python/ceval.c:PyEval_ReInitThreads which
    # is called from PyOS_AfterFork.  Here we cleanup threading module state
    # that should not exist after a fork.

    # Reset _active_limbo_lock, in case we forked while the lock was held
    # by another (non-forked) thread.  http://bugs.python.org/issue874900
    # NOTE(review): the module initializes _active_limbo_lock as _DummyLock(),
    # but here it is replaced with _allocate_lock() -- confirm the two types
    # are interchangeable for with-statement use.
    global _active_limbo_lock
    _active_limbo_lock = _allocate_lock()

    # fork() only copied the current thread; clear references to others.
    new_active = {}
    current = current_thread()
    with _active_limbo_lock:
        for thread in _active.values():
            # Any lock/condition variable may be currently locked or in an
            # invalid state, so we reinitialize them.
            thread._reset_internal_locks()
            if thread is current:
                # There is only one active thread. We reset the ident to
                # its new value since it can have changed.
                ident = get_ident()
                thread._ident = ident
                new_active[ident] = thread
            else:
                # All the others are already stopped.
                # NOTE(review): Thread._stop() is a generator in this port;
                # this plain call builds and discards it, so the stopped
                # flag is never actually set here -- confirm.
                thread._stop()

        _limbo.clear()
        _active.clear()
        _active.update(new_active)
        assert len(_active) == 1
+from unittest import TestCase
+import threading
+import os
+import select
+import time
+import gc
+
+class TestEpoll(TestCase):
+
+    def test_close_hups(self):
+        r, w = os.pipe()
+        po = select.epoll()
+        po.register(r, -1)
+        #~ po.register(w, select.EPOLLOUT)
+        def do_close():
+            time.sleep(0.01)
+            #~ os.close(r)
+            time.sleep(0.01)
+            os.close(w)
+        threading.Thread(target=do_close).start()
+        #~ print(po.poll())
+        #~ time.sleep(0.01)
+        #~ os.write(w, b'1')
+        #~ print(po.poll())
+
class TestGenerator(TestCase):
    """Exploratory tests probing generator close and ``yield from`` semantics."""

    def gen(self):
        # Unused helper mirroring the inner gen() of test_generator_exit,
        # except that it does not re-raise GeneratorExit.
        try:
            yield
        except GeneratorExit as exc:
            print('yep', exc)

    def test_generator_exit(self):
        # Dropping a started generator injects GeneratorExit into it when
        # it is finalized; the prints trace when that happens.
        def gen():
            try:
                yield
            except GeneratorExit as exc:
                print('yep', exc)
                raise
        a = gen()
        a.__next__()
        del a
        gc.collect()
        #~ next(a)
        #~ del a
        #~ gc.collect()

    def test_yield_from_non_generator(self):
        # NOTE(review): the bare ``yield from`` below makes this *test
        # method* a generator function, so unittest calls it, receives a
        # generator object, and never executes the body -- the test passes
        # vacuously.  If driven, ``yield from non_gen()`` would raise
        # TypeError, since non_gen() returns the int 3, which is not
        # iterable.  Confirm whether this probe is still wanted.
        print('yield from non gen')
        def non_gen():
            return 3
        print((yield from non_gen()))
        def gen_pump():
            print('before yield from')
            self.assertEqual(3, (yield from non_gen()))
            print('after yield from')
        for a in gen_pump():
            pass