Commits

mattip committed bf25a3f Merge

merge default into branch

Files changed (384)

dotviewer/drawgraph.py

     'yellow': (255,255,0),
     }
 re_nonword=re.compile(r'([^0-9a-zA-Z_.]+)')
+re_linewidth=re.compile(r'setlinewidth\((\d+(\.\d*)?|\.\d+)\)')
 
 def combine(color1, color2, alpha):
     r1, g1, b1 = color1
             self.yl = float(yl)
             rest = rest[3:]
         self.style, self.color = rest
+        linematch = re_linewidth.match(self.style)
+        if linematch:
+            num = linematch.group(1)
+            self.linewidth = int(round(float(num)))
+            self.style = self.style[linematch.end(0):]
+        else:
+            self.linewidth = 1
         self.highlight = False
         self.cachedbezierpoints = None
         self.cachedarrowhead = None
                 fgcolor = highlight_color(fgcolor)
             points = [self.map(*xy) for xy in edge.bezierpoints()]
 
-            def drawedgebody(points=points, fgcolor=fgcolor):
-                pygame.draw.lines(self.screen, fgcolor, False, points)
+            def drawedgebody(points=points, fgcolor=fgcolor, width=edge.linewidth):
+                pygame.draw.lines(self.screen, fgcolor, False, points, width)
             edgebodycmd.append(drawedgebody)
 
             points = [self.map(*xy) for xy in edge.arrowhead()]
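
A minimal standalone sketch (not part of the commit) of what the new
re_linewidth pattern extracts from a dot "style" string; the sample style
strings below are made up:

    import re

    # same pattern as the one added above: an integer or decimal number
    # wrapped in "setlinewidth(...)"
    re_linewidth = re.compile(r'setlinewidth\((\d+(\.\d*)?|\.\d+)\)')

    for style in ['setlinewidth(2)solid', 'setlinewidth(0.5)', 'dashed']:
        match = re_linewidth.match(style)
        if match:
            width = int(round(float(match.group(1))))
            rest = style[match.end(0):]
        else:
            width, rest = 1, style
        print style, '->', width, repr(rest)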

dotviewer/graphparse.py

         try:
             plaincontent = dot2plain_graphviz(content, contenttype)
         except PlainParseError, e:
-            print e
-            # failed, retry via codespeak
-            plaincontent = dot2plain_codespeak(content, contenttype)
+            raise
+            ##print e
+            ### failed, retry via codespeak
+            ##plaincontent = dot2plain_codespeak(content, contenttype)
     return list(parse_plain(graph_id, plaincontent, links, fixedfont))

dotviewer/test/test_interactive.py

 }
 '''
 
+SOURCE2=r'''digraph f {
+  a; d; e; f; g; h; i; j; k; l;
+  a -> d [penwidth=1, style="setlinewidth(1)"];
+  d -> e [penwidth=2, style="setlinewidth(2)"];
+  e -> f [penwidth=4, style="setlinewidth(4)"];
+  f -> g [penwidth=8, style="setlinewidth(8)"];
+  g -> h [penwidth=16, style="setlinewidth(16)"];
+  h -> i [penwidth=32, style="setlinewidth(32)"];
+  i -> j [penwidth=64, style="setlinewidth(64)"];
+  j -> k [penwidth=128, style="setlinewidth(128)"];
+  k -> l [penwidth=256, style="setlinewidth(256)"];
+}'''
+
+
+
+
+
 def setup_module(mod):
     if not option.pygame:
         py.test.skip("--pygame not enabled")
     page = MyPage(str(dotfile))
     page.fixedfont = True
     graphclient.display_page(page)
+
+def test_linewidth():
+    udir.join("graph2.dot").write(SOURCE2)
+    from dotviewer import graphpage, graphclient
+    dotfile = udir.join('graph2.dot')
+    page = graphpage.DotFileGraphPage(str(dotfile))
+    graphclient.display_page(page)

lib-python/2.7/argparse.py

             # error if this argument is not allowed with other previously
             # seen arguments, assuming that actions that use the default
             # value don't really count as "present"
-            if argument_values is not action.default:
+
+            # XXX PyPy bug-to-bug compatibility: "is" on primitive types
+            # is not consistent in CPython.  We'll assume it is close
+            # enough for ints (which is true only for "small ints"), but
+            # for floats and longs and complexes we'll go for the option
+            # of forcing "is" to say False, like it usually does on
+            # CPython.  A fix is pending on CPython trunk
+            # (http://bugs.python.org/issue18943) but that might change
+            # the details of the semantics and so not be applied to 2.7.
+            # See the line AA below.
+
+            if (argument_values is not action.default or
+                    type(argument_values) in (float, long, complex)):  # AA
                 seen_non_default_actions.add(action)
                 for conflict_action in action_conflicts.get(action, []):
                     if conflict_action in seen_non_default_actions:
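
A small illustration (not from the commit) of the identity difference the
comment above works around; the values are computed at runtime so the
compiler cannot merge the constants:

    a = int('1')
    print a is 1      # True on CPython 2.7: small ints are cached
    b = float('1.5')
    print b is 1.5    # False on CPython 2.7, but True on PyPy, where "is"
                      # on floats compares by value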

lib-python/2.7/socket.py

     # All _delegate_methods must also be initialized here.
     send = recv = recv_into = sendto = recvfrom = recvfrom_into = _dummy
     __getattr__ = _dummy
+    def _drop(self):
+        pass
 
 # Wrapper around platform socket objects. This implements
 # a platform-independent dup() functionality. The
     def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, _sock=None):
         if _sock is None:
             _sock = _realsocket(family, type, proto)
-        elif _type(_sock) is _realsocket:
+        else:
+            # PyPy note about refcounting: implemented with _reuse()/_drop()
+            # on the class '_socket.socket'.  Python 3 did it differently
+            # with a reference counter on this class 'socket._socketobject'
+            # instead, but it is a less compatible change.
+            
+            # Note that a few libraries (like eventlet) poke at the
+            # private implementation of socket.py, passing custom
+            # objects to _socketobject().  These libraries need the
+            # following fix for use on PyPy: the custom objects need
+            # methods _reuse() and _drop() that maintains an explicit
+            # reference counter, starting at 0.  When it drops back to
+            # zero, close() must be called.
             _sock._reuse()
-        # PyPy note about refcounting: implemented with _reuse()/_drop()
-        # on the class '_socket.socket'.  Python 3 did it differently
-        # with a reference counter on this class 'socket._socketobject'
-        # instead, but it is a less compatible change (breaks eventlet).
+
         self._sock = _sock
 
     def send(self, data, flags=0):
 
     def close(self):
         s = self._sock
-        if type(s) is _realsocket:
-            s._drop()
         self._sock = _closedsocket()
+        s._drop()
     close.__doc__ = _realsocket.close.__doc__
 
     def accept(self):
                  "_close"]
 
     def __init__(self, sock, mode='rb', bufsize=-1, close=False):
-        if type(sock) is _realsocket:
-            sock._reuse()
+        # Note that a few libraries (like eventlet) poke at the
+        # private implementation of socket.py, passing custom
+        # objects to _fileobject().  These libraries need the
+        # following fix for use on PyPy: the custom objects need
+        # methods _reuse() and _drop() that maintains an explicit
+        # reference counter, starting at 0.  When it drops back to
+        # zero, close() must be called.
+        sock._reuse()
         self._sock = sock
         self.mode = mode # Not actually used in this version
         if bufsize < 0:
                 self.flush()
         finally:
             s = self._sock
-            if type(s) is _realsocket:
+            self._sock = None
+            if s is not None:
                 s._drop()
-            if self._close:
-                self._sock.close()
-            self._sock = None
+                if self._close:
+                    s.close()
 
     def __del__(self):
         try:
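
A minimal sketch (not part of the commit) of the _reuse()/_drop() protocol
described in the comments above, for custom objects that libraries such as
eventlet pass into _socketobject() or _fileobject(); the class name is made
up, and the _drop() logic mirrors the one added to ssl.py below:

    class FakeSock(object):
        def __init__(self):
            self._refcount = 0      # the explicit counter starts at 0
            self.closed = False

        def _reuse(self):
            self._refcount += 1

        def _drop(self):
            # really close once the counter drops back below 1
            if self._refcount < 1:
                self.close()
            else:
                self._refcount -= 1

        def close(self):
            self.closed = True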

lib-python/2.7/ssl.py

                  suppress_ragged_eofs=True, ciphers=None):
         socket.__init__(self, _sock=sock._sock)
 
+        # "close" the original socket: it is not usable any more.
+        # this only calls _drop(), which should not actually call
+        # the operating system's close() because the reference
+        # counter is greater than 1 (we hold one too).
+        sock.close()
+
         if ciphers is None and ssl_version != _SSLv2_IF_EXISTS:
             ciphers = _DEFAULT_CIPHERS
 
         works with the SSL connection.  Just use the code
         from the socket module."""
 
-        self._makefile_refs += 1
         # close=True so as to decrement the reference count when done with
         # the file-like object.
         return _fileobject(self, mode, bufsize, close=True)
 
+    def _reuse(self):
+        self._makefile_refs += 1
+
+    def _drop(self):
+        if self._makefile_refs < 1:
+            self.close()
+        else:
+            self._makefile_refs -= 1
+
 
 
 def wrap_socket(sock, keyfile=None, certfile=None,

lib-python/2.7/test/test_socket.py

         def recv(self, size):
             return self._recv_step.next()()
 
+        def _reuse(self): pass
+        def _drop(self): pass
+
     @staticmethod
     def _raise_eintr():
         raise socket.error(errno.EINTR)
             closed = False
             def flush(self): pass
             def close(self): self.closed = True
-            def _decref_socketios(self): pass
+            def _reuse(self): pass
+            def _drop(self): pass
 
         # must not close unless we request it: the original use of _fileobject
         # by module socket requires that the underlying socket not be closed until

lib-python/2.7/test/test_urllib2.py

         self.reason = reason
     def read(self):
         return ''
+    def _reuse(self): pass
+    def _drop(self): pass
 
 class MockHTTPClass:
     def __init__(self):

lib-python/2.7/urllib2.py

         # out of socket._fileobject() and into a base class.
 
         r.recv = r.read
+        r._reuse = lambda: None
+        r._drop = lambda: None
         fp = socket._fileobject(r, close=True)
 
         resp = addinfourl(fp, r.msg, req.get_full_url())

lib-python/2.7/uuid.py

     UUID('00010203-0405-0607-0809-0a0b0c0d0e0f')
 """
 
+import struct
+
 __author__ = 'Ka-Ping Yee <ping@zesty.ca>'
 
 RESERVED_NCS, RFC_4122, RESERVED_MICROSOFT, RESERVED_FUTURE = [
         overriding the given 'hex', 'bytes', 'bytes_le', 'fields', or 'int'.
         """
 
-        if [hex, bytes, bytes_le, fields, int].count(None) != 4:
-            raise TypeError('need one of hex, bytes, bytes_le, fields, or int')
         if hex is not None:
+            if (bytes is not None or bytes_le is not None or
+                    fields is not None or int is not None):
+                raise TypeError('if the hex argument is given, bytes,'
+                                ' bytes_le, fields,  and int need to be None')
             hex = hex.replace('urn:', '').replace('uuid:', '')
             hex = hex.strip('{}').replace('-', '')
             if len(hex) != 32:
                 raise ValueError('badly formed hexadecimal UUID string')
             int = long(hex, 16)
-        if bytes_le is not None:
+        elif bytes_le is not None:
+            if bytes is not None or fields is not None or int is not None:
+                raise TypeError('if the bytes_le argument is given, bytes,'
+                                ' fields, and int need to be None')
             if len(bytes_le) != 16:
                 raise ValueError('bytes_le is not a 16-char string')
             bytes = (bytes_le[3] + bytes_le[2] + bytes_le[1] + bytes_le[0] +
                      bytes_le[5] + bytes_le[4] + bytes_le[7] + bytes_le[6] +
                      bytes_le[8:])
-        if bytes is not None:
+            int = (struct.unpack('>Q', bytes[:8])[0] << 64 |
+                   struct.unpack('>Q', bytes[8:])[0])
+        elif bytes is not None:
+            if fields is not None or int is not None:
+                raise TypeError('if the bytes argument is given, fields '
+                                'and int need to be None')
             if len(bytes) != 16:
                 raise ValueError('bytes is not a 16-char string')
-            int = long(('%02x'*16) % tuple(map(ord, bytes)), 16)
-        if fields is not None:
+            int = (struct.unpack('>Q', bytes[:8])[0] << 64 |
+                   struct.unpack('>Q', bytes[8:])[0])
+        elif fields is not None:
+            if int is not None:
+                raise TypeError('if the fields argument is given, int needs'
+                                ' to be None')
             if len(fields) != 6:
                 raise ValueError('fields is not a 6-tuple')
             (time_low, time_mid, time_hi_version,
             clock_seq = (clock_seq_hi_variant << 8L) | clock_seq_low
             int = ((time_low << 96L) | (time_mid << 80L) |
                    (time_hi_version << 64L) | (clock_seq << 48L) | node)
-        if int is not None:
+        elif int is not None:
             if not 0 <= int < 1<<128L:
                 raise ValueError('int is out of range (need a 128-bit value)')
+        else:
+            raise TypeError('one of hex, bytes, bytes_le, fields,'
+                            ' or int need to be not None')
         if version is not None:
             if not 1 <= version <= 5:
                 raise ValueError('illegal version number')
             # Set the version number.
             int &= ~(0xf000 << 64L)
             int |= version << 76L
-        self.__dict__['int'] = int
+        object.__setattr__(self, 'int', int)
 
     def __cmp__(self, other):
         if isinstance(other, UUID):
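
A quick check (not part of the commit) that the struct-based conversion used
above yields the same 128-bit integer as the old '%02x' formatting, on an
arbitrary 16-byte string:

    import struct

    bytes16 = ''.join(chr(i) for i in range(16))
    old_way = long(('%02x' * 16) % tuple(map(ord, bytes16)), 16)
    new_way = (struct.unpack('>Q', bytes16[:8])[0] << 64 |
               struct.unpack('>Q', bytes16[8:])[0])
    assert old_way == new_way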

lib_pypy/_curses.py

 """Reimplementation of the standard extension module '_curses' using cffi."""
 
 import sys
+if sys.platform == 'win32':
+    # This module does not exist on Windows
+    raise ImportError('No module named _curses')
 from functools import wraps
 
 from cffi import FFI

lib_pypy/_sqlite3.py

         if cvt is not None:
             param = cvt(param)
 
-        param = adapt(param)
+        try:
+            param = adapt(param)
+        except:
+            pass  # And use previous value
 
         if param is None:
             rc = _lib.sqlite3_bind_null(self._statement, idx)
         for i in xrange(_lib.sqlite3_column_count(self._statement)):
             name = _lib.sqlite3_column_name(self._statement, i)
             if name:
-                name = _ffi.string(name).decode('utf-8').split("[")[0].strip()
+                name = _ffi.string(name).split("[")[0].strip()
             desc.append((name, None, None, None, None, None, None))
         return desc
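
For context (not part of the commit), the name handling above keeps only the
part of a declared column name that comes before any "[typename]" annotation;
the column name in this example is made up:

    name = 'created_at [timestamp]'
    print name.split("[")[0].strip()    # -> 'created_at'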
 

lib_pypy/_tkinter/__init__.py

 READABLE = tklib.TCL_READABLE
 WRITABLE = tklib.TCL_WRITABLE
 EXCEPTION = tklib.TCL_EXCEPTION
+DONT_WAIT = tklib.TCL_DONT_WAIT
 
 def create(screenName=None, baseName=None, className=None,
            interactive=False, wantobjects=False, wantTk=True,

lib_pypy/_tkinter/app.py

 from . import TclError
 from .tclobj import TclObject, FromObj, AsObj, TypeCache
 
+import contextlib
 import sys
+import threading
+import time
+
+
+class _DummyLock(object):
+    "A lock-like object that does not do anything"
+    def acquire(self):
+        pass
+    def release(self):
+        pass
+    def __enter__(self):
+        pass
+    def __exit__(self, *exc):
+        pass
+
 
 def varname_converter(input):
     if isinstance(input, TclObject):
     def PythonCmd(clientData, interp, argc, argv):
         self = tkffi.from_handle(clientData)
         assert self.app.interp == interp
-        try:
-            args = [tkffi.string(arg) for arg in argv[1:argc]]
-            result = self.func(*args)
-            obj = AsObj(result)
-            tklib.Tcl_SetObjResult(interp, obj)
-        except:
-            self.app.errorInCmd = True
-            self.app.exc_info = sys.exc_info()
-            return tklib.TCL_ERROR
-        else:
-            return tklib.TCL_OK
+        with self.app._tcl_lock_released():
+            try:
+                args = [tkffi.string(arg) for arg in argv[1:argc]]
+                result = self.func(*args)
+                obj = AsObj(result)
+                tklib.Tcl_SetObjResult(interp, obj)
+            except:
+                self.app.errorInCmd = True
+                self.app.exc_info = sys.exc_info()
+                return tklib.TCL_ERROR
+            else:
+                return tklib.TCL_OK
 
     @tkffi.callback("Tcl_CmdDeleteProc")
     def PythonCmdDelete(clientData):
 
 
 class TkApp(object):
+    _busywaitinterval = 0.02  # 20ms.
+
     def __new__(cls, screenName, baseName, className,
                 interactive, wantobjects, wantTk, sync, use):
         if not wantobjects:
         self.quitMainLoop = False
         self.errorInCmd = False
 
+        if not self.threaded:
+            # TCL is not thread-safe, calls need to be serialized.
+            self._tcl_lock = threading.Lock()
+        else:
+            self._tcl_lock = _DummyLock()
+
         self._typeCache = TypeCache()
         self._commands = {}
 
         if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread():
             raise RuntimeError("Calling Tcl from different appartment")
 
+    @contextlib.contextmanager
+    def _tcl_lock_released(self):
+        "Context manager to temporarily release the tcl lock."
+        self._tcl_lock.release()
+        yield
+        self._tcl_lock.acquire()
+
     def loadtk(self):
         # We want to guard against calling Tk_Init() multiple times
         err = tklib.Tcl_Eval(self.interp, "info exists     tk_version")
         flags=tklib.TCL_LEAVE_ERR_MSG
         if global_only:
             flags |= tklib.TCL_GLOBAL_ONLY
-        res = tklib.Tcl_GetVar2Ex(self.interp, name1, name2, flags)
-        if not res:
-            self.raiseTclError()
-        assert self._wantobjects
-        return FromObj(self, res)
+        with self._tcl_lock:
+            res = tklib.Tcl_GetVar2Ex(self.interp, name1, name2, flags)
+            if not res:
+                self.raiseTclError()
+            assert self._wantobjects
+            return FromObj(self, res)
 
     def _setvar(self, name1, value, global_only=False):
         name1 = varname_converter(name1)
+        # XXX Acquire tcl lock???
         newval = AsObj(value)
         flags=tklib.TCL_LEAVE_ERR_MSG
         if global_only:
             flags |= tklib.TCL_GLOBAL_ONLY
-        res = tklib.Tcl_SetVar2Ex(self.interp, name1, tkffi.NULL,
-                                  newval, flags)
-        if not res:
-            self.raiseTclError()
+        with self._tcl_lock:
+            res = tklib.Tcl_SetVar2Ex(self.interp, name1, tkffi.NULL,
+                                      newval, flags)
+            if not res:
+                self.raiseTclError()
 
     def _unsetvar(self, name1, name2=None, global_only=False):
         name1 = varname_converter(name1)
         flags=tklib.TCL_LEAVE_ERR_MSG
         if global_only:
             flags |= tklib.TCL_GLOBAL_ONLY
-        res = tklib.Tcl_UnsetVar2(self.interp, name1, name2, flags)
-        if res == tklib.TCL_ERROR:
-            self.raiseTclError()
+        with self._tcl_lock:
+            res = tklib.Tcl_UnsetVar2(self.interp, name1, name2, flags)
+            if res == tklib.TCL_ERROR:
+                self.raiseTclError()
 
     def getvar(self, name1, name2=None):
         return self._var_invoke(self._getvar, name1, name2)
         if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread():
             raise NotImplementedError("Call from another thread")
 
-        res = tklib.Tcl_CreateCommand(
-            self.interp, cmdName, _CommandData.PythonCmd,
-            clientData, _CommandData.PythonCmdDelete)
+        with self._tcl_lock:
+            res = tklib.Tcl_CreateCommand(
+                self.interp, cmdName, _CommandData.PythonCmd,
+                clientData, _CommandData.PythonCmdDelete)
         if not res:
             raise TclError("can't create Tcl command")
 
         if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread():
             raise NotImplementedError("Call from another thread")
 
-        res = tklib.Tcl_DeleteCommand(self.interp, cmdName)
+        with self._tcl_lock:
+            res = tklib.Tcl_DeleteCommand(self.interp, cmdName)
         if res == -1:
             raise TclError("can't delete Tcl command")
 
                 tklib.Tcl_IncrRefCount(obj)
                 objects[i] = obj
 
-            res = tklib.Tcl_EvalObjv(self.interp, argc, objects, flags)
-            if res == tklib.TCL_ERROR:
-                self.raiseTclError()
-            else:
-                result = self._callResult()
+            with self._tcl_lock:
+                res = tklib.Tcl_EvalObjv(self.interp, argc, objects, flags)
+                if res == tklib.TCL_ERROR:
+                    self.raiseTclError()
+                else:
+                    result = self._callResult()
         finally:
             for obj in objects:
                 if obj:
 
     def eval(self, script):
         self._check_tcl_appartment()
-        res = tklib.Tcl_Eval(self.interp, script)
-        if res == tklib.TCL_ERROR:
-            self.raiseTclError()
-        return tkffi.string(tklib.Tcl_GetStringResult(self.interp))
+        with self._tcl_lock:
+            res = tklib.Tcl_Eval(self.interp, script)
+            if res == tklib.TCL_ERROR:
+                self.raiseTclError()
+            return tkffi.string(tklib.Tcl_GetStringResult(self.interp))
 
     def evalfile(self, filename):
         self._check_tcl_appartment()
-        res = tklib.Tcl_EvalFile(self.interp, filename)
-        if res == tklib.TCL_ERROR:
-            self.raiseTclError()
-        return tkffi.string(tklib.Tcl_GetStringResult(self.interp))
+        with self._tcl_lock:
+            res = tklib.Tcl_EvalFile(self.interp, filename)
+            if res == tklib.TCL_ERROR:
+                self.raiseTclError()
+            return tkffi.string(tklib.Tcl_GetStringResult(self.interp))
 
     def split(self, arg):
         if isinstance(arg, tuple):
             if self.threaded:
                 result = tklib.Tcl_DoOneEvent(0)
             else:
-                raise NotImplementedError("TCL configured without threads")
+                with self._tcl_lock:
+                    result = tklib.Tcl_DoOneEvent(tklib.TCL_DONT_WAIT)
+                if result == 0:
+                    time.sleep(self._busywaitinterval)
 
             if result < 0:
                 break
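
A standalone sketch (not from the commit) of the locking pattern introduced
above: a real lock when the Tcl build is not thread-safe, a no-op lock
otherwise, and a context manager that temporarily releases it around Python
callbacks.  Unlike the commit's version, the sketch reacquires the lock in a
finally block; the class names are illustrative only:

    import contextlib
    import threading

    class DummyLock(object):
        "A lock-like object that does not do anything"
        def acquire(self):
            pass
        def release(self):
            pass
        def __enter__(self):
            pass
        def __exit__(self, *exc):
            pass

    class MiniApp(object):
        def __init__(self, tcl_is_threaded):
            # serialize Tcl calls only when the Tcl build cannot do it itself
            self._tcl_lock = DummyLock() if tcl_is_threaded else threading.Lock()

        @contextlib.contextmanager
        def _tcl_lock_released(self):
            "Temporarily release the lock, e.g. while running a Python command."
            self._tcl_lock.release()
            try:
                yield
            finally:
                self._tcl_lock.acquire()

        def call_guarded(self, func):
            with self._tcl_lock:
                return func()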

lib_pypy/_tkinter/tklib.py

 # C bindings with libtcl and libtk.
 
 from cffi import FFI
+import sys
 
 tkffi = FFI()
 
 #define TCL_EVAL_DIRECT ...
 #define TCL_EVAL_GLOBAL ...
 
+#define TCL_DONT_WAIT ...
+
 typedef unsigned short Tcl_UniChar;
 typedef ... Tcl_Interp;
 typedef ...* Tcl_ThreadId;
 int Tk_GetNumMainWindows();
 """)
 
+# XXX find a better way to detect paths
+# XXX pick up CPPFLAGS and LDFLAGS and add to these paths?
+if sys.platform.startswith("openbsd"):
+    incdirs = ['/usr/local/include/tcl8.5', '/usr/local/include/tk8.5', '/usr/X11R6/include']
+    linklibs = ['tk85', 'tcl85']
+    libdirs = ['/usr/local/lib', '/usr/X11R6/lib']
+else:
+    incdirs=['/usr/include/tcl']
+    linklibs=['tcl', 'tk']
+    libdirs = []
+
 tklib = tkffi.verify("""
 #include <tcl.h>
 #include <tk.h>
 char *get_tk_version() { return TK_VERSION; }
 char *get_tcl_version() { return TCL_VERSION; }
 """,
-include_dirs=['/usr/include/tcl'],
-libraries=['tcl', 'tk'],
+include_dirs=incdirs,
+libraries=linklibs,
+library_dirs = libdirs
 )

lib_pypy/cffi.egg-info

 Metadata-Version: 1.0
 Name: cffi
-Version: 0.6
+Version: 0.7
 Summary: Foreign Function Interface for Python calling C code.
 Home-page: http://cffi.readthedocs.org
 Author: Armin Rigo, Maciej Fijalkowski

lib_pypy/cffi/__init__.py

 from .api import FFI, CDefError, FFIError
 from .ffiplatform import VerificationError, VerificationMissing
 
-__version__ = "0.7"
-__version_info__ = (0, 7)
+__version__ = "0.7.2"
+__version_info__ = (0, 7, 2)

lib_pypy/cffi/api.py

             # _cffi_backend.so compiled.
             import _cffi_backend as backend
             from . import __version__
-            assert backend.__version__ == __version__
+            assert (backend.__version__ == __version__ or
+                    backend.__version__ == __version__[:3])
             # (If you insist you can also try to pass the option
             # 'backend=backend_ctypes.CTypesBackend()', but don't
             # rely on it!  It's probably not going to work well.)

lib_pypy/cffi/commontypes.py

         elif result in model.PrimitiveType.ALL_PRIMITIVE_TYPES:
             result = model.PrimitiveType(result)
         else:
-            assert commontype != result
+            if commontype == result:
+                raise api.FFIError("Unsupported type: %r.  Please file a bug "
+                                   "if you think it should be." % (commontype,))
             result = resolve_common_type(result)   # recursively
         assert isinstance(result, model.BaseTypeByIdentity)
         _CACHE[commontype] = result

lib_pypy/cffi/cparser.py

                 # assume a primitive type.  get it from .names, but reduce
                 # synonyms to a single chosen combination
                 names = list(type.names)
-                if names == ['signed'] or names == ['unsigned']:
-                    names.append('int')
-                if names[0] == 'signed' and names != ['signed', 'char']:
-                    names.pop(0)
-                if (len(names) > 1 and names[-1] == 'int'
-                        and names != ['unsigned', 'int']):
-                    names.pop()
+                if names != ['signed', 'char']:    # keep this unmodified
+                    prefixes = {}
+                    while names:
+                        name = names[0]
+                        if name in ('short', 'long', 'signed', 'unsigned'):
+                            prefixes[name] = prefixes.get(name, 0) + 1
+                            del names[0]
+                        else:
+                            break
+                    # ignore the 'signed' prefix below, and reorder the others
+                    newnames = []
+                    for prefix in ('unsigned', 'short', 'long'):
+                        for i in range(prefixes.get(prefix, 0)):
+                            newnames.append(prefix)
+                    if not names:
+                        names = ['int']    # implicitly
+                    if names == ['int']:   # but kill it if 'short' or 'long'
+                        if 'short' in prefixes or 'long' in prefixes:
+                            names = []
+                    names = newnames + names
                 ident = ' '.join(names)
                 if ident == 'void':
                     return model.void_type
                 self._partial_length = True
                 return None
         #
-        raise api.FFIError("unsupported non-constant or "
-                           "not immediately constant expression")
+        raise api.FFIError("unsupported expression: expected a "
+                           "simple numeric constant")
 
     def _build_enum_type(self, explicit_name, decls):
         if decls is not None:
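
A self-contained sketch (not from the commit) of the prefix normalisation
performed above, checked on a few declarator name lists of the kind pycparser
produces:

    def normalize(names):
        names = list(names)
        if names != ['signed', 'char']:    # keep this one unmodified
            prefixes = {}
            while names and names[0] in ('short', 'long', 'signed', 'unsigned'):
                prefixes[names[0]] = prefixes.get(names[0], 0) + 1
                del names[0]
            # drop 'signed', reorder the other prefixes as unsigned/short/long
            newnames = []
            for prefix in ('unsigned', 'short', 'long'):
                newnames.extend([prefix] * prefixes.get(prefix, 0))
            if not names:
                names = ['int']
            if names == ['int'] and ('short' in prefixes or 'long' in prefixes):
                names = []
            names = newnames + names
        return ' '.join(names)

    assert normalize(['long', 'unsigned', 'int']) == 'unsigned long'
    assert normalize(['long', 'long']) == 'long long'
    assert normalize(['signed']) == 'int'
    assert normalize(['signed', 'char']) == 'signed char'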

lib_pypy/cffi/vengine_gen.py

     def load_library(self):
         # import it with the CFFI backend
         backend = self.ffi._backend
-        module = backend.load_library(self.verifier.modulefilename)
+        # needs to make a path that contains '/', on Posix
+        filename = os.path.join(os.curdir, self.verifier.modulefilename)
+        module = backend.load_library(filename)
         #
         # call loading_gen_struct() to get the struct layout inferred by
         # the C compiler

lib_pypy/datetime.py

 # for all computations.  See the book for algorithms for converting between
 # proleptic Gregorian ordinals and many other calendar systems.
 
-_DAYS_IN_MONTH = [None, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
+_DAYS_IN_MONTH = [-1, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
 
-_DAYS_BEFORE_MONTH = [None]
+_DAYS_BEFORE_MONTH = [-1]
 dbm = 0
 for dim in _DAYS_IN_MONTH[1:]:
     _DAYS_BEFORE_MONTH.append(dbm)

lib_pypy/readline.egg-info

+Metadata-Version: 1.0
+Name: readline
+Version: 6.2.4.1
+Summary: Hack to make "pip install readline" happy and do nothing
+Home-page: UNKNOWN
+Author: UNKNOWN
+Author-email: UNKNOWN
+License: UNKNOWN
+Description: UNKNOWN
+Platform: UNKNOWN

pypy/config/pypyoption.py

 
 
 pypy_optiondescription = OptionDescription("objspace", "Object Space Options", [
-    OptionDescription("opcodes", "opcodes to enable in the interpreter", [
-        BoolOption("CALL_METHOD", "emit a special bytecode for expr.name()",
-                   default=False),
-        ]),
-
     OptionDescription("usemodules", "Which Modules should be used", [
         BoolOption(modname, "use module %s" % (modname, ),
                    default=modname in default_modules,
         BoolOption("optimized_int_add",
                    "special case the addition of two integers in BINARY_ADD",
                    default=False),
-        BoolOption("optimized_comparison_op",
-                   "special case the comparison of integers",
-                   default=False),
         BoolOption("optimized_list_getitem",
                    "special case the 'list[integer]' expressions",
                    default=False),
 
     # all the good optimizations for PyPy should be listed here
     if level in ['2', '3', 'jit']:
-        config.objspace.opcodes.suggest(CALL_METHOD=True)
         config.objspace.std.suggest(withrangelist=True)
         config.objspace.std.suggest(withmethodcache=True)
         config.objspace.std.suggest(withprebuiltchar=True)

pypy/doc/_ref.txt

 .. _`rpython/rtyper/memory/gc/hybrid.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/hybrid.py
 .. _`rpython/rtyper/memory/gc/minimarkpage.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/minimarkpage.py
 .. _`rpython/rtyper/memory/gc/semispace.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/semispace.py
-.. _`rpython/rtyper/ootypesystem/`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/ootypesystem/
-.. _`rpython/rtyper/ootypesystem/ootype.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/ootypesystem/ootype.py
 .. _`rpython/rtyper/rint.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rint.py
 .. _`rpython/rtyper/rlist.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rlist.py
 .. _`rpython/rtyper/rmodel.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rmodel.py

pypy/doc/coding-guide.rst

 
 + methods and other class attributes do not change after startup
 + single inheritance is fully supported
-+ simple mixins somewhat work too, but the mixed in class needs a
-  ``_mixin_ = True`` class attribute. isinstance checks against the
-  mixin type will fail when translated.
++ use ``rpython.rlib.objectmodel.import_from_mixin(M)`` in a class
+  body to copy the whole content of a class ``M``.  This can be used
+  to implement mixins: functions and staticmethods are duplicated
+  (the other class attributes are just copied unmodified); a short
+  sketch follows below.
 
 + classes are first-class objects too
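
A small sketch (not part of the commit) of the import_from_mixin() pattern
described in the bullet above; it assumes the rpython package is importable
and the class names are made up:

    from rpython.rlib.objectmodel import import_from_mixin

    class CounterMixin(object):
        def reset(self):
            self.counter = 0
        def tick(self):
            self.counter += 1

    class Interp(object):
        import_from_mixin(CounterMixin)   # copies reset() and tick() into Interp

    interp = Interp()
    interp.reset()
    interp.tick()
    assert interp.counter == 1
    assert not isinstance(interp, CounterMixin)   # copied, not inherited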
 

pypy/doc/config/objspace.opcodes.CALL_METHOD.txt

-Enable a pair of bytecodes that speed up method calls.
-See ``pypy.interpreter.callmethod`` for a description.
-
-The goal is to avoid creating the bound method object in the common
-case.  So far, this only works for calls with no keyword, no ``*arg``
-and no ``**arg`` but it would be easy to extend.
-
-For more information, see the section in `Standard Interpreter Optimizations`_.
-
-.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#lookup-method-call-method

pypy/doc/config/objspace.opcodes.txt

-..  intentionally empty

pypy/doc/cppyy.rst

 the selection of scientific software) will also work for a build with the
 builtin backend.
 
-.. _`download`: http://cern.ch/wlav/reflex-2013-04-23.tar.bz2
+.. _`download`: http://cern.ch/wlav/reflex-2013-08-14.tar.bz2
 .. _`ROOT`: http://root.cern.ch/
 
 Besides Reflex, you probably need a version of `gccxml`_ installed, which is
 
 To install the standalone version of Reflex, after download::
 
-    $ tar jxf reflex-2013-04-23.tar.bz2
-    $ cd reflex-2013-04-23
+    $ tar jxf reflex-2013-08-14.tar.bz2
+    $ cd reflex-2013-08-14
     $ ./build/autogen
     $ ./configure <usual set of options such as --prefix>
     $ make && make install
   attempt to point newcomers at existing alternatives, which are more
   mainstream and where they will get help from many people.*
 
-  *If anybody seriously wants to promote RPython anyway, he is welcome
+  *If anybody seriously wants to promote RPython anyway, they are welcome
   to: we won't actively resist such a plan.  There are a lot of things
   that could be done to make RPython a better Java-ish language for
   example, starting with supporting non-GIL-based multithreading, but we
 patch the generated machine code.
 
 So the position of the core PyPy developers is that if anyone wants to
-make an N+1'th attempt with LLVM, he is welcome, and he will receive a
-bit of help on the IRC channel, but he is left with the burden of proof
+make an N+1'th attempt with LLVM, they are welcome, and we will be happy to
+provide a bit of help on the IRC channel, but they are left with the burden of proof
 that it works.
 
 ----------------------

pypy/doc/getting-started-python.rst

      zlib-devel bzip2-devel ncurses-devel expat-devel \
      openssl-devel gc-devel python-sphinx python-greenlet
 
+   On SLES11:
+
+     $ sudo zypper install gcc make python-devel pkg-config \
+     zlib-devel libopenssl-devel libbz2-devel sqlite3-devel \
+     libexpat-devel libffi-devel python-curses
+
    The above command lines are split with continuation characters, giving the necessary dependencies first, then the optional ones.
 
    * ``pkg-config`` (to help us locate libffi files)

pypy/doc/interpreter-optimizations.rst

 if it is not None, then it is considered to be an additional first
 argument in the call to the *im_func* object from the stack.
 
-You can enable this feature with the :config:`objspace.opcodes.CALL_METHOD`
-option.
-
 .. more here?
 
 Overall Effects

pypy/doc/jit/index.rst

 
 - Hooks_ debugging facilities available to a python programmer
 
+- Virtualizable_ how virtualizables work and what they are (in other words how
+  to make frames more efficient).
 
 .. _Overview: overview.html
 .. _Notes: pyjitpl5.html
 .. _Hooks: ../jit-hooks.html
+.. _Virtualizable: virtualizable.html

pypy/doc/jit/virtualizable.rst

+
+Virtualizables
+==============
+
+**Note:** this document does not have a proper introduction as to how
+to understand the basics. We should write some. If you happen to be here
+and you're missing context, feel free to pester us on IRC.
+
+Problem description
+-------------------
+
+The JIT is very good at making sure some objects are never allocated if they
+don't escape from the trace. Such objects are called ``virtuals``. However,
+if we're dealing with frames, virtuals are often not good enough. Frames
+can escape and they can also be allocated already at the moment we enter the
+JIT. In such cases we need some extra object that can still be optimized away,
+despite existing on the heap.
+
+Solution
+--------
+
+We introduce virtualizables. They're objects that exist on the heap, but their
+fields are not always in sync with whatever happens in the assembler. One
+example is that virtualizable fields can store virtual objects without
+forcing them. This is very useful for frames. Declaring an object to be
+virtualizable works like this::
+
+    class Frame(object):
+        _virtualizable_ = ['locals[*]', 'stackdepth']
+
+And we use them in ``JitDriver`` like this::
+
+    jitdriver = JitDriver(greens=[], reds=['frame'], virtualizables=['frame'])
+
+This declaration means that ``stackdepth`` is a virtualizable **field**, while
+``locals`` is a virtualizable **array** (a list stored on a virtualizable).
+There are various rules about using virtualizables, especially about
+virtualizable arrays, and they can be very confusing. Breaking them will
+usually end up as a compile-time error (as opposed to strange runtime
+behavior). The rules are:
+
+* Each array access must be with a known positive index that cannot raise
+  an ``IndexError``. Using ``no = jit.hint(no, promote=True)`` might be useful
+  to get a constant-number access. This is only safe if the index is actually
+  constant or changing rarely within the context of the user's code.
+
+* If you initialize a new virtualizable in the JIT, it has to be done like this
+  (for example if we're in ``Frame.__init__``)::
+
+    self = hint(self, access_directly=True, fresh_virtualizable=True)
+
+  that way you can populate the fields directly.
+
+* Using a virtualizable outside of the JIT is very expensive and sometimes
+  aborts tracing. Consider carefully how to do it only for debugging
+  purposes and not on every call (e.g. a ``sys._getframe`` call).
+
+* If you have something equivalent to a Python generator, where the
+  virtualizable survives for longer, you want to force it before returning.
+  It's better to do it that way than to have it forced by an external call
+  some time later.  This is done using
+  ``jit.hint(frame, force_virtualizable=True)``.
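
To tie the pieces above together, a condensed sketch (not from this commit)
of a frame-based interpreter loop using a virtualizable; it assumes the
rpython toolchain is importable and all names are illustrative only:

    from rpython.rlib.jit import JitDriver, hint

    class Frame(object):
        _virtualizable_ = ['stackdepth', 'locals[*]']

        def __init__(self, n_locals):
            self = hint(self, access_directly=True, fresh_virtualizable=True)
            self.locals = [0] * n_locals
            self.stackdepth = 0

    driver = JitDriver(greens=['pc', 'bytecode'], reds=['frame'],
                       virtualizables=['frame'])

    def interpret(bytecode, frame):
        pc = 0
        while pc < len(bytecode):
            driver.jit_merge_point(pc=pc, bytecode=bytecode, frame=frame)
            if bytecode[pc] == 'i':       # increment local 0
                frame.locals[0] += 1      # constant index: fine for the array
            pc += 1
        return frame.locals[0]

Untranslated, the jit hints are no-ops, so interpret('iii', Frame(1)) can be
run directly and returns 3.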

pypy/doc/tool/makecontributor.py

     'Roberto De Ioris': ['roberto@mrspurr'],
     'Sven Hager': ['hager'],
     'Tomo Cocoa': ['cocoatomo'],
+    'Romain Guillebert': ['rguillebert', 'rguillbert', 'romain', 'Guillebert Romain'],
+    'Ronan Lamy': ['ronan'],
+    'Edd Barrett': ['edd'],
+    'Manuel Jacob': ['mjacob'],
+    'Rami Chowdhury': ['necaris'],
     }
 
 alias_map = {}
     if not match:
         return set()
     ignore_words = ['around', 'consulting', 'yesterday', 'for a bit', 'thanks',
-                    'in-progress', 'bits of', 'even a little', 'floating',]
+                    'in-progress', 'bits of', 'even a little', 'floating',
+                    'a bit', 'reviewing']
     sep_words = ['and', ';', '+', '/', 'with special  by']
     nicknames = match.group(1)
     for word in ignore_words:
     ##         print '%5d %s' % (n, name)
     ##     else:
     ##         print name
-                
+
     items = authors_count.items()
     items.sort(key=operator.itemgetter(1), reverse=True)
     for name, n in items:

pypy/doc/whatsnew-head.rst

 .. this is a revision shortly after release-2.1-beta
 .. startrev: 4eb52818e7c0
 
+.. branch: sanitise_bytecode_dispatch
+Make PyPy's bytecode dispatcher easy to read, and less reliant on RPython
+magic. There is no functional change, though the removal of dead code leads
+to many fewer tests to execute.
+
 .. branch: fastjson
 Fast json decoder written in RPython, about 3-4x faster than the pure Python
 decoder which comes with the stdlib
 No longer delegate numpy string_ methods to space.StringObject, in numpy
 this works by kind of by accident. Support for merging the refactor-str-types
 branch
+
+.. branch: kill-typesystem
+Remove the "type system" abstraction, now that there is only ever one kind of
+type system used.
+
+.. branch: kill-gen-store-back-in
+Kills gen_store_back_in_virtualizable - should improve non-inlined calls by
+a bit
+
+.. branch: dotviewer-linewidth
+.. branch: reflex-support
+.. branch: numpypy-inplace-op
+.. branch: rewritten-loop-logging
+.. branch: no-release-gil
+.. branch: safe-win-mmap
+.. branch: boolean-indexing-cleanup
+
+.. branch: nobold-backtrace
+Work on improving UnionError messages and stack trace displays.
+
+.. branch: improve-errors-again
+More improvements and refactorings of error messages.
+
+.. branch: improve-errors-again2
+Unbreak tests in rlib.
+
+.. branch: less-stringly-ops
+Use subclasses of SpaceOperation instead of SpaceOperator objects.
+Random cleanups in flowspace.
+

pypy/doc/windows.rst

 The following text gives some hints about how to translate the PyPy
 interpreter.
 
+PyPy supports only being translated as a 32bit program, even on
+64bit Windows.  See at the end of this page for what is missing
+for a full 64bit translation.
+
 To build pypy-c you need a C compiler.  Microsoft Visual Studio is
 preferred, but you can also use the mingw32 port of gcc.
 
 INCLUDE, LIB and PATH (for DLLs) environment variables appropriately.
 
 Abridged method (for -Ojit builds using Visual Studio 2008)
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 Download the versions of all the external packages
 from 
 https://bitbucket.org/pypy/pypy/downloads/local.zip
     nmake -f makefile.msc
     
 The sqlite3 database library
-~~~~~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 Download http://www.sqlite.org/2013/sqlite-amalgamation-3071601.zip and extract
 it into a directory under the base directory. Also get 
 http://www.sqlite.org/2013/sqlite-dll-win32-x86-3071601.zip and extract the dll
 into the bin directory, and the sqlite3.def into the sources directory.
 Now build the import library so cffi can use the header and dll::
+
     lib /DEF:sqlite3.def /OUT:sqlite3.lib
     copy sqlite3.lib path\to\libs
 
 March 2012, --cc is not a valid option for pytest.py. However, if you set an
 environment variable CC to the compiler exe, testing will use it.
 
-.. _'mingw32 build': http://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win32/Automated%20Builds
+.. _`mingw32 build`: http://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win32/Automated%20Builds
 .. _`mingw64 build`: http://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win64/Automated%20Builds
 .. _`msys for mingw`: http://sourceforge.net/projects/mingw-w64/files/External%20binary%20packages%20%28Win64%20hosted%29/MSYS%20%2832-bit%29   
 .. _`libffi source files`: http://sourceware.org/libffi/
 .. _`RPython translation toolchain`: translation.html
+
+
+What is missing for a full 64-bit translation
+---------------------------------------------
+
+The main blocker is that we assume that the integer type of RPython is
+large enough to (occasionally) contain a pointer value cast to an
+integer.  The simplest fix is to make sure that it is so, but it will
+give the following incompatibility between CPython and PyPy on Win64:
+  
+CPython: ``sys.maxint == 2**31-1, sys.maxsize == 2**63-1``
+
+PyPy: ``sys.maxint == sys.maxsize == 2**63-1``
+
+...and, correspondingly, PyPy supports ints up to the larger value of
+sys.maxint before they are converted to ``long``.  The first decision
+that someone needs to make is if this incompatibility is reasonable.
+
+Assuming that it is, the first thing to do is probably to hack *CPython*
+until it fits this model: replace the field in PyIntObject with a ``long
+long`` field, and change the value of ``sys.maxint``.  This might just
+work, even if half-brokenly: I'm sure you can crash it because of the
+precision loss that undoubtedly occurs everywhere, but try not to. :-)
+
+Such a hacked CPython is what you'll use in the next steps.  We'll call
+it CPython64/64.
+
+It is probably not too much work if the goal is only to get a translated
+PyPy executable, and to run all tests before translation.  But you need
+to start somewhere, and you should start with some tests in
+rpython/translator/c/test/, like ``test_standalone.py`` and
+``test_newgc.py``: try to have them pass on top of CPython64/64.
+
+Keep in mind that this runs small translations, and some details may go
+wrong.  The most obvious one is to check that it produces C files that
+use the integer type ``Signed`` --- but what is ``Signed`` defined to?
+It should be equal to ``long`` on every other platform, but on Win64 it
+should be something like ``long long``.
+
+What is more generally needed is to review all the C files in
+rpython/translator/c/src for the word ``long``, because this means a
+32-bit integer even on Win64.  Replace it with ``Signed`` most of the
+times.  You can replace one with the other without breaking anything on
+any other platform, so feel free to.
+
+Then, these two C types have corresponding RPython types: ``rffi.LONG``
+and ``lltype.Signed`` respectively.  The first should really correspond
+to the C ``long``.  Add tests that check that integers cast to one
+type or the other really have 32 and 64 bits respectively, on Win64.
+
+Once these basic tests work, you need to review ``rpython/rlib/`` for
+usages of ``rffi.LONG`` versus ``lltype.Signed``.  The goal would be to
+fix some more ``LONG-versus-Signed`` issues, by fixing the tests --- as
+always run on top of CPython64/64.  Note that there was some early work
+done in ``rpython/rlib/rarithmetic`` with the goal of running all the
+tests on Win64 on the regular CPython, but I think by now that it's a
+bad idea.  Look only at CPython64/64.
+
+The major intermediate goal is to get a translation of PyPy with ``-O2``
+with a minimal set of modules, starting with ``--no-allworkingmodules``;
+you need to use CPython64/64 to run this translation too.  Check
+carefully the warnings of the C compiler at the end.  I think that MSVC
+is "nice" in the sense that by default a lot of mismatches of integer
+sizes are reported as warnings.
+
+Then you need to review ``pypy/module/*/`` for ``LONG-versus-Signed``
+issues.  At some time during this review, we get a working translated
+PyPy on Windows 64 that includes all ``--translationmodules``, i.e.
+everything needed to run translations.  When we are there, the hacked
+CPython64/64 becomes much less important, because we can run future
+translations on top of this translated PyPy.  As soon as we get there,
+please *distribute* the translated PyPy.  It's an essential component
+for anyone else that wants to work on Win64!  We end up with a strange
+kind of dependency --- we need a translated PyPy in order to translate a
+PyPy ---, but I believe it's ok here, as Windows executables are
+supposed to never be broken by newer versions of Windows.
+
+Happy hacking :-)
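
As a complement to the LONG-versus-Signed discussion above, a tiny check of
the kind suggested there (assuming the rpython package is importable); on
Win64 the first value should stay 4 while the second should become 8:

    from rpython.rtyper.lltypesystem import lltype, rffi

    print rffi.sizeof(rffi.LONG)        # size of the C 'long' type
    print rffi.sizeof(lltype.Signed)    # size of the RPython 'Signed' type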

pypy/interpreter/app_main.py

     except:
         try:
             stderr = sys.stderr
-        except AttributeError:
-            pass   # too bad
-        else:
             print >> stderr, 'Error calling sys.excepthook:'
             originalexcepthook(*sys.exc_info())
             print >> stderr
             print >> stderr, 'Original exception was:'
+        except:
+            pass   # too bad
 
     # we only get here if sys.excepthook didn't do its job
     originalexcepthook(etype, evalue, etraceback)

pypy/interpreter/astcompiler/codegen.py

         return self._call_has_no_star_args(call) and not call.keywords
 
     def _optimize_method_call(self, call):
-        if not self.space.config.objspace.opcodes.CALL_METHOD or \
-                not self._call_has_no_star_args(call) or \
-                not isinstance(call.func, ast.Attribute):
+        if not self._call_has_no_star_args(call) or \
+           not isinstance(call.func, ast.Attribute):
             return False
         attr_lookup = call.func
         assert isinstance(attr_lookup, ast.Attribute)

pypy/interpreter/baseobjspace.py

         raise NotImplementedError("only for interp-level user subclasses "
                                   "from typedef.py")
 
-    def getname(self, space, default='?'):
+    def getname(self, space):
         try:
             return space.str_w(space.getattr(self, space.wrap('__name__')))
         except OperationError, e:
             if e.match(space, space.w_TypeError) or e.match(space, space.w_AttributeError):
-                return default
+                return '?'
             raise
 
     def getaddrstring(self, space):

pypy/interpreter/buffer.py

 from pypy.interpreter.typedef import TypeDef
 from pypy.interpreter.gateway import interp2app, unwrap_spec
 from pypy.interpreter.error import OperationError
-from rpython.rlib.objectmodel import compute_hash
+from rpython.rlib.objectmodel import compute_hash, import_from_mixin
 from rpython.rlib.rstring import StringBuilder
 
 
 # ____________________________________________________________
 
 class SubBufferMixin(object):
-    _mixin_ = True
-
     def __init__(self, buffer, offset, size):
         self.buffer = buffer
         self.offset = offset
                           # out of bounds
         return self.buffer.getslice(self.offset + start, self.offset + stop, step, size)
 
-class SubBuffer(SubBufferMixin, Buffer):
-    pass
+class SubBuffer(Buffer):
+    import_from_mixin(SubBufferMixin)
 
-class RWSubBuffer(SubBufferMixin, RWBuffer):
+class RWSubBuffer(RWBuffer):
+    import_from_mixin(SubBufferMixin)
 
     def setitem(self, index, char):
         self.buffer.setitem(self.offset + index, char)

pypy/interpreter/function.py

                 space.abstract_isinstance_w(w_firstarg, self.w_class)):
             pass  # ok
         else:
-            clsdescr = self.w_class.getname(space, "")
-            if clsdescr:
+            clsdescr = self.w_class.getname(space)
+            if clsdescr and clsdescr != '?':
                 clsdescr += " instance"
             else:
                 clsdescr = "instance"
             if w_firstarg is None:
                 instdescr = "nothing"
             else:
-                instname = space.abstract_getclass(w_firstarg).getname(space,
-                                                                       "")
-                if instname:
+                instname = space.abstract_getclass(w_firstarg).getname(space)
+                if instname and instname != '?':
                     instdescr = instname + " instance"
                 else:
                     instdescr = "instance"

pypy/interpreter/generator.py

File contents unchanged.

pypy/interpreter/pycode.py

                         tuple(self.co_cellvars))
 
     def exec_host_bytecode(self, w_globals, w_locals):
-        from pypy.interpreter.pyframe import CPythonFrame
-        frame = CPythonFrame(self.space, self, w_globals, None)
+        if sys.version_info < (2, 7):
+            raise Exception("PyPy no longer supports Python 2.6 or lower")
+        from pypy.interpreter.pyframe import PyFrame
+        frame = self.space.FrameClass(self.space, self, w_globals, None)
         frame.setdictscope(w_locals)
         return frame.run()
 

pypy/interpreter/pyframe.py

 
     def __init__(self, space, code, w_globals, outer_func):
         if not we_are_translated():
-            assert type(self) in (space.FrameClass, CPythonFrame), (
+            assert type(self) == space.FrameClass, (
                 "use space.FrameClass(), not directly PyFrame()")
         self = hint(self, access_directly=True, fresh_virtualizable=True)
         assert isinstance(code, pycode.PyCode)
         # CO_OPTIMIZED: no locals dict needed at all
         # NB: this method is overridden in nestedscope.py
         flags = code.co_flags
-        if flags & pycode.CO_OPTIMIZED: 
-            return 
+        if flags & pycode.CO_OPTIMIZED:
+            return
         if flags & pycode.CO_NEWLOCALS:
             self.w_locals = self.space.newdict(module=True)
         else:
                 executioncontext.return_trace(self, self.space.w_None)
                 raise
             executioncontext.return_trace(self, w_exitvalue)
-            # clean up the exception, might be useful for not
-            # allocating exception objects in some cases
-            self.last_exception = None
+            # it used to say self.last_exception = None
+            # this is now done by the code in pypyjit module
+            # since we don't want to invalidate the virtualizable
+            # for no good reason
             got_exception = False
         finally:
             executioncontext.leave(self, w_exitvalue, got_exception)
                 break
             w_value = self.peekvalue(delta)
             self.pushvalue(w_value)
-        
+
     def peekvalue(self, index_from_top=0):
         # NOTE: top of the stack is peekvalue(0).
         # Contrast this with CPython where it's PEEK(-1).
         nlocals = self.pycode.co_nlocals
         values_w = self.locals_stack_w[nlocals:self.valuestackdepth]
         w_valuestack = maker.slp_into_tuple_with_nulls(space, values_w)
-        
+
         w_blockstack = nt([block._get_state_(space) for block in self.get_blocklist()])
         w_fastlocals = maker.slp_into_tuple_with_nulls(
             space, self.locals_stack_w[:nlocals])
         else:
             w_exc_value = self.last_exception.get_w_value(space)
             w_tb = w(self.last_exception.get_traceback())
-        
+
         tup_state = [
             w(self.f_backref()),
             w(self.get_builtin()),
             w(f_lineno),
             w_fastlocals,
             space.w_None,           #XXX placeholder for f_locals
-            
+
             #f_restricted requires no additional data!
             space.w_None, ## self.w_f_trace,  ignore for now
 
             ncellvars = len(pycode.co_cellvars)
             cellvars = cells[:ncellvars]
             closure = cells[ncellvars:]
-        
+
         # do not use the instance's __init__ but the base's, because we set
         # everything like cells from here
         # XXX hack
 
     ### line numbers ###
 
-    def fget_f_lineno(self, space): 
+    def fget_f_lineno(self, space):
         "Returns the line number of the instruction currently being executed."
         if self.w_f_trace is None:
             return space.wrap(self.get_last_lineno())
         except OperationError, e:
             raise OperationError(space.w_ValueError,
                                  space.wrap("lineno must be an integer"))
-            
+
         if self.w_f_trace is None:
             raise OperationError(space.w_ValueError,
                   space.wrap("f_lineno can only be set by a trace function."))
         if ord(code[new_lasti]) in (DUP_TOP, POP_TOP):
             raise OperationError(space.w_ValueError,
                   space.wrap("can't jump to 'except' line as there's no exception"))
-            
+
         # Don't jump into or out of a finally block.
         f_lasti_setup_addr = -1
         new_lasti_setup_addr = -1
                         if addr == self.last_instr:
                             f_lasti_setup_addr = setup_addr
                         break
-                    
+
             if op >= HAVE_ARGUMENT:
                 addr += 3
             else:
                 addr += 1
-                
+
         assert len(blockstack) == 0
 
         if new_lasti_setup_addr != f_lasti_setup_addr:
             block = self.pop_block()
             block.cleanup(self)
             f_iblock -= 1
-            
+
         self.f_lineno = new_lineno
         self.last_instr = new_lasti
-            
+
     def get_last_lineno(self):
         "Returns the line number of the instruction currently being executed."
         return pytraceback.offset2lineno(self.pycode, self.last_instr)
             self.f_lineno = self.get_last_lineno()
             space.frame_trace_action.fire()
 
-    def fdel_f_trace(self, space): 
-        self.w_f_trace = None 
+    def fdel_f_trace(self, space):
+        self.w_f_trace = None
 
     def fget_f_exc_type(self, space):
         if self.last_exception is not None:
             if f is not None:
                 return f.last_exception.w_type
         return space.w_None
-         
+
     def fget_f_exc_value(self, space):
         if self.last_exception is not None:
             f = self.f_backref()
             if f is not None:
                 return space.wrap(f.last_exception.get_traceback())
         return space.w_None
-         
+
     def fget_f_restricted(self, space):
         if space.config.objspace.honor__builtins__:
             return space.wrap(self.builtin is not space.builtin)
         return space.w_False
 
-class CPythonFrame(PyFrame):
-    """
-    Execution of host (CPython) opcodes.
-    """
-
-    bytecode_spec = host_bytecode_spec
-    opcode_method_names = host_bytecode_spec.method_names
-    opcodedesc = host_bytecode_spec.opcodedesc
-    opdescmap = host_bytecode_spec.opdescmap
-    HAVE_ARGUMENT = host_bytecode_spec.HAVE_ARGUMENT
-
 
 # ____________________________________________________________
 

pypy/interpreter/pyopcode.py

 from rpython.rlib.objectmodel import we_are_translated
 from rpython.rlib import jit, rstackovf
 from rpython.rlib.rarithmetic import r_uint, intmask
-from rpython.rlib.unroll import unrolling_iterable
 from rpython.rlib.debug import check_nonneg
-from pypy.tool.stdlib_opcode import (bytecode_spec,
-                                     unrolling_all_opcode_descs)
+from pypy.tool.stdlib_opcode import bytecode_spec
 
 def unaryoperation(operationname):
     """NOT_RPYTHON"""
 
     return func_with_new_name(opimpl, "opcode_impl_for_%s" % operationname)