Commits

mattip  committed 5c09a84 Merge

merge with default, still waiting for review

  • Parent commits 6e5f01f, f1509ba
  • Branches numpypy-out

Comments (0)

Files changed (71)

File lib-python/modified-2.7/ctypes/test/test_arrays.py

 import unittest
 from ctypes import *
+from test.test_support import impl_detail
 
 formats = "bBhHiIlLqQfd"
 
+# c_longdouble commented out for PyPy, look at the comment in test_longdouble
 formats = c_byte, c_ubyte, c_short, c_ushort, c_int, c_uint, \
-          c_long, c_ulonglong, c_float, c_double, c_longdouble
+          c_long, c_ulonglong, c_float, c_double #, c_longdouble
 
 class ArrayTestCase(unittest.TestCase):
+
+    @impl_detail('long double not supported by PyPy', pypy=False)
+    def test_longdouble(self):
+        """
+        This test is empty. It's just here as a reminder that we commented
+        out c_longdouble in "formats". If PyPy ever supports c_longdouble,
+        we should kill this test and uncomment c_longdouble inside formats.
+        """
+
     def test_simple(self):
         # create classes holding simple numeric types, and check
         # various properties.

File lib_pypy/cPickle.py

 # One-liner implementation of cPickle
 #
 
-from pickle import *
+from pickle import Pickler, dump, dumps, PickleError, PicklingError, UnpicklingError, _EmptyClass
 from pickle import __doc__, __version__, format_version, compatible_formats
+from types import *
+from copy_reg import dispatch_table
+from copy_reg import _extension_registry, _inverted_registry, _extension_cache
+import marshal, struct, sys
 
 try: from __pypy__ import builtinify
 except ImportError: builtinify = lambda f: f
 
+# These are purely informational; no code uses these.
+format_version = "2.0"                  # File format version we write
+compatible_formats = ["1.0",            # Original protocol 0
+                      "1.1",            # Protocol 0 with INST added
+                      "1.2",            # Original protocol 1
+                      "1.3",            # Protocol 1 with BINFLOAT added
+                      "2.0",            # Protocol 2
+                      ]                 # Old format versions we can read
+
+# Keep in synch with cPickle.  This is the highest protocol number we
+# know how to read.
+HIGHEST_PROTOCOL = 2
 
 BadPickleGet = KeyError
 UnpickleableError = PicklingError
 
+MARK            = ord('(')   # push special markobject on stack
+STOP            = ord('.')   # every pickle ends with STOP
+POP             = ord('0')   # discard topmost stack item
+POP_MARK        = ord('1')   # discard stack top through topmost markobject
+DUP             = ord('2')   # duplicate top stack item
+FLOAT           = ord('F')   # push float object; decimal string argument
+INT             = ord('I')   # push integer or bool; decimal string argument
+BININT          = ord('J')   # push four-byte signed int
+BININT1         = ord('K')   # push 1-byte unsigned int
+LONG            = ord('L')   # push long; decimal string argument
+BININT2         = ord('M')   # push 2-byte unsigned int
+NONE            = ord('N')   # push None
+PERSID          = ord('P')   # push persistent object; id is taken from string arg
+BINPERSID       = ord('Q')   #  "       "         "  ;  "  "   "     "  stack
+REDUCE          = ord('R')   # apply callable to argtuple, both on stack
+STRING          = ord('S')   # push string; NL-terminated string argument
+BINSTRING       = ord('T')   # push string; counted binary string argument
+SHORT_BINSTRING = ord('U')   #  "     "   ;    "      "       "      " < 256 bytes
+UNICODE         = ord('V')   # push Unicode string; raw-unicode-escaped'd argument
+BINUNICODE      = ord('X')   #   "     "       "  ; counted UTF-8 string argument
+APPEND          = ord('a')   # append stack top to list below it
+BUILD           = ord('b')   # call __setstate__ or __dict__.update()
+GLOBAL          = ord('c')   # push self.find_class(modname, name); 2 string args
+DICT            = ord('d')   # build a dict from stack items
+EMPTY_DICT      = ord('}')   # push empty dict
+APPENDS         = ord('e')   # extend list on stack by topmost stack slice
+GET             = ord('g')   # push item from memo on stack; index is string arg
+BINGET          = ord('h')   #   "    "    "    "   "   "  ;   "    " 1-byte arg
+INST            = ord('i')   # build & push class instance
+LONG_BINGET     = ord('j')   # push item from memo on stack; index is 4-byte arg
+LIST            = ord('l')   # build list from topmost stack items
+EMPTY_LIST      = ord(']')   # push empty list
+OBJ             = ord('o')   # build & push class instance
+PUT             = ord('p')   # store stack top in memo; index is string arg
+BINPUT          = ord('q')   #   "     "    "   "   " ;   "    " 1-byte arg
+LONG_BINPUT     = ord('r')   #   "     "    "   "   " ;   "    " 4-byte arg
+SETITEM         = ord('s')   # add key+value pair to dict
+TUPLE           = ord('t')   # build tuple from topmost stack items
+EMPTY_TUPLE     = ord(')')   # push empty tuple
+SETITEMS        = ord('u')   # modify dict by adding topmost key+value pairs
+BINFLOAT        = ord('G')   # push float; arg is 8-byte float encoding
+
+TRUE            = 'I01\n'  # not an opcode; see INT docs in pickletools.py
+FALSE           = 'I00\n'  # not an opcode; see INT docs in pickletools.py
+
+# Protocol 2
+
+PROTO           = ord('\x80')  # identify pickle protocol
+NEWOBJ          = ord('\x81')  # build object by applying cls.__new__ to argtuple
+EXT1            = ord('\x82')  # push object from extension registry; 1-byte index
+EXT2            = ord('\x83')  # ditto, but 2-byte index
+EXT4            = ord('\x84')  # ditto, but 4-byte index
+TUPLE1          = ord('\x85')  # build 1-tuple from stack top
+TUPLE2          = ord('\x86')  # build 2-tuple from two topmost stack items
+TUPLE3          = ord('\x87')  # build 3-tuple from three topmost stack items
+NEWTRUE         = ord('\x88')  # push True
+NEWFALSE        = ord('\x89')  # push False
+LONG1           = ord('\x8a')  # push long from < 256 bytes
+LONG4           = ord('\x8b')  # push really big long
+
+_tuplesize2code = [EMPTY_TUPLE, TUPLE1, TUPLE2, TUPLE3]
+
+
 # ____________________________________________________________
 # XXX some temporary dark magic to produce pickled dumps that are
 #     closer to the ones produced by cPickle in CPython
     file = StringIO()
     Pickler(file, protocol).dump(obj)
     return file.getvalue()
+
+# Why use struct.pack() for pickling but marshal.loads() for
+# unpickling?  struct.pack() is 40% faster than marshal.dumps(), but
+# marshal.loads() is twice as fast as struct.unpack()!
+mloads = marshal.loads
+
+# Unpickling machinery
+
+class Unpickler(object):
+
+    def __init__(self, file):
+        """This takes a file-like object for reading a pickle data stream.
+
+        The protocol version of the pickle is detected automatically, so no
+        proto argument is needed.
+
+        The file-like object must have two methods, a read() method that
+        takes an integer argument, and a readline() method that requires no
+        arguments.  Both methods should return a string.  Thus file-like
+        object can be a file object opened for reading, a StringIO object,
+        or any other custom object that meets this interface.
+        """
+        self.readline = file.readline
+        self.read = file.read
+        self.memo = {}
+
+    def load(self):
+        """Read a pickled object representation from the open file.
+
+        Return the reconstituted object hierarchy specified in the file.
+        """
+        self.mark = object() # any new unique object
+        self.stack = []
+        self.append = self.stack.append
+        try:
+            key = ord(self.read(1))
+            while key != STOP:
+                self.dispatch[key](self)
+                key = ord(self.read(1))
+        except TypeError:
+            if self.read(1) == '':
+                raise EOFError
+            raise
+        return self.stack.pop()
+
+    # Return largest index k such that self.stack[k] is self.mark.
+    # If the stack doesn't contain a mark, eventually raises IndexError.
+    # This could be sped by maintaining another stack, of indices at which
+    # the mark appears.  For that matter, the latter stack would suffice,
+    # and we wouldn't need to push mark objects on self.stack at all.
+    # Doing so is probably a good thing, though, since if the pickle is
+    # corrupt (or hostile) we may get a clue from finding self.mark embedded
+    # in unpickled objects.
+    def marker(self):
+        k = len(self.stack)-1
+        while self.stack[k] is not self.mark: k -= 1
+        return k
+
+    dispatch = {}
+
+    def load_proto(self):
+        proto = ord(self.read(1))
+        if not 0 <= proto <= 2:
+            raise ValueError, "unsupported pickle protocol: %d" % proto
+    dispatch[PROTO] = load_proto
+
+    def load_persid(self):
+        pid = self.readline()[:-1]
+        self.append(self.persistent_load(pid))
+    dispatch[PERSID] = load_persid
+
+    def load_binpersid(self):
+        pid = self.stack.pop()
+        self.append(self.persistent_load(pid))
+    dispatch[BINPERSID] = load_binpersid
+
+    def load_none(self):
+        self.append(None)
+    dispatch[NONE] = load_none
+
+    def load_false(self):
+        self.append(False)
+    dispatch[NEWFALSE] = load_false
+
+    def load_true(self):
+        self.append(True)
+    dispatch[NEWTRUE] = load_true
+
+    def load_int(self):
+        data = self.readline()
+        if data == FALSE[1:]:
+            val = False
+        elif data == TRUE[1:]:
+            val = True
+        else:
+            try:
+                val = int(data)
+            except ValueError:
+                val = long(data)
+        self.append(val)
+    dispatch[INT] = load_int
+
+    def load_binint(self):
+        self.append(mloads('i' + self.read(4)))
+    dispatch[BININT] = load_binint
+
+    def load_binint1(self):
+        self.append(ord(self.read(1)))
+    dispatch[BININT1] = load_binint1
+
+    def load_binint2(self):
+        self.append(mloads('i' + self.read(2) + '\000\000'))
+    dispatch[BININT2] = load_binint2
+
+    def load_long(self):
+        self.append(long(self.readline()[:-1], 0))
+    dispatch[LONG] = load_long
+
+    def load_long1(self):
+        n = ord(self.read(1))
+        bytes = self.read(n)
+        self.append(decode_long(bytes))
+    dispatch[LONG1] = load_long1
+
+    def load_long4(self):
+        n = mloads('i' + self.read(4))
+        bytes = self.read(n)
+        self.append(decode_long(bytes))
+    dispatch[LONG4] = load_long4
+
+    def load_float(self):
+        self.append(float(self.readline()[:-1]))
+    dispatch[FLOAT] = load_float
+
+    def load_binfloat(self, unpack=struct.unpack):
+        self.append(unpack('>d', self.read(8))[0])
+    dispatch[BINFLOAT] = load_binfloat
+
+    def load_string(self):
+        rep = self.readline()
+        if len(rep) < 3:
+            raise ValueError, "insecure string pickle"
+        if rep[0] == "'" == rep[-2]:
+            rep = rep[1:-2]
+        elif rep[0] == '"' == rep[-2]:
+            rep = rep[1:-2]
+        else:
+            raise ValueError, "insecure string pickle"
+        self.append(rep.decode("string-escape"))
+    dispatch[STRING] = load_string
+
+    def load_binstring(self):
+        L = mloads('i' + self.read(4))
+        self.append(self.read(L))
+    dispatch[BINSTRING] = load_binstring
+
+    def load_unicode(self):
+        self.append(unicode(self.readline()[:-1],'raw-unicode-escape'))
+    dispatch[UNICODE] = load_unicode
+
+    def load_binunicode(self):
+        L = mloads('i' + self.read(4))
+        self.append(unicode(self.read(L),'utf-8'))
+    dispatch[BINUNICODE] = load_binunicode
+
+    def load_short_binstring(self):
+        L = ord(self.read(1))
+        self.append(self.read(L))
+    dispatch[SHORT_BINSTRING] = load_short_binstring
+
+    def load_tuple(self):
+        k = self.marker()
+        self.stack[k:] = [tuple(self.stack[k+1:])]
+    dispatch[TUPLE] = load_tuple
+
+    def load_empty_tuple(self):
+        self.stack.append(())
+    dispatch[EMPTY_TUPLE] = load_empty_tuple
+
+    def load_tuple1(self):
+        self.stack[-1] = (self.stack[-1],)
+    dispatch[TUPLE1] = load_tuple1
+
+    def load_tuple2(self):
+        self.stack[-2:] = [(self.stack[-2], self.stack[-1])]
+    dispatch[TUPLE2] = load_tuple2
+
+    def load_tuple3(self):
+        self.stack[-3:] = [(self.stack[-3], self.stack[-2], self.stack[-1])]
+    dispatch[TUPLE3] = load_tuple3
+
+    def load_empty_list(self):
+        self.stack.append([])
+    dispatch[EMPTY_LIST] = load_empty_list
+
+    def load_empty_dictionary(self):
+        self.stack.append({})
+    dispatch[EMPTY_DICT] = load_empty_dictionary
+
+    def load_list(self):
+        k = self.marker()
+        self.stack[k:] = [self.stack[k+1:]]
+    dispatch[LIST] = load_list
+
+    def load_dict(self):
+        k = self.marker()
+        d = {}
+        items = self.stack[k+1:]
+        for i in range(0, len(items), 2):
+            key = items[i]
+            value = items[i+1]
+            d[key] = value
+        self.stack[k:] = [d]
+    dispatch[DICT] = load_dict
+
+    # INST and OBJ differ only in how they get a class object.  It's not
+    # only sensible to do the rest in a common routine, the two routines
+    # previously diverged and grew different bugs.
+    # klass is the class to instantiate, and k points to the topmost mark
+    # object, following which are the arguments for klass.__init__.
+    def _instantiate(self, klass, k):
+        args = tuple(self.stack[k+1:])
+        del self.stack[k:]
+        instantiated = 0
+        if (not args and
+                type(klass) is ClassType and
+                not hasattr(klass, "__getinitargs__")):
+            try:
+                value = _EmptyClass()
+                value.__class__ = klass
+                instantiated = 1
+            except RuntimeError:
+                # In restricted execution, assignment to inst.__class__ is
+                # prohibited
+                pass
+        if not instantiated:
+            try:
+                value = klass(*args)
+            except TypeError, err:
+                raise TypeError, "in constructor for %s: %s" % (
+                    klass.__name__, str(err)), sys.exc_info()[2]
+        self.append(value)
+
+    def load_inst(self):
+        module = self.readline()[:-1]
+        name = self.readline()[:-1]
+        klass = self.find_class(module, name)
+        self._instantiate(klass, self.marker())
+    dispatch[INST] = load_inst
+
+    def load_obj(self):
+        # Stack is ... markobject classobject arg1 arg2 ...
+        k = self.marker()
+        klass = self.stack.pop(k+1)
+        self._instantiate(klass, k)
+    dispatch[OBJ] = load_obj
+
+    def load_newobj(self):
+        args = self.stack.pop()
+        cls = self.stack[-1]
+        obj = cls.__new__(cls, *args)
+        self.stack[-1] = obj
+    dispatch[NEWOBJ] = load_newobj
+
+    def load_global(self):
+        module = self.readline()[:-1]
+        name = self.readline()[:-1]
+        klass = self.find_class(module, name)
+        self.append(klass)
+    dispatch[GLOBAL] = load_global
+
+    def load_ext1(self):
+        code = ord(self.read(1))
+        self.get_extension(code)
+    dispatch[EXT1] = load_ext1
+
+    def load_ext2(self):
+        code = mloads('i' + self.read(2) + '\000\000')
+        self.get_extension(code)
+    dispatch[EXT2] = load_ext2
+
+    def load_ext4(self):
+        code = mloads('i' + self.read(4))
+        self.get_extension(code)
+    dispatch[EXT4] = load_ext4
+
+    def get_extension(self, code):
+        nil = []
+        obj = _extension_cache.get(code, nil)
+        if obj is not nil:
+            self.append(obj)
+            return
+        key = _inverted_registry.get(code)
+        if not key:
+            raise ValueError("unregistered extension code %d" % code)
+        obj = self.find_class(*key)
+        _extension_cache[code] = obj
+        self.append(obj)
+
+    def find_class(self, module, name):
+        # Subclasses may override this
+        __import__(module)
+        mod = sys.modules[module]
+        klass = getattr(mod, name)
+        return klass
+
+    def load_reduce(self):
+        args = self.stack.pop()
+        func = self.stack[-1]
+        value = self.stack[-1](*args)
+        self.stack[-1] = value
+    dispatch[REDUCE] = load_reduce
+
+    def load_pop(self):
+        del self.stack[-1]
+    dispatch[POP] = load_pop
+
+    def load_pop_mark(self):
+        k = self.marker()
+        del self.stack[k:]
+    dispatch[POP_MARK] = load_pop_mark
+
+    def load_dup(self):
+        self.append(self.stack[-1])
+    dispatch[DUP] = load_dup
+
+    def load_get(self):
+        self.append(self.memo[self.readline()[:-1]])
+    dispatch[GET] = load_get
+
+    def load_binget(self):
+        i = ord(self.read(1))
+        self.append(self.memo[repr(i)])
+    dispatch[BINGET] = load_binget
+
+    def load_long_binget(self):
+        i = mloads('i' + self.read(4))
+        self.append(self.memo[repr(i)])
+    dispatch[LONG_BINGET] = load_long_binget
+
+    def load_put(self):
+        self.memo[self.readline()[:-1]] = self.stack[-1]
+    dispatch[PUT] = load_put
+
+    def load_binput(self):
+        i = ord(self.read(1))
+        self.memo[repr(i)] = self.stack[-1]
+    dispatch[BINPUT] = load_binput
+
+    def load_long_binput(self):
+        i = mloads('i' + self.read(4))
+        self.memo[repr(i)] = self.stack[-1]
+    dispatch[LONG_BINPUT] = load_long_binput
+
+    def load_append(self):
+        value = self.stack.pop()
+        self.stack[-1].append(value)
+    dispatch[APPEND] = load_append
+
+    def load_appends(self):
+        stack = self.stack
+        mark = self.marker()
+        lst = stack[mark - 1]
+        lst.extend(stack[mark + 1:])
+        del stack[mark:]
+    dispatch[APPENDS] = load_appends
+
+    def load_setitem(self):
+        stack = self.stack
+        value = stack.pop()
+        key = stack.pop()
+        dict = stack[-1]
+        dict[key] = value
+    dispatch[SETITEM] = load_setitem
+
+    def load_setitems(self):
+        stack = self.stack
+        mark = self.marker()
+        dict = stack[mark - 1]
+        for i in range(mark + 1, len(stack), 2):
+            dict[stack[i]] = stack[i + 1]
+
+        del stack[mark:]
+    dispatch[SETITEMS] = load_setitems
+
+    def load_build(self):
+        stack = self.stack
+        state = stack.pop()
+        inst = stack[-1]
+        setstate = getattr(inst, "__setstate__", None)
+        if setstate:
+            setstate(state)
+            return
+        slotstate = None
+        if isinstance(state, tuple) and len(state) == 2:
+            state, slotstate = state
+        if state:
+            try:
+                d = inst.__dict__
+                try:
+                    for k, v in state.iteritems():
+                        d[intern(k)] = v
+                # keys in state don't have to be strings
+                # don't blow up, but don't go out of our way
+                except TypeError:
+                    d.update(state)
+
+            except RuntimeError:
+                # XXX In restricted execution, the instance's __dict__
+                # is not accessible.  Use the old way of unpickling
+                # the instance variables.  This is a semantic
+                # difference when unpickling in restricted
+                # vs. unrestricted modes.
+                # Note, however, that cPickle has never tried to do the
+                # .update() business, and always uses
+                #     PyObject_SetItem(inst.__dict__, key, value) in a
+                # loop over state.items().
+                for k, v in state.items():
+                    setattr(inst, k, v)
+        if slotstate:
+            for k, v in slotstate.items():
+                setattr(inst, k, v)
+    dispatch[BUILD] = load_build
+
+    def load_mark(self):
+        self.append(self.mark)
+    dispatch[MARK] = load_mark
+
+#from pickle import decode_long
+
+def decode_long(data):
+    r"""Decode a long from a two's complement little-endian binary string.
+
+    >>> decode_long('')
+    0L
+    >>> decode_long("\xff\x00")
+    255L
+    >>> decode_long("\xff\x7f")
+    32767L
+    >>> decode_long("\x00\xff")
+    -256L
+    >>> decode_long("\x00\x80")
+    -32768L
+    >>> decode_long("\x80")
+    -128L
+    >>> decode_long("\x7f")
+    127L
+    """
+
+    nbytes = len(data)
+    if nbytes == 0:
+        return 0L
+    ind = nbytes - 1
+    while ind and ord(data[ind]) == 0:
+        ind -= 1
+    n = ord(data[ind])
+    while ind:
+        n <<= 8
+        ind -= 1
+        if ord(data[ind]):
+            n += ord(data[ind])
+    if ord(data[nbytes - 1]) >= 128:
+        n -= 1L << (nbytes << 3)
+    return n
+
+def load(f):
+    return Unpickler(f).load()
+
+def loads(str):
+    f = StringIO(str)
+    return Unpickler(f).load()
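
The Unpickler added above only relies on the file-like object's read(n) and readline() methods, as its docstring says. A quick round-trip sketch (Python 2; assumes this file is the module actually imported as cPickle, which is the case on PyPy):

    # Any object with read(n) and readline() works, not only real files.
    from StringIO import StringIO
    import cPickle   # on PyPy this resolves to lib_pypy/cPickle.py

    data = cPickle.dumps({'spam': 1, 'eggs': [2, 3]}, 2)   # protocol 2
    obj = cPickle.Unpickler(StringIO(data)).load()
    assert obj == {'spam': 1, 'eggs': [2, 3]}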

File lib_pypy/datetime.py

     def __setstate(self, string):
         if len(string) != 4 or not (1 <= ord(string[2]) <= 12):
             raise TypeError("not enough arguments")
-        yhi, ylo, self._month, self._day = map(ord, string)
-        self._year = yhi * 256 + ylo
+        self._month, self._day = ord(string[2]), ord(string[3])
+        self._year = ord(string[0]) * 256 + ord(string[1])
 
     def __reduce__(self):
         return (self.__class__, self._getstate())
     def __setstate(self, string, tzinfo):
         if len(string) != 6 or ord(string[0]) >= 24:
             raise TypeError("an integer is required")
-        self._hour, self._minute, self._second, us1, us2, us3 = \
-                                                            map(ord, string)
-        self._microsecond = (((us1 << 8) | us2) << 8) | us3
+        self._hour, self._minute, self._second = ord(string[0]), \
+                                                 ord(string[1]), ord(string[2])
+        self._microsecond = (((ord(string[3]) << 8) | \
+                            ord(string[4])) << 8) | ord(string[5])
         self._tzinfo = tzinfo
 
     def __reduce__(self):
             return (basestate, self._tzinfo)
 
     def __setstate(self, string, tzinfo):
-        (yhi, ylo, self._month, self._day, self._hour,
-         self._minute, self._second, us1, us2, us3) = map(ord, string)
-        self._year = yhi * 256 + ylo
-        self._microsecond = (((us1 << 8) | us2) << 8) | us3
+        (self._month, self._day, self._hour, self._minute,
+            self._second) = (ord(string[2]), ord(string[3]), ord(string[4]),
+                             ord(string[5]), ord(string[6]))
+        self._year = ord(string[0]) * 256 + ord(string[1])
+        self._microsecond = (((ord(string[7]) << 8) | ord(string[8])) << 8) | ord(string[9])
         self._tzinfo = tzinfo
 
     def __reduce__(self):
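
For reference, the date state decoded above is four bytes (year high byte, year low byte, month, day); the time and datetime states similarly pack the microsecond value into three bytes. A minimal Python 2 check of the date layout (illustrative only):

    state = chr(2012 // 256) + chr(2012 % 256) + chr(3) + chr(15)
    year = ord(state[0]) * 256 + ord(state[1])
    month, day = ord(state[2]), ord(state[3])
    assert (year, month, day) == (2012, 3, 15)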

File pypy/config/pypyoption.py

                and not p.basename.startswith('test')]
 
 essential_modules = dict.fromkeys(
-    ["exceptions", "_file", "sys", "__builtin__", "posix"]
+    ["exceptions", "_file", "sys", "__builtin__", "posix", "_warnings"]
 )
 
 default_modules = essential_modules.copy()

File pypy/interpreter/baseobjspace.py

 
     def warn(self, msg, w_warningcls):
         self.appexec([self.wrap(msg), w_warningcls], """(msg, warningcls):
-            import warnings
-            warnings.warn(msg, warningcls, stacklevel=2)
+            import _warnings
+            _warnings.warn(msg, warningcls, stacklevel=2)
         """)
 
     def resolve_target(self, w_obj):

File pypy/interpreter/pyparser/parsestring.py

 from pypy.interpreter.error import OperationError
 from pypy.interpreter import unicodehelper
+from pypy.rlib.rstring import StringBuilder
 
 def parsestr(space, encoding, s, unicode_literals=False):
     # compiler.transformer.Transformer.decode_literal depends on what 
     the string is UTF-8 encoded and should be re-encoded in the
     specified encoding.
     """
-    lis = []
+    builder = StringBuilder(len(s))
     ps = 0
     end = len(s)
-    while ps < end:
-        if s[ps] != '\\':
-            # note that the C code has a label here.
-            # the logic is the same.
+    while 1:
+        ps2 = ps
+        while ps < end and s[ps] != '\\':
             if recode_encoding and ord(s[ps]) & 0x80:
                 w, ps = decode_utf8(space, s, ps, end, recode_encoding)
-                # Append bytes to output buffer.
-                lis.append(w)
+                builder.append(w)
+                ps2 = ps
             else:
-                lis.append(s[ps])
                 ps += 1
-            continue
+        if ps > ps2:
+            builder.append_slice(s, ps2, ps)
+        if ps == end:
+            break
+
         ps += 1
         if ps == end:
             raise_app_valueerror(space, 'Trailing \\ in string')
         if ch == '\n':
             pass
         elif ch == '\\':
-            lis.append('\\')
+            builder.append('\\')
         elif ch == "'":
-            lis.append("'")
+            builder.append("'")
         elif ch == '"':
-            lis.append('"')
+            builder.append('"')
         elif ch == 'b':
-            lis.append("\010")
+            builder.append("\010")
         elif ch == 'f':
-            lis.append('\014') # FF
+            builder.append('\014') # FF
         elif ch == 't':
-            lis.append('\t')
+            builder.append('\t')
         elif ch == 'n':
-            lis.append('\n')
+            builder.append('\n')
         elif ch == 'r':
-            lis.append('\r')
+            builder.append('\r')
         elif ch == 'v':
-            lis.append('\013') # VT
+            builder.append('\013') # VT
         elif ch == 'a':
-            lis.append('\007') # BEL, not classic C
+            builder.append('\007') # BEL, not classic C
         elif ch in '01234567':
             # Look for up to two more octal digits
             span = ps
             # emulate a strange wrap-around behavior of CPython:
             # \400 is the same as \000 because 0400 == 256
             num = int(octal, 8) & 0xFF
-            lis.append(chr(num))
+            builder.append(chr(num))
             ps = span
         elif ch == 'x':
             if ps+2 <= end and isxdigit(s[ps]) and isxdigit(s[ps + 1]):
                 hexa = s[ps : ps + 2]
                 num = int(hexa, 16)
-                lis.append(chr(num))
+                builder.append(chr(num))
                 ps += 2
             else:
                 raise_app_valueerror(space, 'invalid \\x escape')
             # this was not an escape, so the backslash
             # has to be added, and we start over in
             # non-escape mode.
-            lis.append('\\')
+            builder.append('\\')
             ps -= 1
             assert ps >= 0
             continue
             # an arbitrary number of unescaped UTF-8 bytes may follow.
 
-    buf = ''.join(lis)
+    buf = builder.build()
     return buf
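
The loop above now collects output in RPython's StringBuilder and copies whole unescaped runs with append_slice() instead of appending one character at a time. A rough standalone illustration of that idiom (not the parser code itself):

    from pypy.rlib.rstring import StringBuilder

    def strip_backslashes(s):
        # copy s without backslashes, appending each unescaped run as one slice
        builder = StringBuilder(len(s))      # pre-size to the input length
        start = 0
        for i in range(len(s)):
            if s[i] == '\\':
                if start < i:
                    builder.append_slice(s, start, i)
                start = i + 1
        if start < len(s):
            builder.append_slice(s, start, len(s))
        return builder.build()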
 
 

File pypy/interpreter/streamutil.py

+from pypy.rlib.streamio import StreamError
+from pypy.interpreter.error import OperationError, wrap_oserror2
+
+def wrap_streamerror(space, e, w_filename=None):
+    if isinstance(e, StreamError):
+        return OperationError(space.w_ValueError,
+                              space.wrap(e.message))
+    elif isinstance(e, OSError):
+        return wrap_oserror_as_ioerror(space, e, w_filename)
+    else:
+        # should not happen: wrap_streamerror() is only called when
+        # StreamErrors = (OSError, StreamError) are raised
+        return OperationError(space.w_IOError, space.w_None)
+
+def wrap_oserror_as_ioerror(space, e, w_filename=None):
+    return wrap_oserror2(space, e, w_filename,
+                         w_exception_class=space.w_IOError)

File pypy/interpreter/test/test_typedef.py

         assert_method(w_o1, "c", True)
         assert_method(w_o2, "c", False)
 
+    def test_total_ordering(self):
+        class W_SomeType(Wrappable):
+            def __init__(self, space, x):
+                self.space = space
+                self.x = x
+
+            def descr__lt(self, w_other):
+                assert isinstance(w_other, W_SomeType)
+                return self.space.wrap(self.x < w_other.x)
+
+            def descr__eq(self, w_other):
+                assert isinstance(w_other, W_SomeType)
+                return self.space.wrap(self.x == w_other.x)
+
+        W_SomeType.typedef = typedef.TypeDef(
+            'some_type',
+            __total_ordering__ = 'auto',
+            __lt__ = interp2app(W_SomeType.descr__lt),
+            __eq__ = interp2app(W_SomeType.descr__eq),
+            )
+        space = self.space
+        w_b = space.wrap(W_SomeType(space, 2))
+        w_c = space.wrap(W_SomeType(space, 2))
+        w_a = space.wrap(W_SomeType(space, 1))
+        # explicitly defined
+        assert space.is_true(space.lt(w_a, w_b))
+        assert not space.is_true(space.eq(w_a, w_b))
+        assert space.is_true(space.eq(w_b, w_c))
+        # automatically defined
+        assert space.is_true(space.le(w_a, w_b))
+        assert space.is_true(space.le(w_b, w_c))
+        assert space.is_true(space.gt(w_b, w_a))
+        assert space.is_true(space.ge(w_b, w_a))
+        assert space.is_true(space.ge(w_b, w_c))
+        assert space.is_true(space.ne(w_a, w_b))
+        assert not space.is_true(space.ne(w_b, w_c))
 
 class AppTestTypeDef:
 

File pypy/interpreter/typedef.py

 from pypy.rlib.jit import promote
 
 class TypeDef:
-    def __init__(self, __name, __base=None, **rawdict):
+    def __init__(self, __name, __base=None, __total_ordering__=None, **rawdict):
         "NOT_RPYTHON: initialization-time only"
         self.name = __name
         if __base is None:
         # xxx used by faking
         self.fakedcpytype = None
         self.add_entries(**rawdict)
+        assert __total_ordering__ in (None, 'auto'), "Unknown value for __total_ordering"
+        if __total_ordering__ == 'auto':
+            self.auto_total_ordering()
     
     def add_entries(self, **rawdict):
         # xxx fix the names of the methods to match what app-level expects
             if isinstance(value, (interp2app, GetSetProperty)):
                 value.name = key
         self.rawdict.update(rawdict)
-    
+
+    def auto_total_ordering(self):
+        assert '__lt__' in self.rawdict, "__total_ordering='auto' requires __lt__"
+        assert '__eq__' in self.rawdict, "__total_ordering='auto' requires __eq__"
+        self.add_entries(__le__ = auto__le__,
+                         __gt__ = auto__gt__,
+                         __ge__ = auto__ge__,
+                         __ne__ = auto__ne__)
+
     def _freeze_(self):
         # hint for the annotator: track individual constant instances of TypeDef
         return True
         return "<%s name=%r>" % (self.__class__.__name__, self.name)
 
 
+# generic special cmp methods defined on top of __lt__ and __eq__, used by
+# automatic total ordering
+
+@interp2app
+def auto__le__(space, w_self, w_other):
+    return space.not_(space.lt(w_other, w_self))
+
+@interp2app
+def auto__gt__(space, w_self, w_other):
+    return space.lt(w_other, w_self)
+
+@interp2app
+def auto__ge__(space, w_self, w_other):
+    return space.not_(space.lt(w_self, w_other))
+
+@interp2app
+def auto__ne__(space, w_self, w_other):
+    return space.not_(space.eq(w_self, w_other))
+
+
 # ____________________________________________________________
 #  Hash support
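
The four comparison helpers added above are derived from __lt__ and __eq__ alone; in plain Python terms the rules installed by auto_total_ordering() amount to:

    # plain-Python equivalents of auto__le__ / auto__gt__ / auto__ge__ / auto__ne__
    def le(a, b): return not (b < a)
    def gt(a, b): return b < a
    def ge(a, b): return not (a < b)
    def ne(a, b): return not (a == b)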
 

File pypy/jit/backend/llgraph/llimpl.py

     'unicodesetitem'  : (('ref', 'int', 'int'), 'int'),
     'cast_ptr_to_int' : (('ref',), 'int'),
     'cast_int_to_ptr' : (('int',), 'ref'),
-    'debug_merge_point': (('ref', 'int'), None),
+    'debug_merge_point': (('ref', 'int', 'int'), None),
     'force_token'     : ((), 'int'),
     'call_may_force'  : (('int', 'varargs'), 'intorptr'),
     'guard_not_forced': ((), None),

File pypy/jit/backend/llsupport/gc.py

     This is the class supporting --gcrootfinder=asmgcc.
     """
     is_shadow_stack = False
+    is_64_bit = (WORD == 8)
 
     LOC_REG       = 0
     LOC_ESP_PLUS  = 1
             self._gcmap_deadentries += 1
             item += asmgcroot.arrayitemsize
 
-    def get_basic_shape(self, is_64_bit=False):
+    def get_basic_shape(self):
         # XXX: Should this code even really know about stack frame layout of
         # the JIT?
-        if is_64_bit:
-            return [chr(self.LOC_EBP_PLUS  | 8),
-                    chr(self.LOC_EBP_MINUS | 8),
-                    chr(self.LOC_EBP_MINUS | 16),
-                    chr(self.LOC_EBP_MINUS | 24),
-                    chr(self.LOC_EBP_MINUS | 32),
-                    chr(self.LOC_EBP_MINUS | 40),
-                    chr(self.LOC_EBP_PLUS  | 0),
+        if self.is_64_bit:
+            return [chr(self.LOC_EBP_PLUS  | 4),    # return addr: at   8(%rbp)
+                    chr(self.LOC_EBP_MINUS | 4),    # saved %rbx:  at  -8(%rbp)
+                    chr(self.LOC_EBP_MINUS | 8),    # saved %r12:  at -16(%rbp)
+                    chr(self.LOC_EBP_MINUS | 12),   # saved %r13:  at -24(%rbp)
+                    chr(self.LOC_EBP_MINUS | 16),   # saved %r14:  at -32(%rbp)
+                    chr(self.LOC_EBP_MINUS | 20),   # saved %r15:  at -40(%rbp)
+                    chr(self.LOC_EBP_PLUS  | 0),    # saved %rbp:  at    (%rbp)
                     chr(0)]
         else:
             return [chr(self.LOC_EBP_PLUS  | 4),    # return addr: at   4(%ebp)
         shape.append(chr(number | flag))
 
     def add_frame_offset(self, shape, offset):
-        assert (offset & 3) == 0
+        if self.is_64_bit:
+            assert (offset & 7) == 0
+            offset >>= 1
+        else:
+            assert (offset & 3) == 0
         if offset >= 0:
             num = self.LOC_EBP_PLUS | offset
         else:
     def initialize(self):
         pass
 
-    def get_basic_shape(self, is_64_bit=False):
+    def get_basic_shape(self):
         return []
 
     def add_frame_offset(self, shape, offset):
         self.generate_function('malloc_unicode', malloc_unicode,
                                [lltype.Signed])
 
-        # Rarely called: allocate a fixed-size amount of bytes, but
-        # not in the nursery, because it is too big.  Implemented like
-        # malloc_nursery_slowpath() above.
-        self.generate_function('malloc_fixedsize', malloc_nursery_slowpath,
-                               [lltype.Signed])
+        # Never called as far as I can tell, but there for completeness:
+        # allocate a fixed-size object, but not in the nursery, because
+        # it is too big.
+        def malloc_big_fixedsize(size, tid):
+            if self.DEBUG:
+                self._random_usage_of_xmm_registers()
+            type_id = llop.extract_ushort(llgroup.HALFWORD, tid)
+            check_typeid(type_id)
+            return llop1.do_malloc_fixedsize_clear(llmemory.GCREF,
+                                                   type_id, size,
+                                                   False, False, False)
+        self.generate_function('malloc_big_fixedsize', malloc_big_fixedsize,
+                               [lltype.Signed] * 2)
 
     def _bh_malloc(self, sizedescr):
         from pypy.rpython.memory.gctypelayout import check_typeid
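
On 64-bit the frame offsets are 8-byte aligned, so add_frame_offset() halves them before packing; that is why 8(%rbp) appears as LOC_EBP_PLUS | 4 in the shape above. A small worked illustration of that encoding (the LOC_EBP_* values and the negative branch are not shown in this hunk and are assumed here):

    LOC_EBP_PLUS, LOC_EBP_MINUS = 2, 3       # assumed values, not visible in this hunk

    def encode_offset_64(offset):
        assert (offset & 7) == 0             # 8-byte aligned on x86-64
        offset >>= 1                         # halved, like add_frame_offset() above
        if offset >= 0:
            return LOC_EBP_PLUS | offset
        return LOC_EBP_MINUS | (-offset)

    assert encode_offset_64(8)  == LOC_EBP_PLUS  | 4   # return addr at  8(%rbp)
    assert encode_offset_64(-8) == LOC_EBP_MINUS | 4   # saved %rbx  at -8(%rbp)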

File pypy/jit/backend/llsupport/rewrite.py

     def handle_new_fixedsize(self, descr, op):
         assert isinstance(descr, SizeDescr)
         size = descr.size
-        self.gen_malloc_nursery(size, op.result)
-        self.gen_initialize_tid(op.result, descr.tid)
+        if self.gen_malloc_nursery(size, op.result):
+            self.gen_initialize_tid(op.result, descr.tid)
+        else:
+            self.gen_malloc_fixedsize(size, descr.tid, op.result)
 
     def handle_new_array(self, arraydescr, op):
         v_length = op.getarg(0)
                 pass    # total_size is still -1
         elif arraydescr.itemsize == 0:
             total_size = arraydescr.basesize
-        if 0 <= total_size <= 0xffffff:     # up to 16MB, arbitrarily
-            self.gen_malloc_nursery(total_size, op.result)
+        if (total_size >= 0 and
+                self.gen_malloc_nursery(total_size, op.result)):
             self.gen_initialize_tid(op.result, arraydescr.tid)
             self.gen_initialize_len(op.result, v_length, arraydescr.lendescr)
         elif self.gc_ll_descr.kind == 'boehm':
         # mark 'v_result' as freshly malloced
         self.recent_mallocs[v_result] = None
 
-    def gen_malloc_fixedsize(self, size, v_result):
-        """Generate a CALL_MALLOC_GC(malloc_fixedsize_fn, Const(size)).
-        Note that with the framework GC, this should be called very rarely.
+    def gen_malloc_fixedsize(self, size, typeid, v_result):
+        """Generate a CALL_MALLOC_GC(malloc_fixedsize_fn, ...).
+        Used on Boehm, and on the framework GC for large fixed-size
+        mallocs.  (For all I know this latter case never occurs in
+        practice, but better safe than sorry.)
         """
-        addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_fixedsize')
-        self._gen_call_malloc_gc([ConstInt(addr), ConstInt(size)], v_result,
-                                 self.gc_ll_descr.malloc_fixedsize_descr)
+        if self.gc_ll_descr.fielddescr_tid is not None:  # framework GC
+            assert (size & (WORD-1)) == 0, "size not aligned?"
+            addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_big_fixedsize')
+            args = [ConstInt(addr), ConstInt(size), ConstInt(typeid)]
+            descr = self.gc_ll_descr.malloc_big_fixedsize_descr
+        else:                                            # Boehm
+            addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_fixedsize')
+            args = [ConstInt(addr), ConstInt(size)]
+            descr = self.gc_ll_descr.malloc_fixedsize_descr
+        self._gen_call_malloc_gc(args, v_result, descr)
 
     def gen_boehm_malloc_array(self, arraydescr, v_num_elem, v_result):
         """Generate a CALL_MALLOC_GC(malloc_array_fn, ...) for Boehm."""
         """
         size = self.round_up_for_allocation(size)
         if not self.gc_ll_descr.can_use_nursery_malloc(size):
-            self.gen_malloc_fixedsize(size, v_result)
-            return
+            return False
         #
         op = None
         if self._op_malloc_nursery is not None:
         self._previous_size = size
         self._v_last_malloced_nursery = v_result
         self.recent_mallocs[v_result] = None
+        return True
 
     def gen_initialize_tid(self, v_newgcobj, tid):
         if self.gc_ll_descr.fielddescr_tid is not None:
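
Net effect of these rewrite.py changes: gen_malloc_nursery() now reports whether the object fits in the nursery, and the callers fall back to the out-of-line helper (malloc_big_fixedsize on the framework GC, plain malloc_fixedsize on Boehm) only when it does not. Schematically (illustrative, not the rewriter itself):

    def allocate_fixedsize(rewriter, size, tid, v_result):
        if rewriter.gen_malloc_nursery(size, v_result):
            rewriter.gen_initialize_tid(v_result, tid)           # normal nursery path
        else:
            rewriter.gen_malloc_fixedsize(size, tid, v_result)   # too big: out of line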

File pypy/jit/backend/llsupport/test/test_gc.py

         def frame_pos(n):
             return -4*(4+n)
         gcrootmap = GcRootMap_asmgcc()
+        gcrootmap.is_64_bit = False
         num1 = frame_pos(-5)
         num1a = num1|2
         num2 = frame_pos(55)

File pypy/jit/backend/llsupport/test/test_rewrite.py

             jump()
         """, """
             []
-            p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \
-                                %(adescr.basesize + 10 * adescr.itemsize)d, \
-                                descr=malloc_fixedsize_descr)
-            setfield_gc(p0, 10, descr=alendescr)
+            p0 = call_malloc_gc(ConstClass(malloc_array),   \
+                                %(adescr.basesize)d,        \
+                                10,                         \
+                                %(adescr.itemsize)d,        \
+                                %(adescr.lendescr.offset)d, \
+                                descr=malloc_array_descr)
             jump()
         """)
+##      should ideally be:
+##            p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \
+##                                %(adescr.basesize + 10 * adescr.itemsize)d, \
+##                                descr=malloc_fixedsize_descr)
+##            setfield_gc(p0, 10, descr=alendescr)
 
     def test_new_array_variable(self):
         self.check_rewrite("""
             jump()
         """, """
             [i1]
-            p0 = call_malloc_gc(ConstClass(malloc_fixedsize),   \
-                                %(unicodedescr.basesize +       \
-                                  10 * unicodedescr.itemsize)d, \
-                                descr=malloc_fixedsize_descr)
-            setfield_gc(p0, 10, descr=unicodelendescr)
+            p0 = call_malloc_gc(ConstClass(malloc_array),   \
+                                %(unicodedescr.basesize)d,  \
+                                10,                         \
+                                %(unicodedescr.itemsize)d,  \
+                                %(unicodelendescr.offset)d, \
+                                descr=malloc_array_descr)
             jump()
         """)
+##      should ideally be:
+##            p0 = call_malloc_gc(ConstClass(malloc_fixedsize),   \
+##                                %(unicodedescr.basesize +       \
+##                                  10 * unicodedescr.itemsize)d, \
+##                                descr=malloc_fixedsize_descr)
+##            setfield_gc(p0, 10, descr=unicodelendescr)
 
 
 class TestFramework(RewriteTests):
         #
         class FakeCPU(object):
             def sizeof(self, STRUCT):
-                descr = SizeDescrWithVTable(102)
+                descr = SizeDescrWithVTable(104)
                 descr.tid = 9315
                 return descr
         self.cpu = FakeCPU()
             jump()
         """, """
             []
-            p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \
-                                %(bdescr.basesize + 104)d,    \
-                                descr=malloc_fixedsize_descr)
-            setfield_gc(p0, 8765, descr=tiddescr)
-            setfield_gc(p0, 103, descr=blendescr)
+            p0 = call_malloc_gc(ConstClass(malloc_array), 1,  \
+                                %(bdescr.tid)d, 103,          \
+                                descr=malloc_array_descr)
             jump()
         """)
 
             jump()
         """, """
             [p1]
-            p0 = call_malloc_gc(ConstClass(malloc_fixedsize), 104, \
-                                descr=malloc_fixedsize_descr)
-            setfield_gc(p0, 9315, descr=tiddescr)
+            p0 = call_malloc_gc(ConstClass(malloc_big_fixedsize), 104, 9315, \
+                                descr=malloc_big_fixedsize_descr)
             setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr)
             jump()
         """)

File pypy/jit/backend/test/runner_test.py

         res = self.cpu.get_latest_value_int(0)
         assert res == 20
 
+    def test_compile_big_bridge_out_of_small_loop(self):
+        i0 = BoxInt()
+        faildescr1 = BasicFailDescr(1)
+        looptoken = JitCellToken()
+        operations = [
+            ResOperation(rop.GUARD_FALSE, [i0], None, descr=faildescr1),
+            ResOperation(rop.FINISH, [], None, descr=BasicFailDescr(2)),
+            ]
+        inputargs = [i0]
+        operations[0].setfailargs([i0])
+        self.cpu.compile_loop(inputargs, operations, looptoken)
+
+        i1list = [BoxInt() for i in range(1000)]
+        bridge = []
+        iprev = i0
+        for i1 in i1list:
+            bridge.append(ResOperation(rop.INT_ADD, [iprev, ConstInt(1)], i1))
+            iprev = i1
+        bridge.append(ResOperation(rop.GUARD_FALSE, [i0], None,
+                                   descr=BasicFailDescr(3)))
+        bridge.append(ResOperation(rop.FINISH, [], None,
+                                   descr=BasicFailDescr(4)))
+        bridge[-2].setfailargs(i1list)
+
+        self.cpu.compile_bridge(faildescr1, [i0], bridge, looptoken)
+
+        fail = self.cpu.execute_token(looptoken, 1)
+        assert fail.identifier == 3
+        for i in range(1000):
+            res = self.cpu.get_latest_value_int(i)
+            assert res == 2 + i
+
     def test_get_latest_value_count(self):
         i0 = BoxInt()
         i1 = BoxInt()
                                          [funcbox, BoxInt(arg1), BoxInt(arg2)],
                                          'int', descr=calldescr)
             assert res.getint() == f(arg1, arg2)
-        
+
     def test_call_stack_alignment(self):
         # test stack alignment issues, notably for Mac OS/X.
         # also test the ordering of the arguments.
     def test_noops(self):
         c_box = self.alloc_string("hi there").constbox()
         c_nest = ConstInt(0)
-        self.execute_operation(rop.DEBUG_MERGE_POINT, [c_box, c_nest], 'void')
+        c_id = ConstInt(0)
+        self.execute_operation(rop.DEBUG_MERGE_POINT, [c_box, c_nest, c_id], 'void')
         self.execute_operation(rop.JIT_DEBUG, [c_box, c_nest, c_nest,
                                                c_nest, c_nest], 'void')
 
             ResOperation(rop.JUMP, [i2], None, descr=targettoken2),
             ]
         self.cpu.compile_bridge(faildescr, inputargs, operations, looptoken)
-        
+
         fail = self.cpu.execute_token(looptoken, 2)
         assert fail.identifier == 3
         res = self.cpu.get_latest_value_int(0)
             assert len(mc) == len(ops)
             for i in range(len(mc)):
                 assert mc[i].split("\t")[-1].startswith(ops[i])
-            
+
         data = ctypes.string_at(info.asmaddr, info.asmlen)
         mc = list(machine_code_dump(data, info.asmaddr, cpuname))
         lines = [line for line in mc if line.count('\t') == 2]

File pypy/jit/backend/x86/assembler.py

         self._debug = False
         self.debug_counter_descr = cpu.fielddescrof(DEBUG_COUNTER, 'i')
         self.fail_boxes_count = 0
-        self._current_depths_cache = (0, 0)
         self.datablockwrapper = None
         self.stack_check_slowpath = 0
         self.propagate_exception_path = 0
         looppos = self.mc.get_relative_pos()
         looptoken._x86_loop_code = looppos
         clt.frame_depth = -1     # temporarily
-        clt.param_depth = -1     # temporarily
-        frame_depth, param_depth = self._assemble(regalloc, operations)
+        frame_depth = self._assemble(regalloc, operations)
         clt.frame_depth = frame_depth
-        clt.param_depth = param_depth
         #
         size_excluding_failure_stuff = self.mc.get_relative_pos()
         self.write_pending_failure_recoveries()
             rawstart + size_excluding_failure_stuff,
             rawstart))
         debug_stop("jit-backend-addr")
-        self._patch_stackadjust(rawstart + stackadjustpos,
-                                frame_depth + param_depth)
+        self._patch_stackadjust(rawstart + stackadjustpos, frame_depth)
         self.patch_pending_failure_recoveries(rawstart)
         #
         ops_offset = self.mc.ops_offset
             assert ([loc.assembler() for loc in arglocs] ==
                     [loc.assembler() for loc in faildescr._x86_debug_faillocs])
         regalloc = RegAlloc(self, self.cpu.translate_support_code)
-        fail_depths = faildescr._x86_current_depths
         startpos = self.mc.get_relative_pos()
-        operations = regalloc.prepare_bridge(fail_depths, inputargs, arglocs,
+        operations = regalloc.prepare_bridge(inputargs, arglocs,
                                              operations,
                                              self.current_clt.allgcrefs)
 
         stackadjustpos = self._patchable_stackadjust()
-        frame_depth, param_depth = self._assemble(regalloc, operations)
+        frame_depth = self._assemble(regalloc, operations)
         codeendpos = self.mc.get_relative_pos()
         self.write_pending_failure_recoveries()
         fullsize = self.mc.get_relative_pos()
         debug_print("bridge out of Guard %d has address %x to %x" %
                     (descr_number, rawstart, rawstart + codeendpos))
         debug_stop("jit-backend-addr")
-        self._patch_stackadjust(rawstart + stackadjustpos,
-                                frame_depth + param_depth)
+        self._patch_stackadjust(rawstart + stackadjustpos, frame_depth)
         self.patch_pending_failure_recoveries(rawstart)
         if not we_are_translated():
             # for the benefit of tests
             faildescr._x86_bridge_frame_depth = frame_depth
-            faildescr._x86_bridge_param_depth = param_depth
         # patch the jump from original guard
         self.patch_jump_for_descr(faildescr, rawstart)
         ops_offset = self.mc.ops_offset
         self.fixup_target_tokens(rawstart)
         self.current_clt.frame_depth = max(self.current_clt.frame_depth, frame_depth)
-        self.current_clt.param_depth = max(self.current_clt.param_depth, param_depth)
         self.teardown()
         # oprofile support
         if self.cpu.profile_agent is not None:
         regalloc.walk_operations(operations)
         if we_are_translated() or self.cpu.dont_keepalive_stuff:
             self._regalloc = None   # else keep it around for debugging
-        frame_depth = regalloc.fm.get_frame_depth()
-        param_depth = regalloc.param_depth
+        frame_depth = regalloc.get_final_frame_depth()
         jump_target_descr = regalloc.jump_target_descr
         if jump_target_descr is not None:
             target_frame_depth = jump_target_descr._x86_clt.frame_depth
-            target_param_depth = jump_target_descr._x86_clt.param_depth
             frame_depth = max(frame_depth, target_frame_depth)
-            param_depth = max(param_depth, target_param_depth)
-        return frame_depth, param_depth
+        return frame_depth
 
     def _patchable_stackadjust(self):
         # stack adjustment LEA
         genop_math_list[oopspecindex](self, op, arglocs, resloc)
 
     def regalloc_perform_with_guard(self, op, guard_op, faillocs,
-                                    arglocs, resloc, current_depths):
+                                    arglocs, resloc):
         faildescr = guard_op.getdescr()
         assert isinstance(faildescr, AbstractFailDescr)
-        faildescr._x86_current_depths = current_depths
         failargs = guard_op.getfailargs()
         guard_opnum = guard_op.getopnum()
         guard_token = self.implement_guard_recovery(guard_opnum,
             # must be added by the genop_guard_list[]()
             assert guard_token is self.pending_guard_tokens[-1]
 
-    def regalloc_perform_guard(self, guard_op, faillocs, arglocs, resloc,
-                               current_depths):
+    def regalloc_perform_guard(self, guard_op, faillocs, arglocs, resloc):
         self.regalloc_perform_with_guard(None, guard_op, faillocs, arglocs,
-                                         resloc, current_depths)
+                                         resloc)
 
     def load_effective_addr(self, sizereg, baseofs, scale, result, frm=imm0):
         self.mc.LEA(result, addr_add(frm, sizereg, baseofs, scale))
                     self.mc.MOV(tmp, loc)
                     self.mc.MOV_sr(p, tmp.value)
             p += loc.get_width()
-        self._regalloc.reserve_param(p//WORD)
         # x is a location
         self.mc.CALL(x)
         self.mark_gc_roots(force_index)
         #
         if callconv != FFI_DEFAULT_ABI:
             self._fix_stdcall(callconv, p)
+        #
+        self._regalloc.needed_extra_stack_locations(p//WORD)
 
     def _fix_stdcall(self, callconv, p):
         from pypy.rlib.clibffi import FFI_STDCALL
             x = r10
         remap_frame_layout(self, src_locs, dst_locs, X86_64_SCRATCH_REG)
 
-        self._regalloc.reserve_param(len(pass_on_stack))
         self.mc.CALL(x)
         self.mark_gc_roots(force_index)
+        self._regalloc.needed_extra_stack_locations(len(pass_on_stack))
 
     def call(self, addr, args, res):
         force_index = self.write_new_force_index()
             if reg in save_registers:
                 self.mc.MOV_sr(p, reg.value)
                 p += WORD
-        self._regalloc.reserve_param(p//WORD)
         #
         if gcrootmap.is_shadow_stack:
             args = []
             if reg in save_registers:
                 self.mc.MOV_rs(reg.value, p)
                 p += WORD
+        self._regalloc.needed_extra_stack_locations(p//WORD)
 
     def call_reacquire_gil(self, gcrootmap, save_loc):
         # save the previous result (eax/xmm0) into the stack temporarily.
         # to save xmm0 in this case.
         if isinstance(save_loc, RegLoc) and not save_loc.is_xmm:
             self.mc.MOV_sr(WORD, save_loc.value)
-            self._regalloc.reserve_param(2)
         # call the reopenstack() function (also reacquiring the GIL)
         if gcrootmap.is_shadow_stack:
             args = []
         # restore the result from the stack
         if isinstance(save_loc, RegLoc) and not save_loc.is_xmm:
             self.mc.MOV_rs(save_loc.value, WORD)
+            self._regalloc.needed_extra_stack_locations(2)
 
     def genop_guard_call_assembler(self, op, guard_op, guard_token,
                                    arglocs, result_loc):
         # copy of heap(nursery_free_adr), so that the final MOV below is
         # a no-op.
 
-        # reserve room for the argument to the real malloc and the
-        # saved XMM regs (on 32 bit: 8 * 2 words; on 64 bit: 16 * 1
-        # word)
-        self._regalloc.reserve_param(1+16)
-
         gcrootmap = self.cpu.gc_ll_descr.gcrootmap
         shadow_stack = (gcrootmap is not None and gcrootmap.is_shadow_stack)
         if not shadow_stack:
         slowpath_addr2 = self.malloc_slowpath2
         self.mc.CALL(imm(slowpath_addr2))
 
+        # reserve room for the argument to the real malloc and the
+        # saved XMM regs (on 32 bit: 8 * 2 words; on 64 bit: 16 * 1
+        # word)
+        self._regalloc.needed_extra_stack_locations(1+16)
+
         offset = self.mc.get_relative_pos() - jmp_adr
         assert 0 < offset <= 127
         self.mc.overwrite(jmp_adr-1, chr(offset))

File pypy/jit/backend/x86/regalloc.py

 
     def _prepare(self, inputargs, operations, allgcrefs):
         self.fm = X86FrameManager()
-        self.param_depth = 0
+        self.min_frame_depth = 0
         cpu = self.assembler.cpu
         operations = cpu.gc_ll_descr.rewrite_assembler(cpu, operations,
                                                        allgcrefs)
             self.min_bytes_before_label = 13
         return operations
 
-    def prepare_bridge(self, prev_depths, inputargs, arglocs, operations,
-                       allgcrefs):
+    def prepare_bridge(self, inputargs, arglocs, operations, allgcrefs):
         operations = self._prepare(inputargs, operations, allgcrefs)
         self._update_bindings(arglocs, inputargs)
-        self.param_depth = prev_depths[1]
         self.min_bytes_before_label = 0
         return operations
 
         self.min_bytes_before_label = max(self.min_bytes_before_label,
                                           at_least_position)
 
-    def reserve_param(self, n):
-        self.param_depth = max(self.param_depth, n)
+    def needed_extra_stack_locations(self, n):
+        # call *after* you needed extra stack locations: (%esp), (%esp+4)...
+        min_frame_depth = self.fm.get_frame_depth() + n
+        if min_frame_depth > self.min_frame_depth:
+            self.min_frame_depth = min_frame_depth
+
+    def get_final_frame_depth(self):
+        self.needed_extra_stack_locations(0)  # update min_frame_depth
+        return self.min_frame_depth
 
     def _set_initial_bindings(self, inputargs):
         if IS_X86_64:
     def locs_for_fail(self, guard_op):
         return [self.loc(v) for v in guard_op.getfailargs()]
 
-    def get_current_depth(self):
-        # return (self.fm.frame_depth, self.param_depth), but trying to share
-        # the resulting tuple among several calls
-        arg0 = self.fm.get_frame_depth()
-        arg1 = self.param_depth
-        result = self.assembler._current_depths_cache
-        if result[0] != arg0 or result[1] != arg1:
-            result = (arg0, arg1)
-            self.assembler._current_depths_cache = result
-        return result
-
     def perform_with_guard(self, op, guard_op, arglocs, result_loc):
         faillocs = self.locs_for_fail(guard_op)
         self.rm.position += 1
         self.xrm.position += 1
-        current_depths = self.get_current_depth()
         self.assembler.regalloc_perform_with_guard(op, guard_op, faillocs,
-                                                   arglocs, result_loc,
-                                                   current_depths)
+                                                   arglocs, result_loc)
         if op.result is not None:
             self.possibly_free_var(op.result)
         self.possibly_free_vars(guard_op.getfailargs())
                                                       arglocs))
             else:
                 self.assembler.dump('%s(%s)' % (guard_op, arglocs))
-        current_depths = self.get_current_depth()
         self.assembler.regalloc_perform_guard(guard_op, faillocs, arglocs,
-                                              result_loc,
-                                              current_depths)
+                                              result_loc)
         self.possibly_free_vars(guard_op.getfailargs())
 
     def PerformDiscard(self, op, arglocs):
         self.force_spill_var(op.getarg(0))
 
     def get_mark_gc_roots(self, gcrootmap, use_copy_area=False):
-        shape = gcrootmap.get_basic_shape(IS_X86_64)
+        shape = gcrootmap.get_basic_shape()
         for v, val in self.fm.bindings.items():
             if (isinstance(v, BoxPtr) and self.rm.stays_alive(v)):
                 assert isinstance(val, StackLoc)

File pypy/jit/backend/x86/test/test_gc_integration.py

 
 class MockGcRootMap(object):
     is_shadow_stack = False
-    def get_basic_shape(self, is_64_bit):
+    def get_basic_shape(self):
         return ['shape']
     def add_frame_offset(self, shape, offset):
         shape.append(offset)
         self.addrs[1] = self.addrs[0] + 64
         self.calls = []
         def malloc_slowpath(size):
+            if self.gcrootmap is not None:   # hook used by the shadow stack tests
+                self.gcrootmap.hook_malloc_slowpath()
             self.calls.append(size)
             # reset the nursery
             nadr = rffi.cast(lltype.Signed, self.nursery)
         assert gc_ll_descr.addrs[0] == nurs_adr + 24
         # this should call slow path once
         assert gc_ll_descr.calls == [24]
+
+    def test_save_regs_around_malloc(self):
+        S1 = lltype.GcStruct('S1')
+        S2 = lltype.GcStruct('S2', ('s0', lltype.Ptr(S1)),
+                                   ('s1', lltype.Ptr(S1)),
+                                   ('s2', lltype.Ptr(S1)),
+                                   ('s3', lltype.Ptr(S1)),
+                                   ('s4', lltype.Ptr(S1)),
+                                   ('s5', lltype.Ptr(S1)),
+                                   ('s6', lltype.Ptr(S1)),
+                                   ('s7', lltype.Ptr(S1)),
+                                   ('s8', lltype.Ptr(S1)),
+                                   ('s9', lltype.Ptr(S1)),
+                                   ('s10', lltype.Ptr(S1)),
+                                   ('s11', lltype.Ptr(S1)),
+                                   ('s12', lltype.Ptr(S1)),
+                                   ('s13', lltype.Ptr(S1)),
+                                   ('s14', lltype.Ptr(S1)),
+                                   ('s15', lltype.Ptr(S1)))
+        cpu = self.cpu
+        self.namespace = self.namespace.copy()
+        for i in range(16):
+            self.namespace['ds%i' % i] = cpu.fielddescrof(S2, 's%d' % i)
+        ops = '''
+        [p0]
+        p1 = getfield_gc(p0, descr=ds0)
+        p2 = getfield_gc(p0, descr=ds1)
+        p3 = getfield_gc(p0, descr=ds2)
+        p4 = getfield_gc(p0, descr=ds3)
+        p5 = getfield_gc(p0, descr=ds4)
+        p6 = getfield_gc(p0, descr=ds5)
+        p7 = getfield_gc(p0, descr=ds6)
+        p8 = getfield_gc(p0, descr=ds7)
+        p9 = getfield_gc(p0, descr=ds8)
+        p10 = getfield_gc(p0, descr=ds9)
+        p11 = getfield_gc(p0, descr=ds10)
+        p12 = getfield_gc(p0, descr=ds11)
+        p13 = getfield_gc(p0, descr=ds12)
+        p14 = getfield_gc(p0, descr=ds13)
+        p15 = getfield_gc(p0, descr=ds14)
+        p16 = getfield_gc(p0, descr=ds15)
+        #
+        # now all registers are in use
+        p17 = call_malloc_nursery(40)
+        p18 = call_malloc_nursery(40)     # overflow
+        #
+        finish(p1, p2, p3, p4, p5, p6, p7, p8,         \
+               p9, p10, p11, p12, p13, p14, p15, p16)
+        '''
+        s2 = lltype.malloc(S2)
+        for i in range(16):
+            setattr(s2, 's%d' % i, lltype.malloc(S1))
+        s2ref = lltype.cast_opaque_ptr(llmemory.GCREF, s2)
+        #
+        self.interpret(ops, [s2ref])
+        gc_ll_descr = cpu.gc_ll_descr
+        gc_ll_descr.check_nothing_in_nursery()
+        assert gc_ll_descr.calls == [40]
+        # check the returned pointers
+        for i in range(16):
+            s1ref = self.cpu.get_latest_value_ref(i)
+            s1 = lltype.cast_opaque_ptr(lltype.Ptr(S1), s1ref)
+            assert s1 == getattr(s2, 's%d' % i)
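
The mock nursery in these tests is 64 bytes (self.addrs[1] = self.addrs[0] + 64 above), which is why the second call_malloc_nursery(40) is the one that overflows and reaches the slow path while all sixteen pointers are live. The arithmetic, spelled out:

    # Illustrative nursery-overflow arithmetic for the two allocations above.
    nursery_size = 64          # taken from the mock GC descriptor setup
    first, second = 40, 40
    assert first <= nursery_size            # first allocation fits the fast path
    assert first + second > nursery_size    # second one must take the slow path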
+
+
+class MockShadowStackRootMap(MockGcRootMap):
+    is_shadow_stack = True
+    MARKER_FRAME = 88       # this marker follows the frame addr
+    S1 = lltype.GcStruct('S1')
+
+    def __init__(self):
+        self.addrs = lltype.malloc(rffi.CArray(lltype.Signed), 20,
+                                   flavor='raw')
+        # root_stack_top
+        self.addrs[0] = rffi.cast(lltype.Signed, self.addrs) + 3*WORD
+        # random stuff
+        self.addrs[1] = 123456
+        self.addrs[2] = 654321
+        self.check_initial_and_final_state()
+        self.callshapes = {}
+        self.should_see = []
+
+    def check_initial_and_final_state(self):
+        assert self.addrs[0] == rffi.cast(lltype.Signed, self.addrs) + 3*WORD
+        assert self.addrs[1] == 123456
+        assert self.addrs[2] == 654321
+
+    def get_root_stack_top_addr(self):
+        return rffi.cast(lltype.Signed, self.addrs)
+
+    def compress_callshape(self, shape, datablockwrapper):
+        assert shape[0] == 'shape'
+        return ['compressed'] + shape[1:]
+
+    def write_callshape(self, mark, force_index):
+        assert mark[0] == 'compressed'
+        assert force_index not in self.callshapes
+        assert force_index == 42 + len(self.callshapes)
+        self.callshapes[force_index] = mark
+
+    def hook_malloc_slowpath(self):
+        num_entries = self.addrs[0] - rffi.cast(lltype.Signed, self.addrs)
+        assert num_entries == 5*WORD    # 3 initially, plus 2 by the asm frame
+        assert self.addrs[1] == 123456  # unchanged
+        assert self.addrs[2] == 654321  # unchanged
+        frame_addr = self.addrs[3]                   # pushed by the asm frame
+        assert self.addrs[4] == self.MARKER_FRAME    # pushed by the asm frame
+        #
+        from pypy.jit.backend.x86.arch import FORCE_INDEX_OFS
+        addr = rffi.cast(rffi.CArrayPtr(lltype.Signed),
+                         frame_addr + FORCE_INDEX_OFS)
+        force_index = addr[0]
+        assert force_index == 43    # in this test: the 2nd call_malloc_nursery
+        #
+        # The callshapes[43] saved above should list addresses both in the
+        # COPY_AREA and in the "normal" stack, where all the 16 values p1-p16
+        # of test_save_regs_at_correct_place should have been stored.  Here
+        # we replace them with new addresses, to emulate a moving GC.
+        shape = self.callshapes[force_index]
+        assert len(shape[1:]) == len(self.should_see)
+        new_objects = [None] * len(self.should_see)
+        for ofs in shape[1:]:
+            assert isinstance(ofs, int)    # not a register at all here
+            addr = rffi.cast(rffi.CArrayPtr(lltype.Signed), frame_addr + ofs)
+            contains = addr[0]
+            for j in range(len(self.should_see)):
+                obj = self.should_see[j]
+                if contains == rffi.cast(lltype.Signed, obj):
+                    assert new_objects[j] is None   # duplicate?
+                    break
+            else:
+                assert 0   # the value read from the stack looks random?
+            new_objects[j] = lltype.malloc(self.S1)
+            addr[0] = rffi.cast(lltype.Signed, new_objects[j])
+        self.should_see[:] = new_objects
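
What hook_malloc_slowpath() emulates here is a moving collector visiting every root recorded in the call shape and rewriting it in place on the frame. A minimal, self-contained sketch of that idea (all names below are invented for illustration):

    # Sketch only: 'stack' maps a frame offset to the word stored there.
    def move_roots(stack, offsets, old_to_new):
        for ofs in offsets:
            stack[ofs] = old_to_new[stack[ofs]]   # root now points to the moved object

    stack = {0: 0x1000, 8: 0x2000}
    move_roots(stack, [0, 8], {0x1000: 0x3000, 0x2000: 0x4000})
    assert stack == {0: 0x3000, 8: 0x4000}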
+
+
+class TestMallocShadowStack(BaseTestRegalloc):
+
+    def setup_method(self, method):
+        cpu = CPU(None, None)
+        cpu.gc_ll_descr = GCDescrFastpathMalloc()
+        cpu.gc_ll_descr.gcrootmap = MockShadowStackRootMap()
+        cpu.setup_once()
+        for i in range(42):
+            cpu.reserve_some_free_fail_descr_number()
+        self.cpu = cpu
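
Reserving 42 fail descr numbers in setup_method makes the force indexes handed out during the test start at 42, which is what lets write_callshape() check force_index == 42 + len(self.callshapes) and lets the slow-path hook recognize 43 as the second call_malloc_nursery. Spelled out (the reading that index 42 goes to the first malloc is inferred from those assertions):

    # Illustrative numbering only.
    reserved = 42
    first_force_index = reserved          # first call_malloc_nursery
    second_force_index = reserved + 1     # the one that overflows the nursery
    assert second_force_index == 43       # matches the assert in hook_malloc_slowpath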
+
+    def test_save_regs_at_correct_place(self):
+        cpu = self.cpu
+        gc_ll_descr = cpu.gc_ll_descr
+        S1 = gc_ll_descr.gcrootmap.S1
+        S2 = lltype.GcStruct('S2', ('s0', lltype.Ptr(S1)),
+                                   ('s1', lltype.Ptr(S1)),
+                                   ('s2', lltype.Ptr(S1)),
+                                   ('s3', lltype.Ptr(S1)),
+                                   ('s4', lltype.Ptr(S1)),
+                                   ('s5', lltype.Ptr(S1)),
+                                   ('s6', lltype.Ptr(S1)),
+                                   ('s7', lltype.Ptr(S1)),
+                                   ('s8', lltype.Ptr(S1)),
+                                   ('s9', lltype.Ptr(S1)),
+                                   ('s10', lltype.Ptr(S1)),
+                                   ('s11', lltype.Ptr(S1)),
+                                   ('s12', lltype.Ptr(S1)),
+                                   ('s13', lltype.Ptr(S1)),
+                                   ('s14', lltype.Ptr(S1)),
+                                   ('s15', lltype.Ptr(S1)))
+        self.namespace = self.namespace.copy()
+        for i in range(16):
+            self.namespace['ds%i' % i] = cpu.fielddescrof(S2, 's%d' % i)
+        ops = '''
+        [p0]
+        p1 = getfield_gc(p0, descr=ds0)
+        p2 = getfield_gc(p0, descr=ds1)
+        p3 = getfield_gc(p0, descr=ds2)
+        p4 = getfield_gc(p0, descr=ds3)
+        p5 = getfield_gc(p0, descr=ds4)
+        p6 = getfield_gc(p0, descr=ds5)
+        p7 = getfield_gc(p0, descr=ds6)
+        p8 = getfield_gc(p0, descr=ds7)
+        p9 = getfield_gc(p0, descr=ds8)
+        p10 = getfield_gc(p0, descr=ds9)
+        p11 = getfield_gc(p0, descr=ds10)
+        p12 = getfield_gc(p0, descr=ds11)
+        p13 = getfield_gc(p0, descr=ds12)
+        p14 = getfield_gc(p0, descr=ds13)
+        p15 = getfield_gc(p0, descr=ds14)
+        p16 = getfield_gc(p0, descr=ds15)
+        #
+        # now all registers are in use
+        p17 = call_malloc_nursery(40)
+        p18 = call_malloc_nursery(40)     # overflow
+        #
+        finish(p1, p2, p3, p4, p5, p6, p7, p8,         \
+               p9, p10, p11, p12, p13, p14, p15, p16)
+        '''
+        s2 = lltype.malloc(S2)
+        for i in range(16):
+            s1 = lltype.malloc(S1)
+            setattr(s2, 's%d' % i, s1)
+            gc_ll_descr.gcrootmap.should_see.append(s1)
+        s2ref = lltype.cast_opaque_ptr(llmemory.GCREF, s2)
+        #
+        self.interpret(ops, [s2ref])
+        gc_ll_descr.check_nothing_in_nursery()
+        assert gc_ll_descr.calls == [40]
+        gc_ll_descr.gcrootmap.check_initial_and_final_state()
+        # check the returned pointers
+        for i in range(16):
+            s1ref = self.cpu.get_latest_value_ref(i)
+            s1 = lltype.cast_opaque_ptr(lltype.Ptr(S1), s1ref)
+            for j in range(16):
+                assert s1 != getattr(s2, 's%d' % j)
+            assert s1 == gc_ll_descr.gcrootmap.should_see[i]

File pypy/jit/backend/x86/test/test_recompilation.py

         '''
         loop = self.interpret(ops, [0])
         previous = loop._jitcelltoken.compiled_loop_token.frame_depth
-        assert loop._jitcelltoken.compiled_loop_token.param_depth == 0
         assert self.getint(0) == 20
         ops = '''
         [i1]
         bridge = self.attach_bridge(ops, loop, -2)
         descr = loop.operations[3].getdescr()
         new = descr._x86_bridge_frame_depth
-        assert descr._x86_bridge_param_depth == 0
         # the force_spill() forces the stack to grow
         assert new > previous
         fail = self.run(loop, 0)
         loop_frame_depth = loop._jitcelltoken.compiled_loop_token.frame_depth
         bridge = self.attach_bridge(ops, loop, 6)
         guard_op = loop.operations[6]
-        assert loop._jitcelltoken.compiled_loop_token.param_depth == 0
         # the force_spill() forces the stack to grow
         assert guard_op.getdescr()._x86_bridge_frame_depth > loop_frame_depth
-        assert guard_op.getdescr()._x86_bridge_param_depth == 0
         self.run(loop, 0, 0, 0, 0, 0, 0)
         assert self.getint(0) == 1
         assert self.getint(1) == 20

File pypy/jit/backend/x86/test/test_regalloc.py

         assert self.getints(9) == [0, 1, 1, 1, 1, 1, 1, 1, 1]
 
 class TestRegAllocCallAndStackDepth(BaseTestRegalloc):
-    def expected_param_depth(self, num_args):
+    def expected_frame_depth(self, num_call_args, num_pushed_input_args=0):
         # Assumes the arguments are all non-float
         if IS_X86_32:
-            return num_args
+            extra_esp = num_call_args
+            return extra_esp
         elif IS_X86_64:
-            return max(num_args - 6, 0)
+            # 'num_pushed_input_args' is for X86_64 only
+            extra_esp = max(num_call_args - 6, 0)
+            return num_pushed_input_args + extra_esp
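
Applied to test_one_call just below, which makes a one-argument call and, per the test's own assertion, keeps five of its loop inputs on the stack, the helper's x86-64 branch gives:

    # Worked example of expected_frame_depth(1, 5) on x86-64.
    num_call_args = 1
    num_pushed_input_args = 5
    extra_esp = max(num_call_args - 6, 0)    # first six integer args travel in registers
    assert num_pushed_input_args + extra_esp == 5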
 
     def test_one_call(self):
         ops = '''
-        [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9]
+        [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i9b]
         i10 = call(ConstClass(f1ptr), i0, descr=f1_calldescr)
-        finish(i10, i1, i2, i3, i4, i5, i6, i7, i8, i9)
+        finish(i10, i1, i2, i3, i4, i5, i6, i7, i8, i9, i9b)
         '''
-        loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9])
-        assert self.getints(10) == [5, 7, 9, 9, 9, 9, 9, 9, 9, 9]
+        loop = self.interpret(ops, [4, 7, 9, 9, 9, 9, 9, 9, 9, 9, 8])
+        assert self.getints(11) == [5, 7, 9, 9, 9, 9, 9, 9, 9, 9, 8]
         clt = loop._jitcelltoken.compiled_loop_token
-        assert clt.param_depth == self.expected_param_depth(1)
+        assert clt.frame_depth == self.expected_frame_depth(1, 5)
+
+    def test_one_call_reverse(self):
+        ops = '''
+        [i1, i2, i3, i4, i5, i6, i7, i8, i9, i9b, i0]
+        i10 = call(ConstClass(f1ptr), i0, descr=f1_calldescr)
+        finish(i10, i1, i2, i3, i4, i5, i6, i7, i8, i9, i9b)
+        '''
+        loop = self.interpret(ops, [7, 9, 9, 9, 9, 9, 9, 9, 9, 8, 4])
+        assert self.getints(11) == [5, 7, 9, 9, 9, 9, 9, 9, 9, 9, 8]
+        clt = loop._jitcelltoken.compiled_loop_token
+        assert clt.frame_depth == self.expected_frame_depth(1, 6)
 
     def test_two_calls(self):
         ops = '''
         loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9])
         assert self.getints(10) == [5*7, 7, 9, 9, 9, 9, 9, 9, 9, 9]
         clt = loop._jitcelltoken.compiled_loop_token
-        assert clt.param_depth == self.expected_param_depth(2)
+        assert clt.frame_depth == self.expected_frame_depth(2, 5)
 
     def test_call_many_arguments(self):
         # NB: The first and last arguments in the call are constants. This
         loop = self.interpret(ops, [2, 3, 4, 5, 6, 7, 8, 9])
         assert self.getint(0) == 55
         clt = loop._jitcelltoken.compiled_loop_token
-        assert clt.param_depth == self.expected_param_depth(10)
+        assert clt.frame_depth == self.expected_frame_depth(10)
 
     def test_bridge_calls_1(self):
         ops = '''
         [i0, i1]
         i2 = call(ConstClass(f1ptr), i0, descr=f1_calldescr)
-        guard_value(i2, 0, descr=fdescr1) [i2, i1]
+        guard_value(i2, 0, descr=fdescr1) [i2, i0, i1]
         finish(i1)
         '''
         loop = self.interpret(ops, [4, 7])
         assert self.getint(0) == 5
+        clt = loop._jitcelltoken.compiled_loop_token
+        orgdepth = clt.frame_depth
+        assert orgdepth == self.expected_frame_depth(1, 2)
+
         ops = '''
-        [i2, i1]
+        [i2, i0, i1]
         i3 = call(ConstClass(f2ptr), i2, i1, descr=f2_calldescr)        
-        finish(i3, descr=fdescr2)        
+        finish(i3, i0, descr=fdescr2)
         '''
         bridge = self.attach_bridge(ops, loop, -2)
 
-        assert loop.operations[-2].getdescr()._x86_bridge_param_depth == self.expected_param_depth(2)
+        assert clt.frame_depth == max(orgdepth, self.expected_frame_depth(2, 2))
+        assert loop.operations[-2].getdescr()._x86_bridge_frame_depth == \
+            self.expected_frame_depth(2, 2)
 
         self.run(loop, 4, 7)
         assert self.getint(0) == 5*7
         [i0, i1]
         i2 = call(ConstClass(f2ptr), i0, i1, descr=f2_calldescr)
         guard_value(i2, 0, descr=fdescr1) [i2]
-        finish(i1)
+        finish(i2)