Commits

Amaury Forgeot d'Arc committed 37ce58a Merge

hg merge default

  • Parent commits c6fd4a0, 89eebf6
  • Branches py3k

Files changed (78)

 b48df0bf4e75b81d98f19ce89d4a7dc3e1dab5e5 benchmarked
 d8ac7d23d3ec5f9a0fa1264972f74a010dbfd07f release-1.6
 ff4af8f318821f7f5ca998613a60fca09aa137da release-1.7
+07e08e9c885ca67d89bcc304e45a32346daea2fa release-2.0-beta-1

File pypy/config/translationoption.py

     BoolOption("no__thread",
                "don't use __thread for implementing TLS",
                default=False, cmdline="--no__thread", negation=False),
-    StrOption("compilerflags", "Specify flags for the C compiler",
-               cmdline="--cflags"),
-    StrOption("linkerflags", "Specify flags for the linker (C backend only)",
-               cmdline="--ldflags"),
+##  --- not supported for a long time.  Use the env vars CFLAGS/LDFLAGS.
+##    StrOption("compilerflags", "Specify flags for the C compiler",
+##               cmdline="--cflags"),
+##    StrOption("linkerflags", "Specify flags for the linker (C backend only)",
+##               cmdline="--ldflags"),
     IntOption("make_jobs", "Specify -j argument to make for compilation"
               " (C backend only)",
               cmdline="--make-jobs", default=detect_number_of_processors()),

File pypy/doc/conf.py

 # built documents.
 #
 # The short X.Y version.
-version = '1.9'
+version = '2.0'
 # The full version, including alpha/beta/rc tags.
-release = '1.9'
+release = '2.0-beta1'
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.

File pypy/doc/getting-started-python.rst

 executable. The executable behaves mostly like a normal Python interpreter::
 
     $ ./pypy-c
-    Python 2.7.2 (341e1e3821ff, Jun 07 2012, 15:40:31)
-    [PyPy 1.9.0 with GCC 4.4.3] on linux2
+    Python 2.7.3 (7e4f0faa3d51, Nov 22 2012, 10:35:18)
+    [PyPy 2.0.0-beta1 with GCC 4.7.1] on linux2
     Type "help", "copyright", "credits" or "license" for more information.
     And now for something completely different: ``RPython magically makes you rich
     and famous (says so on the tin)''
 the ``bin/pypy`` executable.
 
 To install PyPy system wide on unix-like systems, it is recommended to put the
-whole hierarchy alone (e.g. in ``/opt/pypy1.9``) and put a symlink to the
+whole hierarchy alone (e.g. in ``/opt/pypy2.0-beta1``) and put a symlink to the
 ``pypy`` executable into ``/usr/bin`` or ``/usr/local/bin``
 
 If the executable fails to find suitable libraries, it will report

File pypy/doc/getting-started.rst

 PyPy is ready to be executed as soon as you unpack the tarball or the zip
 file, with no need to install it in any specific location::
 
-    $ tar xf pypy-1.9-linux.tar.bz2
-    $ ./pypy-1.9/bin/pypy
-    Python 2.7.2 (341e1e3821ff, Jun 07 2012, 15:40:31)
-    [PyPy 1.9.0 with GCC 4.4.3] on linux2
+    $ tar xf pypy-2.0-beta1-linux.tar.bz2
+    $ ./pypy-2.0-beta1/bin/pypy
+    Python 2.7.3 (7e4f0faa3d51, Nov 22 2012, 10:35:18)
+    [PyPy 2.0.0-beta1 with GCC 4.7.1] on linux2
     Type "help", "copyright", "credits" or "license" for more information.
-    And now for something completely different: ``it seems to me that once you
-    settle on an execution / object model and / or bytecode format, you've already
-    decided what languages (where the 's' seems superfluous) support is going to be
-    first class for''
+    And now for something completely different: ``PyPy is an exciting technology
+    that lets you to write fast, portable, multi-platform interpreters with less
+    effort''
     >>>>
 
 If you want to make PyPy available system-wide, you can put a symlink to the
 
     $ curl -O https://raw.github.com/pypa/pip/master/contrib/get-pip.py
 
-    $ ./pypy-1.9/bin/pypy distribute_setup.py
+    $ ./pypy-2.0-beta1/bin/pypy distribute_setup.py
 
-    $ ./pypy-1.9/bin/pypy get-pip.py
+    $ ./pypy-2.0-beta1/bin/pypy get-pip.py
 
-    $ ./pypy-1.9/bin/pip install pygments  # for example
+    $ ./pypy-2.0-beta1/bin/pip install pygments  # for example
 
-3rd party libraries will be installed in ``pypy-1.9/site-packages``, and
-the scripts in ``pypy-1.9/bin``.
+3rd party libraries will be installed in ``pypy-2.0-beta1/site-packages``, and
+the scripts in ``pypy-2.0-beta1/bin``.
 
 Installing using virtualenv
 ---------------------------

File pypy/doc/index.rst

 
 * `FAQ`_: some frequently asked questions.
 
-* `Release 1.9`_: the latest official release
+* `Release 2.0 beta 1`_: the latest official release
 
 * `PyPy Blog`_: news and status info about PyPy 
 
 .. _`Getting Started`: getting-started.html
 .. _`Papers`: extradoc.html
 .. _`Videos`: video-index.html
-.. _`Release 1.9`: http://pypy.org/download.html
+.. _`Release 2.0 beta 1`: http://pypy.org/download.html
 .. _`speed.pypy.org`: http://speed.pypy.org
 .. _`RPython toolchain`: translation.html
 .. _`potential project ideas`: project-ideas.html
 Windows, on top of .NET, and on top of Java.
 To dig into PyPy it is recommended to try out the current
 Mercurial default branch, which is always working or mostly working,
-instead of the latest release, which is `1.9`__.
+instead of the latest release, which is `2.0 beta1`__.
 
-.. __: release-1.9.0.html
+.. __: release-2.0.0-beta1.html
 
 PyPy is mainly developed on Linux and Mac OS X.  Windows is supported,
 but platform-specific bugs tend to take longer before we notice and fix

File pypy/doc/jit-hooks.rst

 understanding what's pypy's JIT doing while running your program. There
 are three functions related to that coming from the `pypyjit` module:
 
-* `set_optimize_hook`::
+* `set_optimize_hook(callable)`::
 
     Set a compiling hook that will be called each time a loop is optimized,
-    but before assembler compilation. This allows to add additional
+    but before assembler compilation. This allows adding additional
     optimizations on Python level.
-    
-    The hook will be called with the following signature:
-    hook(jitdriver_name, loop_type, greenkey or guard_number, operations)
 
-    jitdriver_name is the name of this particular jitdriver, 'pypyjit' is
-    the main interpreter loop
+    The callable will be called with the pypyjit.JitLoopInfo object.
+    Refer to its documentation for details.
 
-    loop_type can be either `loop` `entry_bridge` or `bridge`
-    in case loop is not `bridge`, greenkey will be a tuple of constants
-    or a string describing it.
+    Result value will be the resulting list of operations, or None
 
-    for the interpreter loop` it'll be a tuple
-    (code, offset, is_being_profiled)
+
+* `set_compile_hook(callable)`::
+
+    Set a compiling hook that will be called each time a loop is compiled.
+
+    The callable will be called with the pypyjit.JitLoopInfo object.
+    Refer to its documentation for details.
 
     Note that jit hook is not reentrant. It means that if the code
     inside the jit hook is itself jitted, it will get compiled, but the
     jit hook won't be called for that.
 
-    Result value will be the resulting list of operations, or None
-
-* `set_compile_hook`::
-
-    Set a compiling hook that will be called each time a loop is compiled.
-    The hook will be called with the following signature:
-    hook(jitdriver_name, loop_type, greenkey or guard_number, operations,
-         assembler_addr, assembler_length)
-
-    jitdriver_name is the name of this particular jitdriver, 'pypyjit' is
-    the main interpreter loop
-
-    loop_type can be either `loop` `entry_bridge` or `bridge`
-    in case loop is not `bridge`, greenkey will be a tuple of constants
-    or a string describing it.
-
-    for the interpreter loop` it'll be a tuple
-    (code, offset, is_being_profiled)
-
-    assembler_addr is an integer describing where assembler starts,
-    can be accessed via ctypes, assembler_lenght is the lenght of compiled
-    asm
-
-    Note that jit hook is not reentrant. It means that if the code
-    inside the jit hook is itself jitted, it will get compiled, but the
-    jit hook won't be called for that.
-
-* `set_abort_hook`::
+* `set_abort_hook(hook)`::
 
     Set a hook (callable) that will be called each time there is tracing
     aborted due to some reason.
 
     The hook will be called as in: hook(jitdriver_name, greenkey, reason)
 
-    Where reason is the reason for abort, see documentation for set_compile_hook
-    for descriptions of other arguments.
+    Reason is a string; the meaning of the other arguments is the same
+    as the attributes on the JitLoopInfo object
+
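
A minimal usage sketch of the hooks described above (assuming a PyPy
interpreter that provides the ``pypyjit`` module; it relies only on the
abort-hook signature documented here and on the JitLoopInfo object being
printable)::

    import pypyjit

    def compile_hook(info):
        # called each time a loop is compiled;
        # 'info' is the pypyjit.JitLoopInfo object mentioned above
        print("compiled: %r" % (info,))

    def optimize_hook(info):
        # called before assembler compilation; per the documentation
        # above, the result is a list of operations, or None
        print("optimizing: %r" % (info,))
        return None

    def abort_hook(jitdriver_name, greenkey, reason):
        # signature as documented for set_abort_hook
        print("tracing aborted in %s: %s" % (jitdriver_name, reason))

    pypyjit.set_compile_hook(compile_hook)
    pypyjit.set_optimize_hook(optimize_hook)
    pypyjit.set_abort_hook(abort_hook)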

File pypy/doc/release-2.0.0-beta1.rst

 Windows 64 work is still stalling; we would welcome a volunteer
 to handle that.
 
-.. XXX link
+.. _`pypy 2.0 beta 1 and cpython 2.7.3`: http://bit.ly/USXqpP
 
-XXX donors info?
+How to use PyPy?
+================
+
+We suggest using PyPy from a `virtualenv`_. Once you have a virtualenv
+installed, you can follow instructions from `pypy documentation`_ on how
+to proceed. This document also covers other `installation schemes`_.
+
+.. _`pypy documentation`: http://doc.pypy.org/en/latest/getting-started.html#installing-using-virtualenv
+.. _`virtualenv`: http://www.virtualenv.org/en/latest/
+.. _`installation schemes`: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy
+.. _`PyPy and pip`: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy
 
 Regressions
 ===========
 ==========
 
 * ``cffi`` is officially supported by PyPy. You can install it normally by
-  using ``pip install cffi`` once you have PyPy installed. The corresponding
-  ``0.4`` version of ``cffi`` has been released.
+  using ``pip install cffi`` once you have installed `PyPy and pip`_.
+  The corresponding ``0.4`` version of ``cffi`` has been released.
 
 * ARM is now an officially supported processor architecture.
   PyPy now works on soft-float ARM/Linux builds.  Currently ARM processors
   unicode strings, which means that now such collections will be both faster
   and more compact.
 
+.. _`cpython issue tracker`: http://bugs.python.org/issue14621
+.. _`jit hooks`: http://doc.pypy.org/en/latest/jit-hooks.html
+
+Things we're working on
+=======================
+
+There are a few things that did not make it into 2.0 beta 1 and are
+being actively worked on. Greenlet support in the JIT is one that we
+would like to have before 2.0 final. Two other important items that
+will not make it into 2.0, but are also in progress, are:
+
+* Faster JIT warmup time.
+
+* Software Transactional Memory.
+
+Cheers,
+Maciej Fijalkowski, Armin Rigo and the PyPy team

File pypy/doc/whatsnew-head.rst

 What's new in PyPy xxx
 ======================
 
-.. this is the revision of the last merge from default to release-1.9.x
-.. startrev: 8d567513d04d
+.. this is a revision shortly after release-2.0-beta1
+.. startrev: 0e6161a009c6
 
-Fixed the performance of gc.get_referrers()
+.. branch: autoreds
+XXX
 
-.. branch: default
-.. branch: app_main-refactor
-.. branch: win-ordinal
-.. branch: reflex-support
-Provides cppyy module (disabled by default) for access to C++ through Reflex.
-See doc/cppyy.rst for full details and functionality.
-.. branch: nupypy-axis-arg-check
-Check that axis arg is valid in _numpypy
-.. branch:less-gettestobjspace
-.. branch: move-apptest-support
-
-.. branch: iterator-in-rpython
-.. branch: numpypy_count_nonzero
-.. branch: numpy-refactor
-Remove numpy lazy evaluation and simplify everything
-.. branch: numpy-reintroduce-jit-drivers
-.. branch: numpy-fancy-indexing
-Support for array[array-of-ints] in numpy
-.. branch: even-more-jit-hooks
-Implement better JIT hooks
-.. branch: virtual-arguments
-Improve handling of **kwds greatly, making them virtual sometimes.
-.. branch: improve-rbigint
-Introduce __int128 on systems where it's supported and improve the speed of
-rlib/rbigint.py greatly.
-.. branch: translation-cleanup
-Start to clean up a bit the flow object space.
-.. branch: ffi-backend
-Support CFFI.  http://morepypy.blogspot.ch/2012/08/cffi-release-03.html
-.. branch: speedup-unpackiterable
-.. branch: stdlib-2.7.3
-The stdlib was updated to version 2.7.3
-
-.. branch: numpypy-complex2
-Complex dtype support for numpy
-.. branch: numpypy-problems
-Improve dtypes intp, uintp, void, string and record
-.. branch: numpypy.float16
-Add float16 numpy dtype
-.. branch: kill-someobject
-major cleanups including killing some object support
-.. branch: cpyext-PyThreadState_New
-implement threadstate-related functions in cpyext
-
-.. branch: unicode-strategies
-add dict/list/set strategies optimized for unicode items
-
-.. "uninteresting" branches that we should just ignore for the whatsnew:
-.. branch: slightly-shorter-c
-.. branch: better-enforceargs
-.. branch: rpython-unicode-formatting
-.. branch: jit-opaque-licm
-.. branch: rpython-utf8
-Support for utf-8 encoding in RPython
-.. branch: arm-backend-2
-Support ARM in the JIT.
+.. branch: length-hint
+XXX

File pypy/interpreter/astcompiler/optimize.py

 unrolling_unary_folders = unrolling_iterable(unary_folders.items())
 
 for folder in binary_folders.values() + unary_folders.values():
-    folder._always_inline_ = True
+    folder._always_inline_ = 'try'
 del folder
 
 opposite_compare_operations = misc.dict_to_switch({

File pypy/interpreter/error.py

         return w_type
 
     def write_unraisable(self, space, where, w_object=None,
-                         with_traceback=False):
+                         with_traceback=False, extra_line=''):
         if w_object is None:
             objrepr = ''
         else:
                 w_tb = space.wrap(self.get_traceback())
                 space.appexec([space.wrap(where),
                                space.wrap(objrepr),
+                               space.wrap(extra_line),
                                w_t, w_v, w_tb],
-                """(where, objrepr, t, v, tb):
+                """(where, objrepr, extra_line, t, v, tb):
                     import sys, traceback
                     sys.stderr.write('From %s%s:\\n' % (where, objrepr))
+                    if extra_line:
+                        sys.stderr.write(extra_line)
                     traceback.print_exception(t, v, tb)
                 """)
             else:

File pypy/interpreter/executioncontext.py

         actionflag = self.space.actionflag
         if actionflag.get_ticker() < 0:
             actionflag.action_dispatcher(self, frame)     # slow path
-    bytecode_trace_after_exception._always_inline_ = True
+    bytecode_trace_after_exception._always_inline_ = 'try'
+    # NB. this function is not inlined right now.  backendopt.inline would
+    # need some improvements to handle this case, but it's not really an
+    # issue
 
     def exception_trace(self, frame, operationerr):
         "Trace function called upon OperationError."

File pypy/jit/backend/arm/arch.py

 static int pypy__arm_int_div(int a, int b) {
     return a/b;
 }
-static uint pypy__arm_uint_div(uint a, uint b) {
+static unsigned int pypy__arm_uint_div(unsigned int a, unsigned int b) {
     return a/b;
 }
-static int pypy__arm_int_mod(uint a, uint b) {
+static int pypy__arm_int_mod(int a, int b) {
     return a % b;
 }
 """])

File pypy/jit/backend/arm/codebuilder.py

         self._VCVT(target, source, cond, 0, 1)
 
     def _VCVT(self, target, source, cond, opc2, sz):
-        D = 0x0
+        D = 0
         M = 0
         op = 1
         instr = (cond << 28
                 | (source & 0xF))
         self.write32(instr)
 
+    def _VCVT_single_double(self, target, source, cond, sz):
+        # double_to_single = (sz == '1');
+        D = 0
+        M = 0
+        instr = (cond << 28
+                | 0xEB7 << 16
+                | 0xAC << 4
+                | D << 22
+                | (target & 0xF) << 12
+                | sz << 8
+                | M << 5
+                | (source & 0xF))
+        self.write32(instr)
+
+    def VCVT_f64_f32(self, target, source, cond=cond.AL):
+        self._VCVT_single_double(target, source, cond, 1)
+
+    def VCVT_f32_f64(self, target, source, cond=cond.AL):
+        self._VCVT_single_double(target, source, cond, 0)
+
     def POP(self, regs, cond=cond.AL):
         instr = self._encode_reg_list(cond << 28 | 0x8BD << 16, regs)
         self.write32(instr)

File pypy/jit/backend/arm/opassembler.py

             # must save the register loc_index before it is mutated
             self.mc.PUSH([loc_index.value])
             tmp1 = loc_index
-            tmp2 = arglocs[2]
+            tmp2 = arglocs[-1]  # the last item is a preallocated tmp
             # lr = byteofs
             s = 3 + descr.jit_wb_card_page_shift
             self.mc.MVN_rr(r.lr.value, loc_index.value,
         self.mc.MOV_ri(r.ip.value, 0)
         self.mc.VMOV_cr(res.value, tmp.value, r.ip.value)
         return fcond
+
+    def emit_op_cast_float_to_singlefloat(self, op, arglocs, regalloc, fcond):
+        arg, res = arglocs
+        assert arg.is_vfp_reg()
+        assert res.is_reg()
+        self.mc.VCVT_f64_f32(r.vfp_ip.value, arg.value)
+        self.mc.VMOV_rc(res.value, r.ip.value, r.vfp_ip.value)
+        return fcond
+    
+    def emit_op_cast_singlefloat_to_float(self, op, arglocs, regalloc, fcond):
+        arg, res = arglocs
+        assert res.is_vfp_reg()
+        assert arg.is_reg()
+        self.mc.MOV_ri(r.ip.value, 0)
+        self.mc.VMOV_cr(res.value, arg.value, r.ip.value)
+        self.mc.VCVT_f32_f64(res.value, res.value)
+        return fcond

File pypy/jit/backend/arm/regalloc.py

         args = op.getarglist()
         arglocs = [self._ensure_value_is_boxed(op.getarg(i), args)
                                                               for i in range(N)]
-        tmp = self.get_scratch_reg(INT)
+        tmp = self.get_scratch_reg(INT, args)
+        assert tmp not in arglocs
         arglocs.append(tmp)
         return arglocs
 
         res = self.vfprm.force_allocate_reg(op.result)
         return [loc, res]
 
+    def prepare_op_cast_float_to_singlefloat(self, op, fcond):
+        loc1 = self._ensure_value_is_boxed(op.getarg(0))
+        res = self.force_allocate_reg(op.result)
+        return [loc1, res]
+    
+    def prepare_op_cast_singlefloat_to_float(self, op, fcond):
+        loc1 = self._ensure_value_is_boxed(op.getarg(0))
+        res = self.force_allocate_reg(op.result)
+        return [loc1, res]
+
 
 def add_none_argument(fn):
     return lambda self, op, fcond: fn(self, op, None, fcond)

File pypy/jit/backend/arm/runner.py

     supports_floats = True
     supports_longlong = False # XXX requires an implementation of
                               # read_timestamp that works in user mode
+    supports_singlefloats = True
     
     use_hf_abi = False        # use hard float abi flag
 

File pypy/jit/backend/arm/test/test_float.py

+
+import py
+from pypy.jit.backend.arm.test.support import JitARMMixin
+from pypy.jit.metainterp.test.test_float import FloatTests
+
+class TestFloat(JitARMMixin, FloatTests):
+    # for the individual tests see
+    # ====> ../../../metainterp/test/test_float.py
+    pass

File pypy/jit/backend/arm/test/test_regalloc.py

         return -1
 
 
+def get_zero_division_error(self):
+    # for tests, a random emulated ll_inst will do
+    ll_inst = lltype.malloc(rclass.OBJECT)
+    ll_inst.typeptr = lltype.malloc(rclass.OBJECT_VTABLE,
+                                    immortal=True)
+    _zer_error_vtable = llmemory.cast_ptr_to_adr(ll_inst.typeptr)
+    zer_vtable = self.cast_adr_to_int(_zer_error_vtable)
+    zer_inst = lltype.cast_opaque_ptr(llmemory.GCREF, ll_inst)
+    return zer_vtable, zer_inst
+
+
 class BaseTestRegalloc(object):
     cpu = CPU(None, None)
     cpu.setup_once()
     f_calldescr = cpu.calldescrof(FPTR.TO, FPTR.TO.ARGS, FPTR.TO.RESULT,
                                                     EffectInfo.MOST_GENERAL)
 
-    zero_division_tp, zero_division_value = cpu.get_zero_division_error()
+    zero_division_tp, zero_division_value = get_zero_division_error(cpu)
     zd_addr = cpu.cast_int_to_adr(zero_division_tp)
     zero_division_error = llmemory.cast_adr_to_ptr(zd_addr,
                                             lltype.Ptr(rclass.OBJECT_VTABLE))

File pypy/jit/backend/llgraph/runner.py

         else:
             return ootype.NULL
 
-    def get_overflow_error(self):
-        ll_err = llimpl._get_error(OverflowError)
-        return (ootype.cast_to_object(ll_err.args[0]),
-                ootype.cast_to_object(ll_err.args[1]))
-
-    def get_zero_division_error(self):
-        ll_err = llimpl._get_error(ZeroDivisionError)
-        return (ootype.cast_to_object(ll_err.args[0]),
-                ootype.cast_to_object(ll_err.args[1]))
-
     def do_new_with_vtable(self, clsbox):
         cls = clsbox.getref_base()
         typedescr = self.class_sizes[cls]

File pypy/jit/backend/llsupport/llmodel.py

             self.vtable_offset, _ = symbolic.get_field_token(rclass.OBJECT,
                                                              'typeptr',
                                                         translate_support_code)
-        self._setup_prebuilt_error('ovf', OverflowError)
-        self._setup_prebuilt_error('zer', ZeroDivisionError)
         if translate_support_code:
             self._setup_exception_handling_translated()
         else:
     def setup(self):
         pass
 
-    def _setup_prebuilt_error(self, prefix, Class):
-        if self.rtyper is not None:   # normal case
-            bk = self.rtyper.annotator.bookkeeper
-            clsdef = bk.getuniqueclassdef(Class)
-            ll_inst = self.rtyper.exceptiondata.get_standard_ll_exc_instance(
-                self.rtyper, clsdef)
-        else:
-            # for tests, a random emulated ll_inst will do
-            ll_inst = lltype.malloc(rclass.OBJECT)
-            ll_inst.typeptr = lltype.malloc(rclass.OBJECT_VTABLE,
-                                            immortal=True)
-        setattr(self, '_%s_error_vtable' % prefix,
-                llmemory.cast_ptr_to_adr(ll_inst.typeptr))
-        setattr(self, '_%s_error_inst' % prefix, ll_inst)
-
 
     def _setup_exception_handling_untranslated(self):
         # for running un-translated only, all exceptions occurring in the
         return ffisupport.calldescr_dynamic_for_tests(self, atypes, rtype,
                                                       abiname)
 
-    def get_overflow_error(self):
-        ovf_vtable = self.cast_adr_to_int(self._ovf_error_vtable)
-        ovf_inst = lltype.cast_opaque_ptr(llmemory.GCREF,
-                                          self._ovf_error_inst)
-        return ovf_vtable, ovf_inst
-
-    def get_zero_division_error(self):
-        zer_vtable = self.cast_adr_to_int(self._zer_error_vtable)
-        zer_inst = lltype.cast_opaque_ptr(llmemory.GCREF,
-                                          self._zer_error_inst)
-        return zer_vtable, zer_inst
-
     # ____________________________________________________________
 
     def bh_arraylen_gc(self, array, arraydescr):

File pypy/jit/backend/x86/test/test_float.py

File contents unchanged.

File pypy/jit/backend/x86/test/test_regalloc.py

     def _compute_next_usage(self, v, _):
         return -1
 
+
+def get_zero_division_error(self):
+    # for tests, a random emulated ll_inst will do
+    ll_inst = lltype.malloc(rclass.OBJECT)
+    ll_inst.typeptr = lltype.malloc(rclass.OBJECT_VTABLE,
+                                    immortal=True)
+    _zer_error_vtable = llmemory.cast_ptr_to_adr(ll_inst.typeptr)
+    zer_vtable = self.cast_adr_to_int(_zer_error_vtable)
+    zer_inst = lltype.cast_opaque_ptr(llmemory.GCREF, ll_inst)
+    return zer_vtable, zer_inst
+
+
 class BaseTestRegalloc(object):
     cpu = CPU(None, None)
     cpu.setup_once()
                               zero_division_value)
     FPTR = lltype.Ptr(lltype.FuncType([lltype.Signed], lltype.Void))
     raising_fptr = llhelper(FPTR, raising_func)
-    zero_division_tp, zero_division_value = cpu.get_zero_division_error()
+    zero_division_tp, zero_division_value = get_zero_division_error(cpu)
     zd_addr = cpu.cast_int_to_adr(zero_division_tp)
     zero_division_error = llmemory.cast_adr_to_ptr(zd_addr,
                                             lltype.Ptr(rclass.OBJECT_VTABLE))

File pypy/jit/metainterp/heapcache.py

         elif (opnum != rop.GETFIELD_GC and
               opnum != rop.MARK_OPAQUE_PTR and
               opnum != rop.PTR_EQ and
-              opnum != rop.PTR_NE):
+              opnum != rop.PTR_NE and
+              opnum != rop.INSTANCE_PTR_EQ and
+              opnum != rop.INSTANCE_PTR_NE):
             idx = 0
             for box in argboxes:
                 # setarrayitem_gc don't escape its first argument

File pypy/jit/metainterp/logger.py

                 r = "<Guard%d>" % index
             else:
                 r = self.repr_of_descr(descr)
-            args += ', descr=' +  r
+            if args:
+                args += ', descr=' +  r
+            else:
+                args = "descr=" + r
         if is_guard and op.getfailargs() is not None:
             fail_args = ' [' + ", ".join([self.repr_of_arg(arg)
                                           for arg in op.getfailargs()]) + ']'

File pypy/jit/metainterp/test/test_heapcache.py

         assert not h.is_unescaped(box1)
         assert not h.is_unescaped(box2)
 
+    def test_ops_dont_escape(self):
+        h = HeapCache()
+        h.new(box1)
+        h.new(box2)
+        assert h.is_unescaped(box1)
+        assert h.is_unescaped(box2)
+        h.invalidate_caches(rop.INSTANCE_PTR_EQ, None, [box1, box2])
+        assert h.is_unescaped(box1)
+        assert h.is_unescaped(box2)
+        h.invalidate_caches(rop.INSTANCE_PTR_NE, None, [box1, box2])
+        assert h.is_unescaped(box1)
+        assert h.is_unescaped(box2)
+
     def test_circular_virtuals(self):
         h = HeapCache()
         h.new(box1)

File pypy/jit/metainterp/test/test_logger.py

         '''
         self.reparse(inp)
 
+    def test_guard_not_invalidated(self):
+        inp = '''
+        []
+        guard_not_invalidated(descr=descr) []
+        finish()
+        '''
+        loop = pure_parse(inp, namespace={'descr': Descr()})
+        logger = Logger(self.make_metainterp_sd())
+        output = logger.log_loop(loop, {'descr': Descr()})
+        assert 'guard_not_invalidated(descr=' in output
+
     def test_guard_w_hole(self):
         inp = '''
         [i0]

File pypy/jit/metainterp/test/test_warmspot.py

         assert res == expected
         self.check_resops(int_sub=2, int_mul=0, int_add=2)
 
-    def test_inline_in_portal(self):
+    def test_inline_jit_merge_point(self):
+        # test that the machinery to inline jit_merge_points in callers
+        # works. The final user does not need to mess manually with the
+        # _inline_jit_merge_point_ attribute and similar, it is all nicely
+        # handled by @JitDriver.inline() (see next tests)
+        myjitdriver = JitDriver(greens = ['a'], reds = 'auto')
+
+        def jit_merge_point(a, b):
+            myjitdriver.jit_merge_point(a=a)
+
+        def add(a, b):
+            jit_merge_point(a, b)
+            return a+b
+        add._inline_jit_merge_point_ = jit_merge_point
+        myjitdriver.inline_jit_merge_point = True
+
+        def calc(n):
+            res = 0
+            while res < 1000:
+                res = add(n, res)
+            return res
+
+        def f():
+            return calc(1) + calc(3)
+
+        res = self.meta_interp(f, [])
+        assert res == 1000 + 1002
+        self.check_resops(int_add=4)
+
+    def test_jitdriver_inline(self):
         myjitdriver = JitDriver(greens = [], reds = 'auto')
         class MyRange(object):
             def __init__(self, n):
             def __iter__(self):
                 return self
 
-            @myjitdriver.inline_in_portal
+            def jit_merge_point(self):
+                myjitdriver.jit_merge_point()
+
+            @myjitdriver.inline(jit_merge_point)
             def next(self):
-                myjitdriver.jit_merge_point()
                 if self.cur == self.n:
                     raise StopIteration
                 self.cur += 1
                 return self.cur
 
-        def one():
+        def f(n):
             res = 0
-            for i in MyRange(10):
+            for i in MyRange(n):
                 res += i
             return res
 
-        def two():
+        expected = f(21)
+        res = self.meta_interp(f, [21])
+        assert res == expected
+        self.check_resops(int_eq=2, int_add=4)
+        self.check_trace_count(1)
+
+    def test_jitdriver_inline_twice(self):
+        myjitdriver = JitDriver(greens = [], reds = 'auto')
+
+        def jit_merge_point(a, b):
+            myjitdriver.jit_merge_point()
+
+        @myjitdriver.inline(jit_merge_point)
+        def add(a, b):
+            return a+b
+
+        def one(n):
             res = 0
-            for i in MyRange(13):
-                res += i * 2
+            while res < 1000:
+                res = add(n, res)
             return res
 
-        def f(n, m):
-            res = one() * 100
-            res += two()
+        def two(n):
+            res = 0
+            while res < 2000:
+                res = add(n, res)
             return res
-        expected = f(21, 5)
-        res = self.meta_interp(f, [21, 5])
+
+        def f(n):
+            return one(n) + two(n)
+
+        res = self.meta_interp(f, [1])
+        assert res == 3000
+        self.check_resops(int_add=4)
+        self.check_trace_count(2)
+
+    def test_jitdriver_inline_exception(self):
+        # this simulates what happens in a real case scenario: inside the next
+        # we have a call which we cannot inline (e.g. space.next in the case
+        # of W_InterpIterable), but we need to put it in a try/except block.
+        # With the first "inline_in_portal" approach, this case crashed
+        myjitdriver = JitDriver(greens = [], reds = 'auto')
+        
+        def inc(x, n):
+            if x == n:
+                raise OverflowError
+            return x+1
+        inc._dont_inline_ = True
+        
+        class MyRange(object):
+            def __init__(self, n):
+                self.cur = 0
+                self.n = n
+
+            def __iter__(self):
+                return self
+
+            def jit_merge_point(self):
+                myjitdriver.jit_merge_point()
+
+            @myjitdriver.inline(jit_merge_point)
+            def next(self):
+                try:
+                    self.cur = inc(self.cur, self.n)
+                except OverflowError:
+                    raise StopIteration
+                return self.cur
+
+        def f(n):
+            res = 0
+            for i in MyRange(n):
+                res += i
+            return res
+
+        expected = f(21)
+        res = self.meta_interp(f, [21])
         assert res == expected
-        self.check_resops(int_eq=4, int_add=8)
-        self.check_trace_count(2)
+        self.check_resops(int_eq=2, int_add=4)
+        self.check_trace_count(1)
+
 
 class TestLLWarmspot(WarmspotTests, LLJitMixin):
     CPUClass = runner.LLtypeCPU

File pypy/jit/metainterp/warmspot.py

 
     def inline_inlineable_portals(self):
         """
-        Find all the graphs which have been decorated with
-        @jitdriver.inline_in_portal and inline them in the callers, making
-        them JIT portals. Then, create a fresh copy of the jitdriver for each
-        of those new portals, because they cannot share the same one.  See
-        test_ajit::test_inline_in_portal.
+        Find all the graphs which have been decorated with @jitdriver.inline
+        and inline them in the callers, making them JIT portals. Then, create
+        a fresh copy of the jitdriver for each of those new portals, because
+        they cannot share the same one.  See
+        test_ajit::test_inline_jit_merge_point
         """
-        from pypy.translator.backendopt import inline
-        lltype_to_classdef = self.translator.rtyper.lltype_to_classdef_mapping()
-        raise_analyzer = inline.RaiseAnalyzer(self.translator)
-        callgraph = inline.inlinable_static_callers(self.translator.graphs)
+        from pypy.translator.backendopt.inline import (
+            get_funcobj, inlinable_static_callers, auto_inlining)
+
+        jmp_calls = {}
+        def get_jmp_call(graph, _inline_jit_merge_point_):
+            # there might be multiple calls to the @inlined function: the
+            # first time we see it, we remove the call to the jit_merge_point
+            # and we remember the corresponding op. Then, we create a new call
+            # to it every time we need a new one (i.e., for each callsite
+            # which becomes a new portal)
+            try:
+                op, jmp_graph = jmp_calls[graph]
+            except KeyError:
+                op, jmp_graph = fish_jmp_call(graph, _inline_jit_merge_point_)
+                jmp_calls[graph] = op, jmp_graph
+            #
+            # clone the op
+            newargs = op.args[:]
+            newresult = Variable()
+            newresult.concretetype = op.result.concretetype
+            op = SpaceOperation(op.opname, newargs, newresult)
+            return op, jmp_graph
+
+        def fish_jmp_call(graph, _inline_jit_merge_point_):
+            # graph is function which has been decorated with
+            # @jitdriver.inline, so its very first op is a call to the
+            # function which contains the actual jit_merge_point: fish it!
+            jmp_block, op_jmp_call = next(callee.iterblockops())
+            msg = ("The first operation of an _inline_jit_merge_point_ graph must be "
+                   "a direct_call to the function passed to @jitdriver.inline()")
+            assert op_jmp_call.opname == 'direct_call', msg
+            jmp_funcobj = get_funcobj(op_jmp_call.args[0].value)
+            assert jmp_funcobj._callable is _inline_jit_merge_point_, msg
+            jmp_block.operations.remove(op_jmp_call)
+            return op_jmp_call, jmp_funcobj.graph
+
+        # find all the graphs which call an @jitdriver.inline function
+        callgraph = inlinable_static_callers(self.translator.graphs, store_calls=True)
+        new_callgraph = []
         new_portals = set()
-        for caller, callee in callgraph:
+        for caller, block, op_call, callee in callgraph:
             func = getattr(callee, 'func', None)
-            _inline_in_portal_ = getattr(func, '_inline_in_portal_', False)
-            if _inline_in_portal_:
-                count = inline.inline_function(self.translator, callee, caller,
-                                               lltype_to_classdef, raise_analyzer)
-                assert count > 0, ('The function has been decorated with '
-                                   '@inline_in_portal, but it is not possible '
-                                   'to inline it')
+            _inline_jit_merge_point_ = getattr(func, '_inline_jit_merge_point_', None)
+            if _inline_jit_merge_point_:
+                _inline_jit_merge_point_._always_inline_ = True
+                op_jmp_call, jmp_graph = get_jmp_call(callee, _inline_jit_merge_point_)
+                #
+                # now we move the op_jmp_call from callee to caller, just
+                # before op_call. We assume that the args passed to
+                # op_jmp_call are the very same which are received by callee
+                # (i.e., the one passed to op_call)
+                assert len(op_call.args) == len(op_jmp_call.args)
+                op_jmp_call.args[1:] = op_call.args[1:]
+                idx = block.operations.index(op_call)
+                block.operations.insert(idx, op_jmp_call)
+                #
+                # finally, we signal that we want to inline op_jmp_call into
+                # caller, so that finally the actual call to
+                # driver.jit_merge_point will be seen there
+                new_callgraph.append((caller, jmp_graph))
                 new_portals.add(caller)
+
+        # inline them!
+        inline_threshold = 0.1 # we rely on the _always_inline_ set above
+        auto_inlining(self.translator, inline_threshold, new_callgraph)
+
+        # make a fresh copy of the JitDriver in all newly created
+        # jit_merge_points
         self.clone_inlined_jit_merge_points(new_portals)
 
     def clone_inlined_jit_merge_points(self, graphs):
         for graph, block, pos in find_jit_merge_points(graphs):
             op = block.operations[pos]
             v_driver = op.args[1]
-            new_driver = v_driver.value.clone()
+            driver = v_driver.value
+            if not driver.inline_jit_merge_point:
+                continue
+            new_driver = driver.clone()
             c_new_driver = Constant(new_driver, v_driver.concretetype)
             op.args[1] = c_new_driver
 
                         alive_v.add(op1.result)
                 greens_v = op.args[2:]
                 reds_v = alive_v - set(greens_v)
+                reds_v = [v for v in reds_v if v.concretetype is not lltype.Void]
                 reds_v = support.sort_vars(reds_v)
                 op.args.extend(reds_v)
                 if jitdriver.numreds is None:
                 maybe_compile_and_run(state.increment_threshold, *args)
             maybe_enter_jit._always_inline_ = True
         jd._maybe_enter_jit_fn = maybe_enter_jit
-
-        def maybe_enter_from_start(*args):
-            maybe_compile_and_run(state.increment_function_threshold, *args)
-        maybe_enter_from_start._always_inline_ = True
-        jd._maybe_enter_from_start_fn = maybe_enter_from_start
+        jd._maybe_compile_and_run_fn = maybe_compile_and_run
 
     def make_driverhook_graphs(self):
         from pypy.rlib.jit import BaseJitCell
         RESULT = PORTALFUNC.RESULT
         result_kind = history.getkind(RESULT)
         ts = self.cpu.ts
+        state = jd.warmstate
+        maybe_compile_and_run = jd._maybe_compile_and_run_fn
 
         def ll_portal_runner(*args):
             start = True
             while 1:
                 try:
+                    # maybe enter from the function's start.  Note that the
+                    # 'start' variable is constant-folded away because it's
+                    # the first statement in the loop.
                     if start:
-                        jd._maybe_enter_from_start_fn(*args)
+                        maybe_compile_and_run(
+                            state.increment_function_threshold, *args)
+                    #
+                    # then run the normal portal function, i.e. the
+                    # interpreter's main loop.  It might enter the jit
+                    # via maybe_enter_jit(), which typically ends with
+                    # handle_fail() being called, which raises on the
+                    # following exceptions --- caught here, because we
+                    # want to interrupt the whole interpreter loop.
                     return support.maybe_on_top_of_llinterp(rtyper,
                                                       portal_ptr)(*args)
                 except self.ContinueRunningNormally, e:

File pypy/jit/tool/pypytrace-mode.el

 (defun set-truncate-lines ()
   (setq truncate-lines t))
 
+(defun pypytrace-beginning-of-defun ()
+  (search-backward "{")
+  (beginning-of-line))
+
+(defun pypytrace-end-of-defun ()
+  (search-forward "}")
+  (end-of-line))
+
+
 ;; to generate the list of keywords:
 ;; from pypy.jit.metainterp import resoperation
 ;; print ' '.join(sorted('"%s"' % op.lower() for op in resoperation.opname.values() if not op.startswith('GUARD')))
      (4 'escape-glyph t)
      (5 'custom-variable-tag t)))
   '("\\.trace$")
-  '(set-truncate-lines)
+  '(set-truncate-lines
+    (lambda ()
+      (set (make-local-variable 'beginning-of-defun-function)
+           'pypytrace-beginning-of-defun)
+      (set (make-local-variable 'end-of-defun-function) 'pypytrace-end-of-defun))
+    )
   "A mode for pypy traces files")
 
 ;; debug helpers

File pypy/module/__builtin__/app_functional.py

File contents unchanged.

File pypy/module/_cffi_backend/ccallback.py

                                  space.wrap("expected a function ctype"))
         return ctype
 
-    def invoke(self, ll_args, ll_res):
+    def invoke(self, ll_args):
         space = self.space
         ctype = self.getfunctype()
         args_w = []
         for i, farg in enumerate(ctype.fargs):
             ll_arg = rffi.cast(rffi.CCHARP, ll_args[i])
             args_w.append(farg.convert_to_object(ll_arg))
-        fresult = ctype.ctitem
-        #
-        w_res = space.call(self.w_callable, space.newtuple(args_w))
-        #
+        return space.call(self.w_callable, space.newtuple(args_w))
+
+    def convert_result(self, ll_res, w_res):
+        fresult = self.getfunctype().ctitem
         convert_from_object_fficallback(fresult, ll_res, w_res)
 
-    def print_error(self, operr):
+    def print_error(self, operr, extra_line):
         space = self.space
         operr.write_unraisable(space, "callback ", self.w_callable,
-                               with_traceback=True)
+                               with_traceback=True, extra_line=extra_line)
 
     def write_error_return_value(self, ll_res):
         fresult = self.getfunctype().ctitem
     try:
         ec = cerrno.get_errno_container(callback.space)
         cerrno.save_errno_into(ec, e)
+        extra_line = ''
         try:
-            callback.invoke(ll_args, ll_res)
+            w_res = callback.invoke(ll_args)
+            extra_line = "Trying to convert the result back to C:\n"
+            callback.convert_result(ll_res, w_res)
         except OperationError, e:
             # got an app-level exception
-            callback.print_error(e)
+            callback.print_error(e, extra_line)
             callback.write_error_return_value(ll_res)
         #
     except Exception, e:

File pypy/module/_cffi_backend/ctypeptr.py

                 cdata = rffi.ptradd(cdata, ctitem.size)
         elif (self.ctitem.is_primitive_integer and
               self.ctitem.size == rffi.sizeof(lltype.Char)):
-            try:
-                s = space.str_w(w_ob)
-            except OperationError, e:
-                if not e.match(space, space.w_TypeError):
-                    raise
+            if not space.isinstance_w(w_ob, space.w_str):
                 raise self._convert_error("str or list or tuple", w_ob)
+            s = space.str_w(w_ob)
             n = len(s)
             if self.length >= 0 and n > self.length:
                 raise operationerrfmt(space.w_IndexError,
             if n != self.length:
                 cdata[n] = '\x00'
         elif isinstance(self.ctitem, ctypeprim.W_CTypePrimitiveUniChar):
-            try:
-                s = space.unicode_w(w_ob)
-            except OperationError, e:
-                if not e.match(space, space.w_TypeError):
-                    raise
+            if not space.isinstance_w(w_ob, space.w_unicode):
                 raise self._convert_error("unicode or list or tuple", w_ob)
+            s = space.unicode_w(w_ob)
             n = len(s)
             if self.length >= 0 and n > self.length:
                 raise operationerrfmt(space.w_IndexError,

File pypy/module/_cffi_backend/test/_backend_test_c.py

     complete_struct_or_union(BStruct, [('a', BArray10, -1)])
     BFunc22 = new_function_type((BStruct, BStruct), BStruct, False)
     f = cast(BFunc22, _testfunc(22))
-    p1 = newp(BStructP, {'a': range(100, 110)})
-    p2 = newp(BStructP, {'a': range(1000, 1100, 10)})
+    p1 = newp(BStructP, {'a': list(range(100, 110))})
+    p2 = newp(BStructP, {'a': list(range(1000, 1100, 10))})
     res = f(p1[0], p2[0])
     for i in range(10):
         assert res.a[i] == p1.a[i] - p2.a[i]
     assert str(e.value) == "'int(*)(int)' expects 1 arguments, got 0"
 
 def test_callback_exception():
-    import io, linecache
-    def matches(str, pattern):
+    try:
+        import cStringIO
+    except ImportError:
+        import io as cStringIO    # Python 3
+    import linecache
+    def matches(istr, ipattern):
+        str, pattern = istr, ipattern
         while '$' in pattern:
             i = pattern.index('$')
             assert str[:i] == pattern[:i]
     def check_value(x):
         if x == 10000:
             raise ValueError(42)
-    def cb1(x):
+    def Zcb1(x):
         check_value(x)
         return x * 3
     BShort = new_primitive_type("short")
     BFunc = new_function_type((BShort,), BShort, False)
-    f = callback(BFunc, cb1, -42)
+    f = callback(BFunc, Zcb1, -42)
     orig_stderr = sys.stderr
     orig_getline = linecache.getline
     try:
         assert f(100) == 300
         assert sys.stderr.getvalue() == ''
         assert f(10000) == -42
-        assert 1 or matches(sys.stderr.getvalue(), """\
-From callback <function cb1 at 0x$>:
+        assert matches(sys.stderr.getvalue(), """\
+From callback <function$Zcb1 at 0x$>:
 Traceback (most recent call last):
-  File "$", line $, in cb1
+  File "$", line $, in Zcb1
     $
   File "$", line $, in check_value
     $
         bigvalue = 20000
         assert f(bigvalue) == -42
         assert matches(sys.stderr.getvalue(), """\
-From callback <function cb1 at 0x$>:
+From callback <function$Zcb1 at 0x$>:
+Trying to convert the result back to C:
 OverflowError: integer 60000 does not fit 'short'
 """)
     finally:
                      'uint32_t', 'int64_t', 'uint64_t', 'intptr_t',
                      'uintptr_t', 'ptrdiff_t', 'size_t', 'ssize_t']:
         new_primitive_type(typename)    # works
+
+def test_cannot_convert_unicode_to_charp():
+    BCharP = new_pointer_type(new_primitive_type("char"))
+    BCharArray = new_array_type(BCharP, None)
+    py.test.raises(TypeError, newp, BCharArray, u+'foobar')

File pypy/module/_cffi_backend/test/test_file.py

     dest = py.path.local(__file__).join('..', '_backend_test_c.py').read()
     #
     source = source[source.index('# _____________'):]
-    assert source == dest
+    if source.strip() != dest.strip():
+        raise AssertionError(
+            "Update test/_backend_test_c.py by copying it from "
+            "https://bitbucket.org/cffi/cffi/raw/default/c/test_c.py "
+            "and killing the import lines at the start")

File pypy/module/_ffi/interp_funcptr.py

 #
 from pypy.rlib import jit
 from pypy.rlib import libffi
-from pypy.rlib.clibffi import get_libc_name, StackCheckError
+from pypy.rlib.clibffi import get_libc_name, StackCheckError, LibFFIError
 from pypy.rlib.rdynload import DLOpenError
 from pypy.rlib.rarithmetic import intmask, r_uint
 from pypy.rlib.objectmodel import we_are_translated
 from pypy.module._ffi.type_converter import FromAppLevelConverter, ToAppLevelConverter
+from pypy.module._rawffi.interp_rawffi import got_libffi_error
 
 import os
 if os.name == 'nt':
                 raise operationerrfmt(
                     space.w_AttributeError,
                     "No symbol %s found in library %s", name, CDLL.name)
+            except LibFFIError:
+                raise got_libffi_error(space)
 
             return W_FuncPtr(func, argtypes_w, w_restype)
         elif space.isinstance_w(w_name, space.w_int):
                 raise operationerrfmt(
                     space.w_AttributeError,
                     "No ordinal %d found in library %s", ordinal, CDLL.name)
+            except LibFFIError:
+                raise got_libffi_error(space)
+
             return W_FuncPtr(func, argtypes_w, w_restype)
         else:
             raise OperationError(space.w_TypeError, space.wrap(
             raise operationerrfmt(
                 space.w_AttributeError,
                 "No symbol %s found in library %s", name, CDLL.name)
+        except LibFFIError:
+            raise got_libffi_error(space)
 
         return W_FuncPtr(func, argtypes_w, w_restype)
 
                                                                w_argtypes,
                                                                w_restype)
     addr = rffi.cast(rffi.VOIDP, addr)
-    func = libffi.Func(name, argtypes, restype, addr, flags)
     try:
+        func = libffi.Func(name, argtypes, restype, addr, flags)
         return W_FuncPtr(func, argtypes_w, w_restype)
-    except OSError:
-        raise OperationError(space.w_SystemError,
-                         space.wrap("internal error building the Func object"))
+    except LibFFIError:
+        raise got_libffi_error(space)
 
 
 W_FuncPtr.typedef = TypeDef(

File pypy/module/_lsprof/interp_lsprof.py

             if subentry is not None:
                 subentry._stop(tt, it)
 
+
 @jit.elidable_promote()
+def create_spec_for_method(space, w_function, w_type):
+    w_function = w_function
+    if isinstance(w_function, Function):
+        name = w_function.name
+    else:
+        name = '?'
+    # try to get the real class that defines the method,
+    # which is a superclass of the class of the instance
+    from pypy.objspace.std.typeobject import W_TypeObject   # xxx
+    class_name = w_type.getname(space)    # if the rest doesn't work
+    if isinstance(w_type, W_TypeObject) and name != '?':
+        w_realclass, _ = space.lookup_in_type_where(w_type, name)
+        if isinstance(w_realclass, W_TypeObject):
+            class_name = w_realclass.get_module_type_name()
+    return "{method '%s' of '%s' objects}" % (name, class_name)
+
+
+@jit.elidable_promote()
+def create_spec_for_function(space, w_func):
+    if w_func.w_module is None:
+        module = ''
+    else:
+        module = space.str_w(w_func.w_module)
+        if module == '__builtin__':
+            module = ''
+        else:
+            module += '.'
+    return '{%s%s}' % (module, w_func.name)
+
+
+@jit.elidable_promote()
+def create_spec_for_object(space, w_obj):
+    class_name = space.type(w_obj).getname(space)
+    return "{'%s' object}" % (class_name,)
+
+
 def create_spec(space, w_arg):
     if isinstance(w_arg, Method):
-        w_function = w_arg.w_function
-        if isinstance(w_function, Function):
-            name = w_function.name
-        else:
-            name = '?'
-        # try to get the real class that defines the method,
-        # which is a superclass of the class of the instance
-        from pypy.objspace.std.typeobject import W_TypeObject   # xxx
         w_type = space.type(w_arg.w_instance)
-        class_name = w_type.getname(space)    # if the rest doesn't work
-        if isinstance(w_type, W_TypeObject) and name != '?':
-            w_realclass, _ = space.lookup_in_type_where(w_type, name)
-            if isinstance(w_realclass, W_TypeObject):
-                class_name = w_realclass.get_module_type_name()
-        return "{method '%s' of '%s' objects}" % (name, class_name)
+        return create_spec_for_method(space, w_arg.w_function, w_type)
     elif isinstance(w_arg, Function):
-        if w_arg.w_module is None:
-            module = ''
-        else:
-            module = space.str_w(w_arg.w_module)
-            if module == '__builtin__':
-                module = ''
-            else:
-                module += '.'
-        return '{%s%s}' % (module, w_arg.name)
+        return create_spec_for_function(space, w_arg)
     else:
-        class_name = space.type(w_arg).getname(space)
-        return "{'%s' object}" % (class_name,)
+        return create_spec_for_object(space, w_arg)
+
 
 def lsprof_call(space, w_self, frame, event, w_arg):
     assert isinstance(w_self, W_Profiler)

File pypy/module/_rawffi/callback.py

 from pypy.module._rawffi.array import push_elem
 from pypy.module._rawffi.structure import W_Structure
 from pypy.module._rawffi.interp_rawffi import (W_DataInstance, letter2tp,
-     unwrap_value, unpack_argshapes)
+     unwrap_value, unpack_argshapes, got_libffi_error)
 from pypy.rlib.clibffi import USERDATA_P, CallbackFuncPtr, FUNCFLAG_CDECL
-from pypy.rlib.clibffi import ffi_type_void
+from pypy.rlib.clibffi import ffi_type_void, LibFFIError
 from pypy.rlib import rweakref
 from pypy.module._rawffi.tracker import tracker
 from pypy.interpreter.error import OperationError
             self.result = None
             ffiresult = ffi_type_void
         self.number = global_counter.add(self)
-        self.ll_callback = CallbackFuncPtr(ffiargs, ffiresult,
-                                           callback, self.number, flags)
+        try:
+            self.ll_callback = CallbackFuncPtr(ffiargs, ffiresult,
+                                               callback, self.number, flags)
+        except LibFFIError:
+            raise got_libffi_error(space)
         self.ll_buffer = rffi.cast(rffi.VOIDP, self.ll_callback.ll_closure)
         if tracker.DO_TRACING:
             addr = rffi.cast(lltype.Signed, self.ll_callback.ll_closure)

File pypy/module/_rawffi/interp_rawffi.py

     return [unpack_simple_shape(space, w_arg)
             for w_arg in space.unpackiterable(w_argtypes)]
 
+def got_libffi_error(space):
+    raise OperationError(space.w_SystemError,
+                         space.wrap("not supported by libffi"))
+
+
 class W_CDLL(Wrappable):
     def __init__(self, space, name, cdll):
         self.cdll = cdll
             except KeyError:
                 raise operationerrfmt(space.w_AttributeError,
                     "No symbol %s found in library %s", name, self.name)
+            except LibFFIError:
+                raise got_libffi_error(space)
 
         elif (_MS_WINDOWS and
               space.is_true(space.isinstance(w_name, space.w_int))):
             except KeyError:
                 raise operationerrfmt(space.w_AttributeError,
                     "No symbol %d found in library %s", ordinal, self.name)
+            except LibFFIError:
+                raise got_libffi_error(space)
         else:
             raise OperationError(space.w_TypeError, space.wrap(
                 "function name must be string or integer"))
     resshape = unpack_resshape(space, w_res)
     ffi_args = [shape.get_basic_ffi_type() for shape in argshapes]
     ffi_res = resshape.get_basic_ffi_type()
-    ptr = RawFuncPtr('???', ffi_args, ffi_res, rffi.cast(rffi.VOIDP, addr),
-                     flags)
+    try:
+        ptr = RawFuncPtr('???', ffi_args, ffi_res, rffi.cast(rffi.VOIDP, addr),
+                         flags)
+    except LibFFIError:
+        raise got_libffi_error(space)
     return space.wrap(W_FuncPtr(space, ptr, argshapes, resshape))
 
 W_FuncPtr.typedef = TypeDef(

File pypy/module/_sre/interp_sre.py

 from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault
 from pypy.interpreter.error import OperationError
 from pypy.rlib.rarithmetic import intmask
-from pypy.tool.pairtype import extendabletype
 from pypy.rlib import jit
 
 # ____________________________________________________________
 from pypy.rlib.rsre import rsre_core
 from pypy.rlib.rsre.rsre_char import MAGIC, CODESIZE, getlower, set_unicode_db
 
+
 @unwrap_spec(char_ord=int, flags=int)
 def w_getlower(space, char_ord, flags):
     return space.wrap(getlower(char_ord, flags))
 
+
 def w_getcodesize(space):
     return space.wrap(CODESIZE)
 
 
 # ____________________________________________________________
 #
-# Additional methods on the classes XxxMatchContext
 
-class __extend__(rsre_core.AbstractMatchContext):
-    __metaclass__ = extendabletype
-    def _w_slice(self, space, start, end):
-        raise NotImplementedError
-    def _w_string(self, space):
-        raise NotImplementedError
-
-class __extend__(rsre_core.StrMatchContext):
-    __metaclass__ = extendabletype
-    def _w_slice(self, space, start, end):
-        return space.wrapbytes(self._string[start:end])
-    def _w_string(self, space):
-        return space.wrapbytes(self._string)
-
-class __extend__(rsre_core.UnicodeMatchContext):
-    __metaclass__ = extendabletype
-    def _w_slice(self, space, start, end):
-        return space.wrap(self._unicodestr[start:end])
-    def _w_string(self, space):
-        return space.wrap(self._unicodestr)
 
 def slice_w(space, ctx, start, end, w_default):
     if 0 <= start <= end:
-        return ctx._w_slice(space, start, end)
+        if isinstance(ctx, rsre_core.StrMatchContext):
+            return space.wrapbytes(ctx._string[start:end])
+        elif isinstance(ctx, rsre_core.UnicodeMatchContext):
+            return space.wrap(ctx._unicodestr[start:end])
+        else:
+            # unreachable
+            raise SystemError
     return w_default
 
 def do_flatten_marks(ctx, num_groups):
         return space.newtuple(result_w)
 
     def fget_string(self, space):
-        return self.ctx._w_string(space)
+        ctx = self.ctx
+        if isinstance(ctx, rsre_core.StrMatchContext):
+            return space.wrapbytes(ctx._string)
+        elif isinstance(ctx, rsre_core.UnicodeMatchContext):
+            return space.wrap(ctx._unicodestr)
+        else:
+            raise SystemError
 
 
 W_SRE_Match.typedef = TypeDef(
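
The hunk above drops the ``__extend__``/``extendabletype`` helper methods and instead dispatches directly on the concrete context class with ``isinstance()``, which keeps the space-dependent wrapping code inside ``interp_sre.py`` rather than patching it onto the rsre classes. A small self-contained sketch of the same pattern, with hypothetical context classes standing in for ``StrMatchContext``/``UnicodeMatchContext``::

    class BytesContext(object):            # stand-in for StrMatchContext
        def __init__(self, s):
            self._string = s

    class UnicodeContext(object):          # stand-in for UnicodeMatchContext
        def __init__(self, u):
            self._unicodestr = u

    def slice_ctx(ctx, start, end):
        # type-based dispatch instead of methods added via __extend__
        if isinstance(ctx, BytesContext):
            return ctx._string[start:end]
        elif isinstance(ctx, UnicodeContext):
            return ctx._unicodestr[start:end]
        else:
            raise SystemError("unreachable: unknown context class")

    print slice_ctx(BytesContext("abcdef"), 1, 4)      # prints: bcd
    print slice_ctx(UnicodeContext(u"abcdef"), 1, 4)   # prints: bcd
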

File pypy/module/cpyext/api.py

                         Py_DecRef(space, arg)
             unwrapper.func = func
             unwrapper.api_func = api_function
-            unwrapper._always_inline_ = True
+            unwrapper._always_inline_ = 'try'
             return unwrapper
 
         unwrapper_catch = make_unwrapper(True)
                 pypy_debug_catch_fatal_exception()
         rffi.stackcounter.stacks_counter -= 1
         return retval
-    callable._always_inline_ = True
+    callable._always_inline_ = 'try'
     wrapper.__name__ = "wrapper for %r" % (callable, )
     return wrapper
 

File pypy/module/cpyext/include/patchlevel.h

 #define PY_VERSION		"3.2.2"
 
 /* PyPy version as a string */
-#define PYPY_VERSION "1.9.1"
+#define PYPY_VERSION "2.0.0-beta1"
 
 /* Subversion Revision number of this file (not of the repository).
  * Empty since Mercurial migration. */

File pypy/module/cpyext/test/test_version.py

         """
         module = self.import_module(name='foo', init=init)
         assert module.py_version == sys.version[:5]
-        assert module.pypy_version == '%d.%d.%d' % sys.pypy_version_info[:3]
+        v = sys.pypy_version_info
+        s = '%d.%d.%d' % (v[0], v[1], v[2])
+        if v.releaselevel != 'final':
+            s += '-%s%d' % (v[3], v[4])
+        assert module.pypy_version == s
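
The updated test reconstructs the expected version string from ``sys.pypy_version_info``, appending the release level and serial whenever the release is not final. A standalone sketch of the same formatting, using a hand-built tuple as a stand-in for the real ``sys.pypy_version_info``::

    from collections import namedtuple

    # hypothetical stand-in for sys.pypy_version_info
    version_info = namedtuple('version_info',
                              'major minor micro releaselevel serial')

    def format_pypy_version(v):
        s = '%d.%d.%d' % (v[0], v[1], v[2])
        if v.releaselevel != 'final':
            s += '-%s%d' % (v[3], v[4])
        return s

    print format_pypy_version(version_info(2, 0, 0, 'beta', 1))   # 2.0.0-beta1
    print format_pypy_version(version_info(1, 9, 0, 'final', 0))  # 1.9.0
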

File pypy/module/posix/__init__.py

         interpleveldefs['chown'] = 'interp_posix.chown'
     if hasattr(os, 'lchown'):
         interpleveldefs['lchown'] = 'interp_posix.lchown'
+    if hasattr(os, 'fchown'):
+        interpleveldefs['fchown'] = 'interp_posix.fchown'
+    if hasattr(os, 'fchmod'):
+        interpleveldefs['fchmod'] = 'interp_posix.fchmod'
     if hasattr(os, 'ftruncate'):
         interpleveldefs['ftruncate'] = 'interp_posix.ftruncate'
     if hasattr(os, 'fsync'):

File pypy/module/posix/interp_posix.py

     except OSError, e:
         raise wrap_oserror2(space, e, w_path)
 
+@unwrap_spec(mode=c_int)
+def fchmod(space, w_fd, mode):
+    """Change the access permissions of the file given by file
+descriptor fd."""
+    fd = space.c_filedescriptor_w(w_fd)
+    try:
+        os.fchmod(fd, mode)
+    except OSError, e:
+        raise wrap_oserror(space, e)
+
 def rename(space, w_old, w_new):
     "Rename a file or directory."
     try:
 
 @unwrap_spec(path='str0', uid=c_uid_t, gid=c_gid_t)
 def chown(space, path, uid, gid):
+    """Change the owner and group id of path to the numeric uid and gid."""
     check_uid_range(space, uid)
     check_uid_range(space, gid)
     try:
         os.chown(path, uid, gid)
     except OSError, e:
         raise wrap_oserror(space, e, path)
-    return space.w_None
 
 @unwrap_spec(path='str0', uid=c_uid_t, gid=c_gid_t)
 def lchown(space, path, uid, gid):
+    """Change the owner and group id of path to the numeric uid and gid.
+This function will not follow symbolic links."""
     check_uid_range(space, uid)
     check_uid_range(space, gid)
     try:
         os.lchown(path, uid, gid)
     except OSError, e:
         raise wrap_oserror(space, e, path)
-    return space.w_None
+
+@unwrap_spec(uid=c_uid_t, gid=c_gid_t)
+def fchown(space, w_fd, uid, gid):
+    """Change the owner and group id of the file given by file descriptor
+fd to the numeric uid and gid."""
+    fd = space.c_filedescriptor_w(w_fd)
+    check_uid_range(space, uid)
+    check_uid_range(space, gid)
+    try:
+        os.fchown(fd, uid, gid)
+    except OSError, e:
+        raise wrap_oserror(space, e)
 
 def getloadavg(space):
     try:

File pypy/module/posix/test/test_posix2.py

             os.symlink('foobar', self.path)
             os.lchown(self.path, os.getuid(), os.getgid())
 
+    if hasattr(os, 'fchown'):
+        def test_fchown(self):
+            os = self.posix
+            f = open(self.path, "w")
+            os.fchown(f.fileno(), os.getuid(), os.getgid())
+            f.close()
+
+    if hasattr(os, 'chmod'):
+        def test_chmod(self):
+            os = self.posix
+            os.unlink(self.path)
+            raises(OSError, os.chmod, self.path, 0600)
+            f = open(self.path, "w")
+            f.write("this is a test")
+            f.close()
+            os.chmod(self.path, 0200)
+            assert (os.stat(self.path).st_mode & 0777) == 0200
+
+    if hasattr(os, 'fchmod'):
+        def test_fchmod(self):
+            os = self.posix
+            f = open(self.path, "w")
+            os.fchmod(f.fileno(), 0200)
+            assert (os.fstat(f.fileno()).st_mode & 0777) == 0200
+            f.close()
+            assert (os.stat(self.path).st_mode & 0777) == 0200
+
     if hasattr(os, 'mkfifo'):
         def test_mkfifo(self):
             os = self.posix

File pypy/module/pypyjit/interp_resop.py

 
     The hook will be called as in: hook(jitdriver_name, greenkey, reason)
 
-    Where reason is the reason for abort, see documentation for set_compile_hook
-    for descriptions of other arguments.
+    Reason is a string giving the cause of the abort; the meaning of the
+    other arguments is the same as the corresponding attributes of a
+    JitLoopInfo object.
     """
     cache = space.fromcache(Cache)
     cache.w_abort_hook = w_hook
         return space.wrap('<JitLoopInfo %s, %d operations, starting at <%s>>' %
                           (self.jd_name, lgt, code_repr))
 
+    def descr_get_bridge_no(self, space):
+        if space.is_none(self.w_green_key):
+            return space.wrap(self.bridge_no)
+        raise OperationError(space.w_TypeError, space.wrap("not a bridge"))
+
+
 @unwrap_spec(loopno=int, asmaddr=int, asmlen=int, loop_no=int,
              type=str, jd_name=str, bridge_no=int)
 def descr_new_jit_loop_info(space, w_subtype, w_greenkey, w_ops, loopno,
-                            asmaddr, asmlen, loop_no, type, jd_name, bridge_no):
+                            asmaddr, asmlen, loop_no, type, jd_name,
+                            bridge_no=-1):
     w_info = space.allocate_instance(W_JitLoopInfo, w_subtype)
     w_info.w_green_key = w_greenkey
     w_info.w_ops = w_ops
                                        "List of operations in this loop."),
     loop_no = interp_attrproperty('loop_no', cls=W_JitLoopInfo, doc=
                                   "Loop cardinal number"),
+    bridge_no = GetSetProperty(W_JitLoopInfo.descr_get_bridge_no,
+                               doc="bridge number (if a bridge)"),
+    type = interp_attrproperty('type', cls=W_JitLoopInfo,
+                               doc="Loop type"),
     __repr__ = interp2app(W_JitLoopInfo.descr_repr),
 )
 W_JitLoopInfo.acceptable_as_base_class = False
     ll_times = jit_hooks.stats_get_loop_run_times(None)
     w_times = space.newdict()
     for i in range(len(ll_times)):
-        space.setitem(w_times, space.wrap(ll_times[i].number),
+        w_key = space.newtuple([space.wrap(ll_times[i].type),
+                                space.wrap(ll_times[i].number)])
+        space.setitem(w_times, w_key,
                       space.wrap(ll_times[i].counter))
     w_counters = space.newdict()
     for i, counter_name in enumerate(Counters.counter_names):
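
At application level the abort hook documented in the hunk above is installed through the ``pypyjit`` module, which is only available when running on PyPy. A minimal usage sketch, assuming the module's ``set_abort_hook`` entry point and the three-argument call shown in the docstring::

    import pypyjit   # PyPy-only module

    def abort_hook(jitdriver_name, greenkey, reason):
        # reason is a string giving the cause of the abort
        print 'JIT abort in %s: %s' % (jitdriver_name, reason)

    pypyjit.set_abort_hook(abort_hook)
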

File pypy/module/pypyjit/test/test_jit_hook.py

         assert info.greenkey[1] == 0
         assert info.greenkey[2] == False
         assert info.loop_no == 0
+        assert info.type == 'loop'
+        raises(TypeError, 'info.bridge_no')
+        assert info.key == ('loop', 0)
         assert len(info.operations) == 4
         int_add = info.operations[0]
         dmp = info.operations[1]

File pypy/module/select/interp_select.py

         if _c.FD_ISSET(fd, ll_list):
             reslist_w.append(list_w[i])
 
+def _call_select(space, iwtd_w, owtd_w, ewtd_w,
+                 ll_inl, ll_outl, ll_errl, ll_timeval):
+    fdlistin  = None
+    fdlistout = None
+    fdlisterr = None
+    nfds = -1
+    if ll_inl:
+        fdlistin, nfds = _build_fd_set(space, iwtd_w, ll_inl, nfds)
+    if ll_outl:
+        fdlistout, nfds = _build_fd_set(space, owtd_w, ll_outl, nfds)
+    if ll_errl:
+        fdlisterr, nfds = _build_fd_set(space, ewtd_w, ll_errl, nfds)
+
+    res = _c.select(nfds + 1, ll_inl, ll_outl, ll_errl, ll_timeval)
+
+    if res < 0:
+        errno = _c.geterrno()
+        msg = _c.socket_strerror_str(errno)
+        w_errortype = space.fromcache(Cache).w_error
+        raise OperationError(w_errortype, space.newtuple([
+            space.wrap(errno), space.wrap(msg)]))
+
+    resin_w = []
+    resout_w = []
+    reserr_w = []
+    if res > 0:
+        if fdlistin is not None:
+            _unbuild_fd_set(space, iwtd_w, fdlistin,  ll_inl,  resin_w)
+        if fdlistout is not None:
+            _unbuild_fd_set(space, owtd_w, fdlistout, ll_outl, resout_w)
+        if fdlisterr is not None:
+            _unbuild_fd_set(space, ewtd_w, fdlisterr, ll_errl, reserr_w)
+    return space.newtuple([space.newlist(resin_w),
+                           space.newlist(resout_w),
+                           space.newlist(reserr_w)])
 
 @unwrap_spec(w_timeout = WrappedDefault(None))
 def select(space, w_iwtd, w_owtd, w_ewtd, w_timeout):
     owtd_w = space.listview(w_owtd)
     ewtd_w = space.listview(w_ewtd)
 
+    if space.is_w(w_timeout, space.w_None):
+        timeout = -1.0
+    else:
+        timeout = space.float_w(w_timeout)
+
     ll_inl  = lltype.nullptr(_c.fd_set.TO)
     ll_outl = lltype.nullptr(_c.fd_set.TO)
     ll_errl = lltype.nullptr(_c.fd_set.TO)
     ll_timeval = lltype.nullptr(_c.timeval)
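
The refactored ``select()`` entry point now converts the app-level timeout up front (``None``, meaning no timeout, becomes the sentinel ``-1.0``) and delegates the fd-set construction, the actual ``select`` call and the result unpacking to the new ``_call_select()`` helper. The application-level behaviour is unchanged; a small self-contained usage sketch on a POSIX pipe::

    import os, select

    # wait up to 1.0 second for the read end of a pipe to become readable
    r, w = os.pipe()
    os.write(w, 'x')
    readable, writable, in_error = select.select([r], [], [], 1.0)
    assert readable == [r]       # data is pending on the read end
    os.close(r)
    os.close(w)
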