Commits

Philip Jenvey committed 35cce2f Merge

merge default

Files changed (99)

lib-python/2.7/ctypes/test/test_python_api.py

         del pyobj
         self.assertEqual(grc(s), ref)
 
+    @xfail
     def test_PyOS_snprintf(self):
         PyOS_snprintf = pythonapi.PyOS_snprintf
         PyOS_snprintf.argtypes = POINTER(c_char), c_size_t, c_char_p

lib_pypy/_ctypes/function.py

                 raise ValueError(
                     "native COM method call without 'this' parameter"
                     )
-            thisarg = cast(args[0], POINTER(POINTER(c_void_p)))
-            keepalives, newargs, argtypes, outargs = self._convert_args(argtypes,
-                                                                        args[1:], kwargs)
-            newargs.insert(0, args[0].value)
+            thisvalue = args.pop(0)
+            thisarg = cast(thisvalue, POINTER(POINTER(c_void_p)))
+            keepalives, newargs, argtypes, outargs, errcheckargs = (
+                        self._convert_args(argtypes, args, kwargs))
+            args.insert(0, thisvalue)
+            newargs.insert(0, thisvalue.value)
             argtypes.insert(0, c_void_p)
         else:
             thisarg = None
-            keepalives, newargs, argtypes, outargs = self._convert_args(argtypes,
-                                                                        args, kwargs)
+            keepalives, newargs, argtypes, outargs, errcheckargs = (
+                        self._convert_args(argtypes, args, kwargs))
 
         funcptr = self._getfuncptr(argtypes, self._restype_, thisarg)
         result = self._call_funcptr(funcptr, *newargs)
-        result = self._do_errcheck(result, args)
+        result, forced = self._do_errcheck(result, errcheckargs)
 
-        if not outargs:
+        if not outargs or forced:
             return result
 
         from ctypes import c_void_p
                 set_last_error(tmp)
         #
         try:
-            return self._build_result(self._restype_, result, newargs)
+            return self._build_result(self._restype_, result)
         finally:
             funcptr.free_temp_buffers()
 
     def _do_errcheck(self, result, args):
         # The 'errcheck' protocol
         if self._errcheck_:
-            v = self._errcheck_(result, self, args)
+            v = self._errcheck_(result, self, tuple(args))
             # If the errcheck function failed, let it throw
             # If the errcheck function returned newargs unchanged,
             # continue normal processing.
             # If the errcheck function returned something else,
             # use that as result.
             if v is not args:
-                return v
-        return result
+                return v, True
+        return result, False
 
     def _getfuncptr_fromaddress(self, argtypes, restype):
         address = self._get_address()
         newargtypes = []
         total = len(args)
         paramflags = self._paramflags
-        inargs_idx = 0
 
         if not paramflags and total < len(argtypes):
             raise TypeError("not enough arguments")
 
-        for i, argtype in enumerate(argtypes):
-            flag = 0
-            name = None
-            defval = marker
-            if paramflags:
+        if paramflags:
+            errcheckargs = []
+            inargs_idx = 0
+            for i, argtype in enumerate(argtypes):
+                flag = 0
+                defval = marker
                 paramflag = paramflags[i]
                 paramlen = len(paramflag)
                 name = None
                     val = defval
                     if val is marker:
                         val = 0
+                    errcheckargs.append(val)
                     keepalive, newarg, newargtype = self._conv_param(argtype, val)
                     keepalives.append(keepalive)
                     newargs.append(newarg)
                         raise TypeError("required argument '%s' missing" % name)
                     else:
                         raise TypeError("not enough arguments")
+                    errcheckargs.append(val)
                     keepalive, newarg, newargtype = self._conv_param(argtype, val)
                     keepalives.append(keepalive)
                     newargs.append(newarg)
                     newargtypes.append(newargtype)
                 elif flag == PARAMFLAG_FOUT:
                     if defval is not marker:
-                        outargs.append(defval)
+                        val = defval
                         keepalive, newarg, newargtype = self._conv_param(argtype, defval)
                     else:
                         import ctypes
                         val = argtype._type_()
-                        outargs.append(val)
                         keepalive = None
                         newarg = ctypes.byref(val)
                         newargtype = type(newarg)
+                    errcheckargs.append(val)
+                    outargs.append(val)
                     keepalives.append(keepalive)
                     newargs.append(newarg)
                     newargtypes.append(newargtype)
                 else:
                     raise ValueError("paramflag %d not yet implemented" % flag)
-            else:
+        else:
+            errcheckargs = args
+            for i, argtype in enumerate(argtypes):
                 try:
                     keepalive, newarg, newargtype = self._conv_param(argtype, args[i])
                 except (UnicodeError, TypeError, ValueError) as e:
                 keepalives.append(keepalive)
                 newargs.append(newarg)
                 newargtypes.append(newargtype)
-                inargs_idx += 1
 
         if len(newargs) < len(args):
             extra = args[len(newargs):]
                 keepalives.append(keepalive)
                 newargs.append(newarg)
                 newargtypes.append(newargtype)
-        return keepalives, newargs, newargtypes, outargs
+        return keepalives, newargs, newargtypes, outargs, errcheckargs
 
     @staticmethod
     def _is_primitive(argtype):
         retval = restype._CData_retval(buf)
         return retval
 
-    def _build_result(self, restype, result, argsandobjs):
+    def _build_result(self, restype, result):
         """Build the function result:
            If there is no OUT parameter, return the actual function result
            If there is one OUT parameter, return it
         # i.e. an array of ints. Now it takes a result, which is already a
         # python object. All places that do "resbuffer[0]" should check that
         # result is actually an int and just use it.
-        #
-        # Also, argsandobjs used to be "args" in __call__, now it's "newargs"
-        # (i.e., the already unwrapped objects). It's used only when we have a
-        # PARAMFLAG_FOUT and it's probably wrong, I'll fix it when I find a
-        # failing test
 
         retval = None
 
             funcptr = self._getfuncptr(argtypes, restype, thisarg)
             try:
                 result = self._call_funcptr(funcptr, *args)
-                result = self._do_errcheck(result, args)
+                result, _ = self._do_errcheck(result, args)
             except (TypeError, ArgumentError, UnicodeDecodeError):
                 assert self._slowpath_allowed
                 return CFuncPtr.__call__(self, *args)
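
A note on the errcheck protocol that `_do_errcheck` now reports back through the `forced` flag: the hook receives `(result, func, args)`; returning `args` unchanged lets normal processing (including output-argument handling) continue, while returning anything else replaces the call's result and, with this change, also skips the outargs post-processing. A minimal application-level sketch, assuming a Unix-like libc:

    import os
    from ctypes import CDLL, c_int, c_size_t, c_ssize_t, c_void_p, get_errno
    from ctypes.util import find_library

    libc = CDLL(find_library("c"), use_errno=True)
    libc.read.argtypes = [c_int, c_void_p, c_size_t]
    libc.read.restype = c_ssize_t

    def check(result, func, args):
        # Returning 'args' unchanged tells ctypes to continue normally;
        # returning any other object replaces the result of the call.
        if result == -1:
            raise OSError(get_errno(), os.strerror(get_errno()))
        return args

    libc.read.errcheck = check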

lib_pypy/_pypy_testcapi.py

     # set link options
     output_filename = modulename + _get_c_extension_suffix()
     if sys.platform == 'win32':
-        # XXX libpypy-c.lib is currently not installed automatically
-        library = os.path.join(thisdir, '..', 'include', 'libpypy-c')
+        # XXX pyconfig.h uses a pragma to link to the import library,
+        #     which is currently python3.lib
+        library = os.path.join(thisdir, '..', 'include', 'python3')
         if not os.path.exists(library + '.lib'):
-            #For a nightly build
-            library = os.path.join(thisdir, '..', 'include', 'python3')
-        if not os.path.exists(library + '.lib'):
-            # For a local translation
-            library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c')
+            # For a local translation or nightly build
+            library = os.path.join(thisdir, '..', 'pypy', 'goal', 'python3')
+        assert os.path.exists(library + '.lib'),'Could not find import library "%s"' % library
         libraries = [library, 'oleaut32']
         extra_ldargs = ['/MANIFEST',  # needed for VC10
                         '/EXPORT:PyInit_' + modulename]

lib_pypy/cffi/__init__.py

 from .api import FFI, CDefError, FFIError
 from .ffiplatform import VerificationError, VerificationMissing
 
-__version__ = "0.8"
-__version_info__ = (0, 8)
+__version__ = "0.8.1"
+__version_info__ = (0, 8, 1)

pypy/doc/conf.py

 # The short X.Y version.
 version = '2.2'
 # The full version, including alpha/beta/rc tags.
-release = '2.2.0'
+release = '2.2.1'
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.

pypy/doc/cpython_differences.rst

 
     _winreg
 
-* Supported by being rewritten in pure Python (possibly using ``ctypes``):
+* Supported by being rewritten in pure Python (possibly using ``cffi``):
   see the `lib_pypy/`_ directory.  Examples of modules that we
   support this way: ``ctypes``, ``cPickle``, ``cmath``, ``dbm``, ``datetime``...
   Note that some modules are both in there and in the list above;
   type and vice versa. For builtin types, a dictionary will be returned that
   cannot be changed (but still looks and behaves like a normal dictionary).
 
-
 .. include:: _ref.txt

pypy/doc/ctypes-implementation.rst

 Here is a list of the limitations and missing features of the
 current implementation:
 
-* No support for ``PyXxx`` functions from ``libpython``, for obvious reasons.
+* ``ctypes.pythonapi`` lets you access the CPython C API emulation layer
+  of PyPy, at your own risk and without doing anything sensible about
+  the GIL.  Since PyPy 2.3, these functions are also named with an extra
+  "Py", for example ``PyPyInt_FromLong()``.  Basically, don't use this,
+  but it might more or less work in simple cases if you do.  (Obviously,
+  assuming the PyObject pointers you get have any particular fields in
+  any particular order is just going to crash.)
 
 * We copy Python strings instead of having pointers to raw buffers
 

pypy/doc/extending.rst

 This document tries to explain how to interface the PyPy python interpreter
 with any external library.
 
-Note: We try to describe state-of-the art, but it
-might fade out of date as this is the front on which things are changing
-in pypy rapidly.
+Right now, there are the following possibilities of providing
+third-party modules for the PyPy python interpreter (in order of
+usefulness):
 
-Possibilities
-=============
+* Write them in pure Python and use CFFI_.
 
-Right now, there are three possibilities of providing third-party modules
-for the PyPy python interpreter (in order of usefulness):
+* Write them in pure Python and use ctypes_.
 
-* Write them in pure python and use ctypes, see ctypes_
-  section
+* Write them in C++ and bind them through Reflex_.
 
-* Write them in pure python and use direct libffi low-level bindings, See
-  \_ffi_ module description.
+* Write them as `RPython mixed modules`_.
 
-* Write them in RPython as mixedmodule_, using *rffi* as bindings.
 
-* Write them in C++ and bind them through Reflex_
+CFFI
+====
 
-.. _ctypes: #CTypes
-.. _\_ffi: #LibFFI
-.. _mixedmodule: #Mixed Modules
+CFFI__ is the recommended way.  It is a way to write pure Python code
+that accesses C libraries.  The idea is to support either ABI- or
+API-level access to C --- so that you can sanely access C libraries
+without depending on details like the exact field order in the C
+structures or the numerical value of all the constants.  It works on
+both CPython (as a separate ``pip install cffi``) and on PyPy, where it
+is included by default.
+
+PyPy's JIT does quite a reasonable job on Python code that calls C
+functions or manipulates C pointers with CFFI.  (As of PyPy 2.2.1, it
+could still be improved, but is already good.)
+
+See the documentation here__.
+
+.. __: http://cffi.readthedocs.org/
+.. __: http://cffi.readthedocs.org/
+
 
 CTypes
 ======
 
-The ctypes module in PyPy is ready to use.
-It's goal is to be as-compatible-as-possible with the
-`CPython ctypes`_ version. Right now it's able to support large examples,
-such as pyglet. PyPy is planning to have a 100% compatible ctypes
-implementation, without the CPython C-level API bindings (so it is very
-unlikely that direct object-manipulation trickery through this API will work).
+The goal of the ctypes module of PyPy is to be as compatible as possible
+with the `CPython ctypes`_ version.  It works for large examples, such
+as pyglet.  PyPy's implementation is not strictly 100% compatible with
+CPython, but close enough for most cases.
 
-We also provide a `ctypes-configure`_ for overcoming the platform dependencies,
-not relying on the ctypes codegen. This tool works by querying gcc about
-platform-dependent details (compiling small snippets of C code and running
-them), so it'll benefit not pypy-related ctypes-based modules as well.
+We also used to provide ``ctypes-configure`` for some API-level access.
+This is now viewed as a precursor of CFFI, which you should use instead.
+More (but older) information is available here__.
+Also, ctypes' performance is not as good as CFFI's.
 
-ctypes call are optimized by the JIT and the resulting machine code contains a
-direct call to the target C function.  However, due to the very dynamic nature
-of ctypes, some overhead over a bare C call is still present, in particular to
-check/convert the types of the parameters.  Moreover, even if most calls are
-optimized, some cannot and thus need to follow the slow path, not optimized by
-the JIT.
+.. _`CPython ctypes`: http://docs.python.org/library/ctypes.html
+.. __: ctypes-implementation.html
 
-.. _`ctypes-configure`: ctypes-implementation.html#ctypes-configure
-.. _`CPython ctypes`: http://docs.python.org/library/ctypes.html
+PyPy implements ctypes as pure Python code around two built-in modules
+called ``_ffi`` and ``_rawffi``, which give a very low-level binding to
+the C library libffi_.  It is nowadays not recommended to use these
+two modules directly.
 
-Pros
-----
+.. _libffi: http://sourceware.org/libffi/
 
-Stable, CPython-compatible API.  Most calls are fast, optimized by JIT.
-
-Cons
-----
-
-Problems with platform-dependency (although we partially solve
-those). Although the JIT optimizes ctypes calls, some overhead is still
-present.  The slow-path is very slow.
-
-
-LibFFI
-======
-
-Mostly in order to be able to write a ctypes module, we developed a very
-low-level libffi bindings called ``_ffi``. (libffi is a C-level library for dynamic calling,
-which is used by CPython ctypes). This library provides stable and usable API,
-although it's API is a very low-level one. It does not contain any
-magic.  It is also optimized by the JIT, but has much less overhead than ctypes.
-
-Pros
-----
-
-It Works. Probably more suitable for a delicate code where ctypes magic goes
-in a way.  All calls are optimized by the JIT, there is no slow path as in
-ctypes.
-
-Cons
-----
-
-It combines disadvantages of using ctypes with disadvantages of using mixed
-modules. CPython-incompatible API, very rough and low-level.
-
-Mixed Modules
-=============
-
-This is the most advanced and powerful way of writing extension modules.
-It has some serious disadvantages:
-
-* a mixed module needs to be written in RPython, which is far more
-  complicated than Python (XXX link)
-
-* due to lack of separate compilation (as of July 2011), each
-  compilation-check requires to recompile whole PyPy python interpreter,
-  which takes 0.5-1h. We plan to solve this at some point in near future.
-
-* although rpython is a garbage-collected language, the border between
-  C and RPython needs to be managed by hand (each object that goes into the
-  C level must be explicitly freed).
-
-Some documentation is available `here`_
-
-.. _`here`: rffi.html
-
-XXX we should provide detailed docs about lltype and rffi, especially if we
-    want people to follow that way.
 
 Reflex
 ======
 
-This method is still experimental and is being exercised on a branch,
-`reflex-support`_, which adds the `cppyy`_ module.
+This method is still experimental.  It adds the `cppyy`_ module.
 The method works by using the `Reflex package`_ to provide reflection
 information of the C++ code, which is then used to automatically generate
 bindings at runtime.
 to work around it in python or with a C++ helper function.
 Although Reflex works on various platforms, the bindings with PyPy have only
 been tested on Linux.
+
+
+RPython Mixed Modules
+=====================
+
+This is the internal way to write built-in extension modules in PyPy.
+It cannot be used by any 3rd-party module: the extension modules are
+*built-in*, not independently loadable DLLs.
+
+This is reserved for special cases: it gives direct access to e.g. the
+details of the JIT, allowing us to tweak its interaction with user code.
+This is how the numpy module is being developed.
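
As a concrete illustration of the ABI-level CFFI usage described in the CFFI section above (a minimal sketch only; it assumes a system where the C math library can be opened by the short name "m"):

    import cffi

    ffi = cffi.FFI()
    ffi.cdef("double sin(double x);")   # declare only the pieces we need
    libm = ffi.dlopen("m")              # ABI level: no compiler involved
    x = libm.sin(1.25)
    assert 0.94 < x < 0.95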

pypy/doc/extradoc.rst

 
 .. _bibtex: https://bitbucket.org/pypy/extradoc/raw/tip/talk/bibtex.bib
 .. _`Runtime Feedback in a Meta-Tracing JIT for Efficient Dynamic Languages`: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/icooolps2011/jit-hints.pdf
-.. _`Allocation Removal by Partial Evaluation in a Tracing JIT`: http://codespeak.net/svn/pypy/extradoc/talk/pepm2011/bolz-allocation-removal.pdf
-.. _`Towards a Jitting VM for Prolog Execution`: http://www.stups.uni-duesseldorf.de/publications/bolz-prolog-jit.pdf
+.. _`Allocation Removal by Partial Evaluation in a Tracing JIT`: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/pepm2011/bolz-allocation-removal.pdf
+.. _`Towards a Jitting VM for Prolog Execution`: http://www.stups.uni-duesseldorf.de/mediawiki/images/a/a7/Pub-BoLeSch2010.pdf
 .. _`High performance implementation of Python for CLI/.NET with JIT compiler generation for dynamic languages`: http://buildbot.pypy.org/misc/antocuni-thesis.pdf
 .. _`How to *not* write Virtual Machines for Dynamic Languages`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dyla2007/dyla.pdf
 .. _`Tracing the Meta-Level: PyPy's Tracing JIT Compiler`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009/bolz-tracing-jit.pdf
 .. _`Faster than C#: Efficient Implementation of Dynamic Languages on .NET`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009-dotnet/cli-jit.pdf
-.. _`Automatic JIT Compiler Generation with Runtime Partial Evaluation`:  http://www.stups.uni-duesseldorf.de/thesis/final-master.pdf
+.. _`Automatic JIT Compiler Generation with Runtime Partial Evaluation`: http://wwwold.cobra.cs.uni-duesseldorf.de/thesis/final-master.pdf
 .. _`RPython: A Step towards Reconciling Dynamically and Statically Typed OO Languages`: http://www.disi.unige.it/person/AnconaD/papers/Recent_abstracts.html#AACM-DLS07
 .. _`EU Reports`: index-report.html
 .. _`Hardware Transactional Memory Support for Lightweight Dynamic Language Evolution`: http://sabi.net/nriley/pubs/dls6-riley.pdf
 So the position of the core PyPy developers is that if anyone wants to
 make an N+1'th attempt with LLVM, they are welcome, and will be happy to
 provide help in the IRC channel, but they are left with the burden of proof
-that it works.
+that (a) it works and (b) it gives important benefits.
 
 ----------------------
 How do I compile PyPy?

pypy/doc/index.rst

 
 * `FAQ`_: some frequently asked questions.
 
-* `Release 2.2.0`_: the latest official release
+* `Release 2.2.1`_: the latest official release
 
 * `PyPy Blog`_: news and status info about PyPy 
 
 .. _`Getting Started`: getting-started.html
 .. _`Papers`: extradoc.html
 .. _`Videos`: video-index.html
-.. _`Release 2.2.0`: http://pypy.org/download.html
+.. _`Release 2.2.1`: http://pypy.org/download.html
 .. _`speed.pypy.org`: http://speed.pypy.org
 .. _`RPython toolchain`: translation.html
 .. _`potential project ideas`: project-ideas.html

pypy/doc/release-2.2.1.rst

+=======================================
+PyPy 2.2.1 - Incrementalism.1
+=======================================
+
+We're pleased to announce PyPy 2.2.1, which targets version 2.7.3 of the Python
+language. This is a bugfix release over 2.2.
+
+You can download the PyPy 2.2.1 release here:
+
+    http://pypy.org/download.html
+
+What is PyPy?
+=============
+
+PyPy is a very compliant Python interpreter, almost a drop-in replacement for
+CPython 2.7. It's fast (`pypy 2.2 and cpython 2.7.2`_ performance comparison)
+due to its integrated tracing JIT compiler.
+
+This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows
+32, or ARM (ARMv6 or ARMv7, with VFPv3).
+
+Work on native Windows 64-bit support is still stalled; we would welcome a volunteer
+to handle that.
+
+.. _`pypy 2.2 and cpython 2.7.2`: http://speed.pypy.org
+
+Highlights
+==========
+
+This is a bugfix release.  The most important bugs fixed are:
+
+* an issue in sockets' reference counting emulation, showing up
+  notably when using the ssl module and calling ``makefile()``.
+
+* Tkinter support on Windows.
+
+* If sys.maxunicode==65535 (on Windows and maybe OS X), the json
+  decoder incorrectly decoded surrogate pairs.
+
+* some FreeBSD fixes.
+
+Note that CFFI 0.8.1 was released.  Both versions 0.8 and 0.8.1 are
+compatible with both PyPy 2.2 and 2.2.1.
+
+
+Cheers,
+Armin Rigo & everybody

pypy/doc/whatsnew-head.rst

 
 .. branch: voidtype_strformat
 Better support for record numpy arrays
+
+.. branch: osx-eci-frameworks-makefile
+OSX: Ensure frameworks end up in Makefile when specified in External compilation info
+
+.. branch: less-stringly-ops
+Use subclasses of SpaceOperation instead of SpaceOperator objects.
+Random cleanups in flowspace and annotator.
+
+.. branch: ndarray-buffer
+adds support for the buffer= argument to the ndarray ctor
+
+.. branch: better_ftime_detect2
+On OpenBSD do not pull in libcompat.a as it is about to be removed.
+And more generally, if you have gettimeofday(2) you will not need ftime(3).
+
+.. branch: timeb_h
+Remove dependency upon <sys/timeb.h> on OpenBSD. This will be disappearing
+along with libcompat.a.
+
+.. branch: OlivierBlanvillain/fix-3-broken-links-on-pypy-published-pap-1386250839215
+Fix 3 broken links on PyPy published papers in docs.

pypy/interpreter/buffer.py

     def get_raw_address(self):
         raise ValueError("no raw buffer")
 
+    def is_writable(self):
+        return False
+
     # __________ app-level support __________
 
     def descr_len(self, space):
 
     __slots__ = ()     # no extra slot here
 
+    def is_writable(self):
+        return True
+
     def setitem(self, index, char):
         "Write a character into the buffer."
         raise NotImplementedError   # Must be overridden.  No bounds checks.

pypy/module/_cffi_backend/cbuffer.py

     def descr__buffer__(self, space):
         return self.buffer.descr__buffer__(space)
 
+    def descr_str(self, space):
+        return space.wrap(self.buffer.as_str())
+
 
 MiniBuffer.typedef = TypeDef(
     "buffer",
     __setitem__ = interp2app(MiniBuffer.descr_setitem),
     __buffer__ = interp2app(MiniBuffer.descr__buffer__),
     __weakref__ = make_weakref_descr(MiniBuffer),
+    __str__ = interp2app(MiniBuffer.descr_str),
     )
 MiniBuffer.typedef.acceptable_as_base_class = False
 

pypy/module/_cffi_backend/cdataobj.py

     def get_array_length(self):
         return self.length
 
+    def _sizeof(self):
+        from pypy.module._cffi_backend.ctypeptr import W_CTypePtrOrArray
+        ctype = self.ctype
+        assert isinstance(ctype, W_CTypePtrOrArray)
+        return self.length * ctype.ctitem.size
+
 
 class W_CDataHandle(W_CData):
     _attrs_ = ['w_keepalive']

pypy/module/_cffi_backend/test/_backend_test_c.py

     c = newp(BCharArray, b"hi there")
     #
     buf = buffer(c)
-    assert str(buf).startswith('<_cffi_backend.buffer object at 0x')
+    assert repr(buf).startswith('<_cffi_backend.buffer object at 0x')
+    assert bytes(buf) == b"hi there\x00"
+    if sys.version_info < (3,):
+        assert str(buf) == "hi there\x00"
+        assert unicode(buf) == u+"hi there\x00"
+    else:
+        assert str(buf) == repr(buf)
     # --mb_length--
     assert len(buf) == len(b"hi there\x00")
     # --mb_item--
     py.test.raises(TypeError, "p + cast(new_primitive_type('int'), 42)")
     py.test.raises(TypeError, "p - cast(new_primitive_type('int'), 42)")
 
+def test_sizeof_sliced_array():
+    BInt = new_primitive_type("int")
+    BArray = new_array_type(new_pointer_type(BInt), 10)
+    p = newp(BArray, None)
+    assert sizeof(p[2:9]) == 7 * sizeof(BInt)
+
 
 def test_version():
     # this test is here mostly for PyPy

pypy/module/cpyext/api.py

 
     'PyCObject_FromVoidPtr', 'PyCObject_FromVoidPtrAndDesc', 'PyCObject_AsVoidPtr',
     'PyCObject_GetDesc', 'PyCObject_Import', 'PyCObject_SetVoidPtr',
-    'PyCObject_Type', 'init_pycobject',
+    'PyCObject_Type', '_Py_init_pycobject',
 
     'PyCapsule_New', 'PyCapsule_IsValid', 'PyCapsule_GetPointer',
     'PyCapsule_GetName', 'PyCapsule_GetDestructor', 'PyCapsule_GetContext',
     'PyCapsule_SetPointer', 'PyCapsule_SetName', 'PyCapsule_SetDestructor',
-    'PyCapsule_SetContext', 'PyCapsule_Import', 'PyCapsule_Type', 'init_capsule',
+    'PyCapsule_SetContext', 'PyCapsule_Import', 'PyCapsule_Type', '_Py_init_capsule',
 
     'PyObject_AsReadBuffer', 'PyObject_AsWriteBuffer', 'PyObject_CheckReadBuffer',
 
         globals()['va_get_%s' % name_no_star] = func
 
 def setup_init_functions(eci, translating):
-    init_pycobject = rffi.llexternal('init_pycobject', [], lltype.Void,
-                                     compilation_info=eci, _nowrapper=True)
-    init_capsule = rffi.llexternal('init_capsule', [], lltype.Void,
+    if translating:
+        prefix = 'PyPy'
+    else:
+        prefix = 'cpyexttest'
+    init_pycobject = rffi.llexternal('_%s_init_pycobject' % prefix, [], lltype.Void,
+                                     compilation_info=eci, _nowrapper=True)
+    init_capsule = rffi.llexternal('_%s_init_capsule' % prefix, [], lltype.Void,
                                    compilation_info=eci, _nowrapper=True)
     INIT_FUNCTIONS.extend([
         lambda space: init_pycobject(),
         lambda space: init_capsule(),
     ])
     from pypy.module.posix.interp_posix import add_fork_hook
-    prefix = 'Py' if translating else 'PyPy'
-    reinit_tls = rffi.llexternal(prefix + 'Thread_ReInitTLS', [], lltype.Void,
+    reinit_tls = rffi.llexternal('%sThread_ReInitTLS' % prefix, [], lltype.Void,
                                  compilation_info=eci)
     global py_fatalerror
-    py_fatalerror = rffi.llexternal(prefix + '_FatalError',
+    py_fatalerror = rffi.llexternal('%s_FatalError' % prefix,
                                     [CONST_STRING], lltype.Void,
                                     compilation_info=eci)
     add_fork_hook('child', reinit_tls)
     from rpython.translator.c.database import LowLevelDatabase
     db = LowLevelDatabase()
 
-    generate_macros(export_symbols, rename=True, do_deref=True)
+    generate_macros(export_symbols, prefix='cpyexttest')
 
     # Structure declaration code
     members = []
 
         INTERPLEVEL_API[name] = w_obj
 
-        name = name.replace('Py', 'PyPy')
+        name = name.replace('Py', 'cpyexttest')
         if isptr:
             ptr = ctypes.c_void_p.in_dll(bridge, name)
             if typ == 'PyObject*':
             ptr.value = ctypes.cast(ll2ctypes.lltype2ctypes(value),
                                     ctypes.c_void_p).value
         elif typ in ('PyObject*', 'PyTypeObject*'):
-            if name.startswith('PyPyExc_'):
+            if name.startswith('PyPyExc_') or name.startswith('cpyexttestExc_'):
                 # we already have the pointer
                 in_dll = ll2ctypes.get_ctypes_type(PyObject).in_dll(bridge, name)
                 py_obj = ll2ctypes.ctypes2lltype(PyObject, in_dll)
     setup_init_functions(eci, translating=False)
     return modulename.new(ext='')
 
-def generate_macros(export_symbols, rename=True, do_deref=True):
+def mangle_name(prefix, name):
+    if name.startswith('Py'):
+        return prefix + name[2:]
+    elif name.startswith('_Py'):
+        return '_' + prefix + name[3:]
+    else:
+        return None
+
+def generate_macros(export_symbols, prefix):
     "NOT_RPYTHON"
     pypy_macros = []
     renamed_symbols = []
     for name in export_symbols:
-        if name.startswith("PyPy"):
-            renamed_symbols.append(name)
-            continue
-        if not rename:
-            continue
         name = name.replace("#", "")
-        newname = name.replace('Py', 'PyPy')
-        if not rename:
-            newname = name
+        newname = mangle_name(prefix, name)
+        assert newname, name
         pypy_macros.append('#define %s %s' % (name, newname))
         if name.startswith("PyExc_"):
             pypy_macros.append('#define _%s _%s' % (name, newname))
         renamed_symbols.append(newname)
-    if rename:
-        export_symbols[:] = renamed_symbols
-    else:
-        export_symbols[:] = [sym.replace("#", "") for sym in export_symbols]
+    export_symbols[:] = renamed_symbols
 
     # Generate defines
     for macro_name, size in [
     from rpython.translator.c.database import LowLevelDatabase
     db = LowLevelDatabase()
 
-    generate_macros(export_symbols, rename=False, do_deref=False)
+    generate_macros(export_symbols, prefix='PyPy')
 
     functions = generate_decls_and_callbacks(db, [], api_struct=False)
     code = "#include <Python.h>\n" + "\n".join(functions)
         export_struct(name, struct)
 
     for name, func in FUNCTIONS.iteritems():
-        deco = entrypoint_lowlevel("cpyext", func.argtypes, name, relax=True)
+        newname = mangle_name('PyPy', name) or name
+        deco = entrypoint_lowlevel("cpyext", func.argtypes, newname, relax=True)
         deco(func.get_wrapper(space))
 
     setup_init_functions(eci, translating=True)
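
To make the renaming scheme above easier to follow: `mangle_name(prefix, name)` maps a `Py...` symbol to `<prefix>...`, a `_Py...` symbol to `_<prefix>...`, and returns None otherwise, with the prefix being `PyPy` when translating and `cpyexttest` for in-process tests. A standalone restatement of the same rule:

    def mangle_name(prefix, name):
        # Same rule as in pypy/module/cpyext/api.py above.
        if name.startswith('Py'):
            return prefix + name[2:]
        elif name.startswith('_Py'):
            return '_' + prefix + name[3:]
        else:
            return None

    assert mangle_name('PyPy', 'PyThread_ReInitTLS') == 'PyPyThread_ReInitTLS'
    assert mangle_name('PyPy', '_Py_init_capsule') == '_PyPy_init_capsule'
    assert mangle_name('cpyexttest', 'PyExc_ValueError') == 'cpyexttestExc_ValueError'
    assert mangle_name('PyPy', 'init_pycobject') is None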

pypy/module/cpyext/include/pycapsule.h

 
 PyAPI_FUNC(void *) PyCapsule_Import(const char *name, int no_block);
 
-void init_capsule(void);
+void _Py_init_capsule(void);
 
 #ifdef __cplusplus
 }

pypy/module/cpyext/include/pycobject.h

 } PyCObject;
 #endif
 
-void init_pycobject(void);
+void _Py_init_pycobject(void);
  
 #ifdef __cplusplus
 }

pypy/module/cpyext/src/capsule.c

     PyCapsule_Type__doc__       /*tp_doc*/
 };
 
-void init_capsule()
+void _Py_init_capsule()
 {
     PyType_Ready(&PyCapsule_Type);
 }

pypy/module/cpyext/src/cobject.c

     PyCObject_Type__doc__	/*tp_doc*/
 };
 
-void init_pycobject()
+void _Py_init_pycobject()
 {
     PyType_Ready(&PyCObject_Type);
 }

pypy/module/cpyext/test/test_thread.py

         module = self.import_extension('foo', [
             ("get_thread_ident", "METH_NOARGS",
              """
-                 /* Use the 'PyPy' prefix to ensure we access our functions */
-                 return PyLong_FromLong(PyPyThread_get_thread_ident());
+#ifndef PyThread_get_thread_ident
+#error "seems we are not accessing PyPy's functions"
+#endif
+                 return PyLong_FromLong(PyThread_get_thread_ident());
              """),
             ])
         import threading
         module = self.import_extension('foo', [
             ("test_acquire_lock", "METH_NOARGS",
              """
-                 /* Use the 'PyPy' prefix to ensure we access our functions */
-                 PyThread_type_lock lock = PyPyThread_allocate_lock();
-                 if (PyPyThread_acquire_lock(lock, 1) != 1) {
+#ifndef PyThread_allocate_lock
+#error "seems we are not accessing PyPy's functions"
+#endif
+                 PyThread_type_lock lock = PyThread_allocate_lock();
+                 if (PyThread_acquire_lock(lock, 1) != 1) {
                      PyErr_SetString(PyExc_AssertionError, "first acquire");
                      return NULL;
                  }
-                 if (PyPyThread_acquire_lock(lock, 0) != 0) {
+                 if (PyThread_acquire_lock(lock, 0) != 0) {
                      PyErr_SetString(PyExc_AssertionError, "second acquire");
                      return NULL;
                  }
-                 PyPyThread_free_lock(lock);
+                 PyThread_free_lock(lock);
 
                  Py_RETURN_NONE;
              """),
         module = self.import_extension('foo', [
             ("test_release_lock", "METH_NOARGS",
              """
-                 /* Use the 'PyPy' prefix to ensure we access our functions */
-                 PyThread_type_lock lock = PyPyThread_allocate_lock();
-                 PyPyThread_acquire_lock(lock, 1);
-                 PyPyThread_release_lock(lock);
-                 if (PyPyThread_acquire_lock(lock, 0) != 1) {
+#ifndef PyThread_release_lock
+#error "seems we are not accessing PyPy's functions"
+#endif           
+                 PyThread_type_lock lock = PyThread_allocate_lock();
+                 PyThread_acquire_lock(lock, 1);
+                 PyThread_release_lock(lock);
+                 if (PyThread_acquire_lock(lock, 0) != 1) {
                      PyErr_SetString(PyExc_AssertionError, "first acquire");
                      return NULL;
                  }
-                 PyPyThread_free_lock(lock);
+                 PyThread_free_lock(lock);
 
                  Py_RETURN_NONE;
              """),

pypy/module/math/app_math.py

         x = fl
     if x > sys.maxsize:
         raise OverflowError("Too large for a factorial")
-    if x < 0:
-        raise ValueError("x must be >= 0")
-    res = 1
-    for i in range(1, x + 1):
-        res *= i
-    return res
+
+    if x <= 100:
+        if x < 0:
+            raise ValueError("x must be >= 0")
+        res = 1
+        for i in range(2, x + 1):
+            res *= i
+        return res
+
+    #Experimentally this gap seems good
+    gap = max(100, x>>7)
+    def _fac_odd(low, high):
+        if low+gap >= high:
+            t = 1
+            for i in range(low, high, 2):
+                t *= i
+            return t
+        
+        mid = ((low + high) >> 1) | 1
+        return _fac_odd(low, mid) * _fac_odd(mid, high)
+
+    def _fac1(x):
+        if x <= 2:
+            return 1, 1, x - 1
+        x2 = x >> 1
+        f, g, shift = _fac1(x2)
+        g *= _fac_odd((x2 + 1) | 1, x + 1)
+        return (f * g, g, shift + x2)
+
+    res, _, shift = _fac1(x)
+    return res << shift
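
The rewritten factorial splits x! into a recursively computed odd product and a final power of two applied as a shift. The shift accumulated by `_fac1` equals the exponent of two in x!, which by Legendre's formula is x minus the number of set bits of x. A small standalone check of that identity (a sketch for illustration only, not part of the module):

    import math

    def exponent_of_two(n):
        # Number of factors of two in n!, via Legendre's formula.
        e = 0
        while n:
            n >>= 1
            e += n
        return e

    for n in range(2, 300):
        assert exponent_of_two(n) == n - bin(n).count('1')
        odd_part = math.factorial(n) >> exponent_of_two(n)
        assert odd_part % 2 == 1
        assert odd_part << exponent_of_two(n) == math.factorial(n)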

pypy/module/math/test/test_factorial.py

+import py
+import math
+from pypy.module.math import app_math
+
+def test_factorial_extra():
+    for x in range(1000):
+        r1 = app_math.factorial(x)
+        r2 = math.factorial(x)
+        assert r1 == r2
+        assert type(r1) == type(r2)
+
+def test_timing():
+    py.test.skip("for manual running only")
+    import time
+    x = 5000
+    repeat = 1000
+    r1 = app_math.factorial(x)
+    r2 = math.factorial(x)
+    assert r1 == r2
+    t1 = time.time()
+    for i in range(repeat):
+        app_math.factorial(x)
+    t2 = time.time()
+    for i in range(repeat):
+        math.factorial(x)
+    t3 = time.time()
+    assert r1 == r2
+    print (t2 - t1) / repeat
+    print (t3 - t2) / repeat

pypy/module/micronumpy/app_numpy.py

     if dtype is None:
         test = _numpypy.multiarray.array([start, stop, step, 0])
         dtype = test.dtype
-    arr = _numpypy.multiarray.zeros(int(math.ceil((stop - start) / step)), dtype=dtype)
+    length = math.ceil((float(stop) - start) / step)
+    length = int(length)
+    arr = _numpypy.multiarray.zeros(length, dtype=dtype)
     i = start
     for j in range(arr.size):
         arr[j] = i
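
The point of the `float(stop)` change above: under Python 2 integer division, `(stop - start) / step` is truncated before `math.ceil` ever sees it, so `arange(3, 8, 2)` got length 2 instead of 3. A sketch of the two computations (Python 2 semantics):

    import math

    start, stop, step = 3, 8, 2
    old_length = int(math.ceil((stop - start) / step))          # 5 / 2 == 2, ceil(2) == 2
    new_length = int(math.ceil((float(stop) - start) / step))   # 5.0 / 2 == 2.5, ceil -> 3
    assert (old_length, new_length) == (2, 3)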

pypy/module/micronumpy/arrayimpl/concrete.py

     def __del__(self):
         free_raw_storage(self.storage, track_allocation=False)
 
+class ConcreteArrayWithBase(ConcreteArrayNotOwning):
+    def __init__(self, shape, dtype, order, strides, backstrides, storage, orig_base):
+        ConcreteArrayNotOwning.__init__(self, shape, dtype, order,
+                                        strides, backstrides, storage)
+        self.orig_base = orig_base
+
+    def base(self):
+        return self.orig_base
+
+
+class ConcreteNonWritableArrayWithBase(ConcreteArrayWithBase):
+    def descr_setitem(self, space, orig_array, w_index, w_value):
+        raise OperationError(space.w_ValueError, space.wrap(
+            "assignment destination is read-only"))
+
 
 class NonWritableArray(ConcreteArray):
     def descr_setitem(self, space, orig_array, w_index, w_value):

pypy/module/micronumpy/base.py

         return W_NDimArray(impl)
 
     @staticmethod
-    def from_shape_and_storage(space, shape, storage, dtype, order='C', owning=False, w_subtype=None):
+    def from_shape_and_storage(space, shape, storage, dtype, order='C', owning=False,
+                               w_subtype=None, w_base=None, writable=True):
         from pypy.module.micronumpy.arrayimpl import concrete
         assert shape
         strides, backstrides = calc_strides(shape, dtype, order)
-        if owning:
+        if w_base is not None:
+            if owning:
+                raise OperationError(space.w_ValueError, 
+                        space.wrap("Cannot have owning=True when specifying a buffer"))
+            if writable:
+                impl = concrete.ConcreteArrayWithBase(shape, dtype, order, strides,
+                                                      backstrides, storage, w_base)
+            else:
+                impl = concrete.ConcreteNonWritableArrayWithBase(shape, dtype, order,
+                                                                 strides, backstrides,
+                                                                 storage, w_base)
+
+        elif owning:
             # Will free storage when GCd
             impl = concrete.ConcreteArray(shape, dtype, order, strides,
                                                 backstrides, storage=storage)

pypy/module/micronumpy/interp_numarray.py

+from rpython.rtyper.lltypesystem import rffi
+from rpython.rlib.rawstorage import RAW_STORAGE_PTR
 from pypy.interpreter.error import operationerrfmt, OperationError
 from pypy.interpreter.typedef import TypeDef, GetSetProperty, make_weakref_descr
 from pypy.interpreter.gateway import interp2app, unwrap_spec, applevel, \
 from rpython.rlib.rstring import StringBuilder
 from pypy.module.micronumpy.arrayimpl.base import BaseArrayImplementation
 from pypy.module.micronumpy.conversion_utils import order_converter, multi_axis_converter
+from pypy.module.micronumpy import support
 from pypy.module.micronumpy.constants import *
 
 def _find_shape(space, w_size, dtype):
                     offset=0, w_strides=None, order='C'):
     from pypy.module.micronumpy.arrayimpl.concrete import ConcreteArray
     from pypy.module.micronumpy.support import calc_strides
-    if (offset != 0 or not space.is_none(w_strides) or
-        not space.is_none(w_buffer)):
-        raise OperationError(space.w_NotImplementedError,
-                             space.wrap("unsupported param"))
     dtype = space.interp_w(interp_dtype.W_Dtype,
           space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype))
     shape = _find_shape(space, w_shape, dtype)
+
+    if not space.is_none(w_buffer):
+        if (not space.is_none(w_strides)):
+            raise OperationError(space.w_NotImplementedError,
+                                 space.wrap("unsupported param"))
+
+        buf = space.buffer_w(w_buffer)
+        try:
+            raw_ptr = buf.get_raw_address()
+        except ValueError:
+            raise OperationError(space.w_TypeError, space.wrap(
+                "Only raw buffers are supported"))
+        if not shape:
+            raise OperationError(space.w_TypeError, space.wrap(
+                "numpy scalars from buffers not supported yet"))
+        totalsize = support.product(shape) * dtype.get_size()
+        if totalsize+offset > buf.getlength():
+            raise OperationError(space.w_TypeError, space.wrap(
+                "buffer is too small for requested array"))
+        storage = rffi.cast(RAW_STORAGE_PTR, raw_ptr)
+        storage = rffi.ptradd(storage, offset)
+        return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype,
+                                                  w_subtype=w_subtype,
+                                                  w_base=w_buffer,
+                                                  writable=buf.is_writable())
+
     if not shape:
         return W_NDimArray.new_scalar(space, dtype)
     if space.is_w(w_subtype, space.gettypefor(W_NDimArray)):
     Create an array from an existing buffer, given its address as int.
     PyPy-only implementation detail.
     """
-    from rpython.rtyper.lltypesystem import rffi
-    from rpython.rlib.rawstorage import RAW_STORAGE_PTR
     storage = rffi.cast(RAW_STORAGE_PTR, addr)
     dtype = space.interp_w(interp_dtype.W_Dtype,
                      space.call_function(space.gettypefor(interp_dtype.W_Dtype),

pypy/module/micronumpy/iter.py

         final_strides = arr.get_strides() + strides
         final_backstrides = arr.get_backstrides() + backstrides
         final_dtype = subdtype
-        print self.name,'strides',arr.get_strides(),strides
         if subdtype.subdtype:
             final_dtype = subdtype.subdtype
         return W_NDimArray.new_slice(space, arr.start + ofs, final_strides,

pypy/module/micronumpy/loop.py

     while not out_iter.done():
         axis_reduce__driver.jit_merge_point(shapelen=shapelen, func=func,
                                             dtype=dtype)
-        w_val = arr_iter.getitem().convert_to(dtype)
-        if out_iter.first_line:
-            if identity is not None:
-                w_val = func(dtype, identity, w_val)
+        if arr_iter.done():
+            w_val = identity
         else:
-            cur = temp_iter.getitem()
-            w_val = func(dtype, cur, w_val)
+            w_val = arr_iter.getitem().convert_to(dtype)
+            if out_iter.first_line:
+                if identity is not None:
+                    w_val = func(dtype, identity, w_val)
+            else:
+                cur = temp_iter.getitem()
+                w_val = func(dtype, cur, w_val)
         out_iter.setitem(w_val)
         if cumulative:
             temp_iter.setitem(w_val)

pypy/module/micronumpy/test/test_numarray.py

 
 class AppTestNumArray(BaseNumpyAppTest):
     spaceconfig = dict(usemodules=["micronumpy", "struct", "binascii"])
+
     def w_CustomIndexObject(self, index):
         class CustomIndexObject(object):
             def __init__(self, index):
         assert a.dtype is dtype(int)
         a = arange(3, 7, 2)
         assert (a == [3, 5]).all()
+        a = arange(3, 8, 2)
+        assert (a == [3, 5, 7]).all()
         a = arange(3, dtype=float)
         assert (a == [0., 1., 2.]).all()
         assert a.dtype is dtype(float)
         assert a.sum() == 105
         assert a.max() == 14
         assert array([]).sum() == 0.0
+        assert array([]).reshape(0, 2).sum() == 0.
+        assert (array([]).reshape(0, 2).sum(0) == [0., 0.]).all()
+        assert (array([]).reshape(0, 2).prod(0) == [1., 1.]).all()
         raises(ValueError, 'array([]).max()')
         assert (a.sum(0) == [30, 35, 40]).all()
         assert (a.sum(axis=0) == [30, 35, 40]).all()
         a = np.ndarray([1], dtype=bool)
         assert a[0] == True
 
+
+class AppTestNumArrayFromBuffer(BaseNumpyAppTest):
+    spaceconfig = dict(usemodules=["micronumpy", "array", "mmap"])
+
+    def setup_class(cls):
+        from rpython.tool.udir import udir
+        BaseNumpyAppTest.setup_class.im_func(cls)
+        cls.w_tmpname = cls.space.wrap(str(udir.join('mmap-')))
+
+    def test_ndarray_from_buffer(self):
+        import numpypy as np
+        import array
+        buf = array.array('c', ['\x00']*2*3)
+        a = np.ndarray((3,), buffer=buf, dtype='i2')
+        a[0] = ord('b')
+        a[1] = ord('a')
+        a[2] = ord('r')
+        assert list(buf) == ['b', '\x00', 'a', '\x00', 'r', '\x00']
+        assert a.base is buf
+
+    def test_ndarray_subclass_from_buffer(self):
+        import numpypy as np
+        import array
+        buf = array.array('c', ['\x00']*2*3)
+        class X(np.ndarray):
+            pass
+        a = X((3,), buffer=buf, dtype='i2')
+        assert type(a) is X
+
+    def test_ndarray_from_buffer_and_offset(self):
+        import numpypy as np
+        import array
+        buf = array.array('c', ['\x00']*7)
+        buf[0] = 'X'
+        a = np.ndarray((3,), buffer=buf, offset=1, dtype='i2')
+        a[0] = ord('b')
+        a[1] = ord('a')
+        a[2] = ord('r')
+        assert list(buf) == ['X', 'b', '\x00', 'a', '\x00', 'r', '\x00']
+
+    def test_ndarray_from_buffer_out_of_bounds(self):
+        import numpypy as np
+        import array
+        buf = array.array('c', ['\x00']*2*10) # 20 bytes
+        info = raises(TypeError, "np.ndarray((11,), buffer=buf, dtype='i2')")
+        assert str(info.value).startswith('buffer is too small')
+        info = raises(TypeError, "np.ndarray((5,), buffer=buf, offset=15, dtype='i2')")
+        assert str(info.value).startswith('buffer is too small')
+
+    def test_ndarray_from_readonly_buffer(self):
+        import numpypy as np
+        from mmap import mmap, ACCESS_READ
+        f = open(self.tmpname, "w+")
+        f.write("hello")
+        f.flush()
+        buf = mmap(f.fileno(), 5, access=ACCESS_READ)
+        a = np.ndarray((5,), buffer=buf, dtype='c')
+        raises(ValueError, "a[0] = 'X'")
+        buf.close()
+        f.close()
+
+
+
 class AppTestMultiDim(BaseNumpyAppTest):
     def test_init(self):
         import numpypy
         exc = raises(IndexError, "a[0][None]")
         assert exc.value.message == "invalid index"
 
-        exc = raises(IndexError, "a[0][None]")
-        assert exc.value.message == 'invalid index'
-
         a[0]["x"][0] = 200
         assert a[0]["x"][0] == 200
 

pypy/module/mmap/interp_mmap.py

         self.check_valid_writeable()
         self.mmap.setslice(start, string)
 
+    def is_writable(self):
+        try:
+            self.mmap.check_writeable()
+        except RMMapError:
+            return False
+        else:
+            return True
+
     def get_raw_address(self):
         self.check_valid()
         return self.mmap.data

pypy/module/posix/app_startfile.py

         ffi.cdef("""
         HINSTANCE ShellExecuteA(HWND, LPCSTR, LPCSTR, LPCSTR, LPCSTR, INT);
         HINSTANCE ShellExecuteW(HWND, LPCWSTR, LPCWSTR, LPCWSTR, LPCWSTR, INT);
-        DWORD GetLastError(void);
         """)
         self.NULL = ffi.NULL
         self.cast = ffi.cast

pypy/module/pypyjit/interp_jit.py

 """This is not the JIT :-)
 
-This is transformed to become a JIT by code elsewhere: pypy/jit/*
+This is transformed to become a JIT by code elsewhere: rpython/jit/*
 """
 
-from rpython.tool.pairtype import extendabletype
 from rpython.rlib.rarithmetic import r_uint, intmask
 from rpython.rlib.jit import JitDriver, hint, we_are_jitted, dont_look_inside
 from rpython.rlib import jit
 from rpython.rlib.jit import current_trace_length, unroll_parameters
 import pypy.interpreter.pyopcode   # for side-effects
 from pypy.interpreter.error import OperationError, operationerrfmt
-from pypy.interpreter.pycode import PyCode, CO_GENERATOR
+from pypy.interpreter.pycode import CO_GENERATOR
 from pypy.interpreter.pyframe import PyFrame
 from pypy.interpreter.pyopcode import ExitFrame, Yield
 from opcode import opmap
 
+
 PyFrame._virtualizable_ = ['last_instr', 'pycode',
                            'valuestackdepth', 'locals_stack_w[*]',
                            'cells[*]',

pypy/module/pypyjit/test_pypy_c/test_math.py

             f1 = cast_int_to_float(i0)
             i6 = --ISINF--(f1)
             guard_false(i6, descr=...)
-            f2 = call(ConstClass(sin), f1, descr=<Callf . f EF=2>)
-            f3 = call(ConstClass(cos), f1, descr=<Callf . f EF=2>)
+            f2 = call(ConstClass(sin), f1, descr=<Callf . f EF=0>)
+            f3 = call(ConstClass(cos), f1, descr=<Callf . f EF=0>)
             f4 = float_sub(f2, f3)
             f5 = float_add(f0, f4)
             i7 = int_add(i0, f1)

pypy/module/test_lib_pypy/cffi_tests/test_version.py

     '0.4.2': '0.4',     # did not change
     '0.7.1': '0.7',     # did not change
     '0.7.2': '0.7',     # did not change
+    '0.8.1': '0.8',     # did not change
     }
 
 def test_version():

pypy/module/test_lib_pypy/ctypes_tests/_ctypes_test.c

     errno = result + 1;
     return result;
 }
+
+EXPORT(int *) test_issue1655(char const *tag, int *len)
+{
+    static int data[] = { -1, -2, -3, -4 };
+    *len = -42;
+    if (strcmp(tag, "testing!") != 0)
+        return NULL;
+    *len = sizeof(data) / sizeof(data[0]);
+    return data;
+}

pypy/module/test_lib_pypy/ctypes_tests/test_functions.py

         assert (res, n) == (42, 43)
         set_errno(0)
         assert get_errno() == 0
+
+    def test_issue1655(self):
+        def ret_list_p(icount):
+            def sz_array_p(obj, func, args):
+                assert ('.LP_c_int object' in repr(obj) or
+                        '.LP_c_long object' in repr(obj))
+                assert repr(args) in ("('testing!', c_int(4))",
+                                      "('testing!', c_long(4))")
+                assert args[icount].value == 4
+                return [ obj[i] for i in range(args[icount].value) ]
+            return sz_array_p
+
+        get_data_prototype = CFUNCTYPE(POINTER(c_int),
+                                       c_char_p, POINTER(c_int))
+        get_data_paramflag = ((1,), (2,))
+        get_data_signature = ('test_issue1655', dll)
+
+        get_data = get_data_prototype( get_data_signature, get_data_paramflag )
+        assert get_data('testing!') == 4
+
+        get_data.errcheck = ret_list_p(1)
+        assert get_data('testing!') == [-1, -2, -3, -4]
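
For context on the paramflags used above: in ctypes, a paramflag of 1 marks an input parameter and 2 marks an output parameter, so with `((1,), (2,))` the wrapper allocates the `c_int` for `len` itself, passes it by reference, and forwards it (together with the other call arguments) to `errcheck`. The same call written out by hand, without paramflags, would look roughly like this (a sketch reusing the test's `dll` handle):

    from ctypes import POINTER, byref, c_char_p, c_int

    dll.test_issue1655.restype = POINTER(c_int)
    dll.test_issue1655.argtypes = [c_char_p, POINTER(c_int)]

    n = c_int()
    data = dll.test_issue1655('testing!', byref(n))
    assert n.value == 4
    assert [data[i] for i in range(n.value)] == [-1, -2, -3, -4]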

pypy/testrunner_cfg.py

 
 DIRS_SPLIT = [
     'translator/c', 'rlib',
-    'rpython/memory', 'jit/metainterp', 'rpython/test',
+    'memory/test', 'jit/metainterp',
     'jit/backend/arm', 'jit/backend/x86',
 ]
 

pypy/tool/release/package.py

 def package(basedir, name='pypy-nightly', rename_pypy_c='pypy',
             copy_to_dir=None, override_pypy_c=None, nostrip=False,
             withouttk=False):
+    assert '/' not in rename_pypy_c
     basedir = py.path.local(basedir)
     if override_pypy_c is None:
         basename = 'pypy-c'

rpython/annotator/annrpython.py

     def consider_op(self, block, opindex):
         op = block.operations[opindex]
         argcells = [self.binding(a) for a in op.args]
-        consider_meth = getattr(self,'consider_op_'+op.opname,
-                                None)
-        if not consider_meth:
-            raise Exception,"unknown op: %r" % op
 
         # let's be careful about avoiding propagated SomeImpossibleValues
         # to enter an op; the latter can result in violations of the
             if isinstance(arg, annmodel.SomeImpossibleValue):
                 raise BlockedInference(self, op, opindex)
         try:
-            resultcell = consider_meth(*argcells)
+            resultcell = op.consider(self, *argcells)
         except annmodel.AnnotatorError as e: # note that UnionError is a subclass
             graph = self.bookkeeper.position_key[0]
             e.source = gather_error(self, graph, block, opindex)

rpython/annotator/argument.py

 """
 Arguments objects.
 """
-from rpython.annotator.model import SomeTuple, SomeObject
+from rpython.annotator.model import SomeTuple
+from rpython.flowspace.argument import CallSpec
 
-# for parsing call arguments
-class RPythonCallsSpace(object):
-    """Pseudo Object Space providing almost no real operation.
-    For the Arguments class: if it really needs other operations, it means
-    that the call pattern is too complex for R-Python.
-    """
-    def newtuple(self, items_s):
-        if len(items_s) == 1 and items_s[0] is Ellipsis:
-            res = SomeObject()   # hack to get a SomeObject as the *arg
-            res.from_ellipsis = True
-            return res
-        else:
-            return SomeTuple(items_s)
-
-    def unpackiterable(self, s_obj, expected_length=None):
-        if isinstance(s_obj, SomeTuple):
-            return list(s_obj.items)
-        if (s_obj.__class__ is SomeObject and
-            getattr(s_obj, 'from_ellipsis', False)):    # see newtuple()
-            return [Ellipsis]
-        raise CallPatternTooComplex("'*' argument must be SomeTuple")
-
-    def bool(self, s_tup):
-        assert isinstance(s_tup, SomeTuple)
-        return bool(s_tup.items)
-
-
-class CallPatternTooComplex(Exception):
-    pass
-
-
-class ArgumentsForTranslation(object):
-    w_starstararg = None
-    def __init__(self, space, args_w, keywords=None, keywords_w=None,
-                 w_stararg=None, w_starstararg=None):
-        self.w_stararg = w_stararg
-        assert w_starstararg is None
-        self.space = space
-        assert isinstance(args_w, list)
-        self.arguments_w = args_w
-        self.keywords = keywords
-        self.keywords_w = keywords_w
-        self.keyword_names_w = None
-
-    def __repr__(self):
-        """ NOT_RPYTHON """
-        name = self.__class__.__name__
-        if not self.keywords:
-            return '%s(%s)' % (name, self.arguments_w,)
-        else:
-            return '%s(%s, %s, %s)' % (name, self.arguments_w,
-                                       self.keywords, self.keywords_w)
-
+class ArgumentsForTranslation(CallSpec):
     @property
     def positional_args(self):
         if self.w_stararg is not None:
-            args_w = self.space.unpackiterable(self.w_stararg)
+            args_w = self.unpackiterable(self.w_stararg)
             return self.arguments_w + args_w
         else:
             return self.arguments_w
 
+    def newtuple(self, items_s):
+        return SomeTuple(items_s)
+
+    def unpackiterable(self, s_obj):
+        assert isinstance(s_obj, SomeTuple)
+        return list(s_obj.items)
+
     def fixedunpack(self, argcount):
         """The simplest argument parsing: get the 'argcount' arguments,
         or raise a real ValueError if the length is wrong."""
 
     def prepend(self, w_firstarg): # used often
         "Return a new Arguments with a new argument inserted first."
-        return ArgumentsForTranslation(self.space, [w_firstarg] + self.arguments_w,
-                                       self.keywords, self.keywords_w, self.w_stararg,
-                                       self.w_starstararg)
+        return ArgumentsForTranslation([w_firstarg] + self.arguments_w,
+                                       self.keywords, self.w_stararg)
 
     def copy(self):
-        return ArgumentsForTranslation(self.space, self.arguments_w,
-                                       self.keywords, self.keywords_w, self.w_stararg,
-                                       self.w_starstararg)
+        return ArgumentsForTranslation(self.arguments_w, self.keywords,
+                self.w_stararg)
 
     def _match_signature(self, scope_w, signature, defaults_w=None):
         """Parse args and kwargs according to the signature of a code object,
 
         args_w = self.positional_args
         num_args = len(args_w)
-        keywords = self.keywords or []
+        keywords = self.keywords
         num_kwds = len(keywords)
 
         # put as many positional input arguments into place as available
                 starargs_w = args_w[co_argcount:]
             else:
                 starargs_w = []
-            scope_w[co_argcount] = self.space.newtuple(starargs_w)
+            scope_w[co_argcount] = self.newtuple(starargs_w)
         elif num_args > co_argcount:
             raise ArgErrCount(num_args, num_kwds, signature, defaults_w, 0)
 
 
         # handle keyword arguments
         num_remainingkwds = 0
-        keywords_w = self.keywords_w
         kwds_mapping = None
         if num_kwds:
             # kwds_mapping maps target indexes in the scope (minus input_argcount)
-            # to positions in the keywords_w list
-            kwds_mapping = [-1] * (co_argcount - input_argcount)
+            # to keyword names
+            kwds_mapping = []
             # match the keywords given at the call site to the argument names
             # the called function takes
             # this function must not take a scope_w, to make the scope not
             # escape
             num_remainingkwds = len(keywords)
-            for i, name in enumerate(keywords):
-                # If name was not encoded as a string, it could be None. In that
-                # case, it's definitely not going to be in the signature.
-                if name is None:
-                    continue
+            for name in keywords:
                 j = signature.find_argname(name)
                 # if j == -1 nothing happens
                 if j < input_argcount:
                     if j >= 0:
                         raise ArgErrMultipleValues(name)
                 else:
-                    kwds_mapping[j - input_argcount] = i # map to the right index
+                    kwds_mapping.append(name)
                     num_remainingkwds -= 1
 
             if num_remainingkwds:
                 if co_argcount == 0:
                     raise ArgErrCount(num_args, num_kwds, signature, defaults_w, 0)
-                raise ArgErrUnknownKwds(self.space, num_remainingkwds, keywords,
-                                        kwds_mapping, self.keyword_names_w)
+                raise ArgErrUnknownKwds(num_remainingkwds, keywords,
+                                        kwds_mapping)
 
         # check for missing arguments and fill them from the kwds,
         # or with defaults, if available
         if input_argcount < co_argcount:
             def_first = co_argcount - (0 if defaults_w is None else len(defaults_w))
             j = 0
-            kwds_index = -1
             for i in range(input_argcount, co_argcount):
-                if kwds_mapping is not None:
-                    kwds_index = kwds_mapping[j]
-                    j += 1
-                    if kwds_index >= 0:
-                        scope_w[i] = keywords_w[kwds_index]
-                        continue
+                name = signature.argnames[i]
+                if name in keywords:
+                    scope_w[i] = keywords[name]
+                    continue
                 defnum = i - def_first
                 if defnum >= 0:
                     scope_w[i] = defaults_w[defnum]
 
     def unpack(self):
         "Return a ([w1,w2...], {'kw':w3...}) pair."
-        kwds_w = dict(zip(self.keywords, self.keywords_w)) if self.keywords else {}
-        return self.positional_args, kwds_w
+        return self.positional_args, self.keywords
 
     def match_signature(self, signature, defaults_w):
         """Parse args and kwargs according to the signature of a code object,
 
     def unmatch_signature(self, signature, data_w):
         """kind of inverse of match_signature"""
-        need_cnt = len(self.positional_args)
-        need_kwds = self.keywords or []
-        space = self.space
         argnames, varargname, kwargname = signature
         assert kwargname is None
         cnt = len(argnames)
-        data_args_w = data_w[:cnt]
+        need_cnt = len(self.positional_args)
         if varargname:
-            data_w_stararg = data_w[cnt]
-            cnt += 1
-        else:
-            data_w_stararg = space.newtuple([])
+            assert len(data_w) == cnt + 1
+            stararg_w = self.unpackiterable(data_w[cnt])
+            if stararg_w:
+                args_w = data_w[:cnt] + stararg_w
+                assert len(args_w) == need_cnt
+                assert not self.keywords
+                return ArgumentsForTranslation(args_w, {})
+            else:
+                data_w = data_w[:-1]
         assert len(data_w) == cnt
+        assert len(data_w) >= need_cnt
+        args_w = data_w[:need_cnt]
+        _kwds_w = dict(zip(argnames[need_cnt:], data_w[need_cnt:]))
+        keywords_w = [_kwds_w[key] for key in self.keywords]
+        return ArgumentsForTranslation(args_w, dict(zip(self.keywords, keywords_w)))
 
-        unfiltered_kwds_w = {}
-        if len(data_args_w) >= need_cnt:
-            args_w = data_args_w[:need_cnt]
-            for argname, w_arg in zip(argnames[need_cnt:], data_args_w[need_cnt:]):
-                unfiltered_kwds_w[argname] = w_arg
-            assert not space.bool(data_w_stararg)
-        else:
-            stararg_w = space.unpackiterable(data_w_stararg)
-            args_w = data_args_w + stararg_w
-            assert len(args_w) == need_cnt
-
-        keywords = []
-        keywords_w = []
-        for key in need_kwds:
-            keywords.append(key)
-            keywords_w.append(unfiltered_kwds_w[key])
-
-        return ArgumentsForTranslation(self.space, args_w, keywords, keywords_w)
-
-    @staticmethod
-    def fromshape(space, (shape_cnt, shape_keys, shape_star, shape_stst), data_w):
+    @classmethod
+    def fromshape(cls, (shape_cnt, shape_keys, shape_star), data_w):
         args_w = data_w[:shape_cnt]
         p = end_keys = shape_cnt + len(shape_keys)
         if shape_star:
             p += 1
         else:
             w_star = None
-        if shape_stst:
-            w_starstar = data_w[p]
-            p += 1
-        else:
-            w_starstar = None
-        return ArgumentsForTranslation(space, args_w, list(shape_keys),
-                                       data_w[shape_cnt:end_keys], w_star,
-                                       w_starstar)
+        return cls(args_w, dict(zip(shape_keys, data_w[shape_cnt:end_keys])),
+                w_star)
 
-    def flatten(self):
-        """ Argument <-> list of w_objects together with "shape" information """
-        shape_cnt, shape_keys, shape_star, shape_stst = self._rawshape()
-        data_w = self.arguments_w + [self.keywords_w[self.keywords.index(key)]
-                                         for key in shape_keys]
-        if shape_star:
-            data_w.append(self.w_stararg)
-        if shape_stst:
-            data_w.append(self.w_starstararg)
-        return (shape_cnt, shape_keys, shape_star, shape_stst), data_w
 
-    def _rawshape(self, nextra=0):
-        shape_cnt = len(self.arguments_w) + nextra       # Number of positional args
-        if self.keywords:
-            shape_keys = self.keywords[:]                # List of keywords (strings)
-            shape_keys.sort()
-        else:
-            shape_keys = []
-        shape_star = self.w_stararg is not None   # Flag: presence of *arg
-        shape_stst = self.w_starstararg is not None # Flag: presence of **kwds
-        return shape_cnt, tuple(shape_keys), shape_star, shape_stst # shape_keys are sorted
-
-
-def rawshape(args, nextra=0):
-    return args._rawshape(nextra)
+def rawshape(args):
+    return args._rawshape()
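
For reference, a minimal standalone sketch of the call-shape round trip after this refactoring, assuming a simplified stand-in class (not the real CallSpec/ArgumentsForTranslation): keywords live in a single dict, and the shape is a 3-tuple (positional count, sorted keyword names, *-arg flag) now that the **kwargs slot is gone.

    # Hypothetical, simplified sketch -- not rpython.annotator.argument.
    class CallSpecSketch(object):
        def __init__(self, args_w, keywords=None, w_stararg=None):
            self.arguments_w = args_w
            self.keywords = keywords or {}          # keyword name -> value
            self.w_stararg = w_stararg

        def _rawshape(self):
            # (number of positional args, sorted keyword names, has-*arg flag)
            return (len(self.arguments_w),
                    tuple(sorted(self.keywords)),
                    self.w_stararg is not None)

        def flatten(self):
            shape_cnt, shape_keys, shape_star = self._rawshape()
            data_w = list(self.arguments_w) + [self.keywords[k] for k in shape_keys]
            if shape_star:
                data_w.append(self.w_stararg)
            return (shape_cnt, shape_keys, shape_star), data_w

        @classmethod
        def fromshape(cls, shape, data_w):
            shape_cnt, shape_keys, shape_star = shape
            end_keys = shape_cnt + len(shape_keys)
            w_star = data_w[end_keys] if shape_star else None
            return cls(data_w[:shape_cnt],
                       dict(zip(shape_keys, data_w[shape_cnt:end_keys])),
                       w_star)

    spec = CallSpecSketch(['w_a'], {'x': 'w_x'}, w_stararg=('w_s',))
    shape, data_w = spec.flatten()
    assert shape == (1, ('x',), True)
    assert CallSpecSketch.fromshape(shape, data_w).keywords == {'x': 'w_x'}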
 
 
 #
 
 
 class ArgErrUnknownKwds(ArgErr):
-    def __init__(self, space, num_remainingkwds, keywords, kwds_mapping,
-                 keyword_names_w):
+    def __init__(self, num_remainingkwds, keywords, kwds_mapping):
         name = ''
         self.num_kwds = num_remainingkwds
         if num_remainingkwds == 1:
-            for i in range(len(keywords)):
-                if i not in kwds_mapping:
-                    name = keywords[i]
-                    if name is None:
-                        # We'll assume it's unicode. Encode it.
-                        # Careful, I *think* it should not be possible to
-                        # get an IndexError here but you never know.
-                        try:
-                            if keyword_names_w is None:
-                                raise IndexError
-                            # note: negative-based indexing from the end
-                            w_name = keyword_names_w[i - len(keywords)]
-                        except IndexError:
-                            name = '?'
-                        else:
-                            w_enc = space.wrap(space.sys.defaultencoding)
-                            w_err = space.wrap("replace")
-                            w_name = space.call_method(w_name, "encode", w_enc,
-                                                       w_err)
-                            name = space.str_w(w_name)
+            for name in keywords:
+                if name not in kwds_mapping:
                     break
         self.kwd_name = name
 

rpython/annotator/binaryop.py

 import py
 import operator
 from rpython.tool.pairtype import pair, pairtype
-from rpython.annotator.model import SomeObject, SomeInteger, SomeBool, s_Bool
-from rpython.annotator.model import SomeString, SomeChar, SomeList, SomeDict,\
-     SomeOrderedDict
-from rpython.annotator.model import SomeUnicodeCodePoint, SomeUnicodeString
-from rpython.annotator.model import SomeTuple, SomeImpossibleValue, s_ImpossibleValue
-from rpython.annotator.model import SomeInstance, SomeBuiltin, SomeIterator
-from rpython.annotator.model import SomePBC, SomeFloat, s_None, SomeByteArray
-from rpython.annotator.model import SomeWeakRef
-from rpython.annotator.model import SomeAddress, SomeTypedAddressAccess
-from rpython.annotator.model import SomeSingleFloat, SomeLongFloat, SomeType
-from rpython.annotator.model import unionof, UnionError, missing_operation
-from rpython.annotator.model import read_can_only_throw
-from rpython.annotator.model import add_knowntypedata, merge_knowntypedata
+from rpython.annotator.model import (
+    SomeObject, SomeInteger, SomeBool, s_Bool, SomeString, SomeChar, SomeList,
+    SomeDict, SomeOrderedDict, SomeUnicodeCodePoint, SomeUnicodeString,
+    SomeTuple, SomeImpossibleValue, s_ImpossibleValue, SomeInstance,
+    SomeBuiltin, SomeIterator, SomePBC, SomeFloat, s_None, SomeByteArray,
+    SomeWeakRef, SomeAddress, SomeTypedAddressAccess, SomeSingleFloat,
+    SomeLongFloat, SomeType, SomeConstantType, unionof, UnionError,
+    missing_operation, read_can_only_throw, add_knowntypedata,
+    merge_knowntypedata,)
 from rpython.annotator.bookkeeper import getbookkeeper
 from rpython.flowspace.model import Variable, Constant
 from rpython.rlib import rarithmetic
     getitem_key = getitem_idx_key
 
 
-class __extend__(pairtype(SomeType, SomeType)):
+class __extend__(pairtype(SomeType, SomeType),
+                 pairtype(SomeType, SomeConstantType),
+                 pairtype(SomeConstantType, SomeType),):
 
     def union((obj1, obj2)):
         result = SomeType()
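
The extra pairtype entries above mean that unioning a SomeConstantType with a SomeType, in either order, reuses the same rule. Roughly, pairtype-based dispatch looks up the rule registered for the pair of operand classes; the toy below re-implements that idea for illustration only -- it does not use rpython.tool.pairtype, and the minimal class hierarchy is an assumption of the sketch.

    # Toy double dispatch in the spirit of pairtype(): union rules are keyed
    # on the pair of operand classes, with an MRO walk as a fallback.
    class SomeObject(object): pass
    class SomeType(SomeObject): pass
    class SomeConstantType(SomeObject): pass   # stand-in for the real class

    _union_rules = {}

    def register_union(cls1, cls2, fn):
        _union_rules[(cls1, cls2)] = fn

    def union(s1, s2):
        for c1 in type(s1).__mro__:
            for c2 in type(s2).__mro__:
                fn = _union_rules.get((c1, c2))
                if fn is not None:
                    return fn(s1, s2)
        raise TypeError("no union rule for %r, %r" % (s1, s2))

    # mirror the three pairtype() entries from the diff: all unify to SomeType
    for classes in [(SomeType, SomeType),
                    (SomeType, SomeConstantType),
                    (SomeConstantType, SomeType)]:
        register_union(classes[0], classes[1], lambda a, b: SomeType())

    assert type(union(SomeConstantType(), SomeType())) is SomeType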

rpython/annotator/bookkeeper.py

     SomeBuiltin, SomePBC, SomeInteger, TLS, SomeAddress, SomeUnicodeCodePoint,
     s_None, s_ImpossibleValue, SomeLLADTMeth, SomeBool, SomeTuple,
     SomeImpossibleValue, SomeUnicodeString, SomeList, HarmlesslyBlocked,
-    SomeWeakRef, lltype_to_annotation, SomeType, SomeByteArray)
+    SomeWeakRef, lltype_to_annotation, SomeType, SomeByteArray, SomeConstantType)
 from rpython.annotator.classdef import InstanceSource, ClassDef
 from rpython.annotator.listdef import ListDef, ListItem
 from rpython.annotator.dictdef import DictDef
 from rpython.annotator import description
 from rpython.annotator.signature import annotationoftype
-from rpython.annotator.argument import ArgumentsForTranslation, RPythonCallsSpace
+from rpython.annotator.argument import ArgumentsForTranslation
 from rpython.rlib.objectmodel import r_dict, Symbolic
 from rpython.tool.algo.unionfind import UnionFind
 from rpython.rtyper.lltypesystem import lltype, llmemory
         elif isinstance(x, llmemory.fakeaddress):
             result = SomeAddress()
         elif tp is type:
-            if (x is type(None) or      # add cases here if needed
-                x.__module__ == 'rpython.rtyper.lltypesystem.lltype'):
-                result = SomeType()
-            else:
-                result = SomePBC([self.getdesc(x)])
+            result = SomeConstantType(x, self)
         elif callable(x):
             if hasattr(x, 'im_self') and hasattr(x, 'im_func'):
                 # on top of PyPy, for cases like 'l.append' where 'l' is a
         return op
 
     def build_args(self, op, args_s):
-        space = RPythonCallsSpace()
         if op == "simple_call":
-            return ArgumentsForTranslation(space, list(args_s))
+            return ArgumentsForTranslation(list(args_s))
         elif op == "call_args":
             return ArgumentsForTranslation.fromshape(
-                    space, args_s[0].const, # shape
+                    args_s[0].const, # shape
                     list(args_s[1:]))
 
     def ondegenerated(self, what, s_value, where=None, called_from_graph=None):
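
To make the 'simple_call' vs. 'call_args' split in build_args concrete: the first form passes plain positional annotations, while the second carries a constant 3-element shape followed by the flattened values. The helper below is a hypothetical stand-in that decodes both forms; its name and return layout are assumptions of this sketch, not the real Bookkeeper API.

    # Hypothetical sketch: decode the two call forms handled by build_args.
    # Returns (positional values, keyword dict, *-arg value or None).
    def decode_call_sketch(op, args):
        if op == "simple_call":
            return list(args), {}, None
        elif op == "call_args":
            shape_cnt, shape_keys, shape_star = args[0]   # constant 3-tuple shape
            data = list(args[1:])
            end_keys = shape_cnt + len(shape_keys)
            kwargs = dict(zip(shape_keys, data[shape_cnt:end_keys]))
            w_star = data[end_keys] if shape_star else None
            return data[:shape_cnt], kwargs, w_star
        raise ValueError("unknown call operation: %r" % (op,))

    assert decode_call_sketch("simple_call", ["s_a", "s_b"]) == \
           (["s_a", "s_b"], {}, None)
    assert decode_call_sketch("call_args", [(1, ("k",), False), "s_a", "s_k"]) == \
           (["s_a"], {"k": "s_k"}, None)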

rpython/annotator/builtin.py

     r_func, nimplicitarg = s_repr.const.get_r_implfunc()
 
     nbargs = len(args_s) + nimplicitarg
-    s_sigs = r_func.get_s_signatures((nbargs, (), False, False))
+    s_sigs = r_func.get_s_signatures((nbargs, (), False))
     if len(s_sigs) != 1:
         raise TyperError("cannot hlinvoke callable %r with non-uniform "
                          "annotations: %r" % (s_repr.const,