Commits

Philip Jenvey  committed 487697a Merge

merge default

  • Participants
  • Parent commits bf8917a, 2eef951
  • Branches py3k

Comments (0)

Files changed (160)

File lib-python/2.7/ctypes/__init__.py

             func.__name__ = name_or_ordinal
         return func
 
-class PyDLL(CDLL):
-    """This class represents the Python library itself.  It allows to
-    access Python API functions.  The GIL is not released, and
-    Python exceptions are handled correctly.
-    """
-    _func_flags_ = _FUNCFLAG_CDECL | _FUNCFLAG_PYTHONAPI
+# Not in PyPy
+#class PyDLL(CDLL):
+#    """This class represents the Python library itself.  It allows to
+#    access Python API functions.  The GIL is not released, and
+#    Python exceptions are handled correctly.
+#    """
+#    _func_flags_ = _FUNCFLAG_CDECL | _FUNCFLAG_PYTHONAPI
 
 if _os.name in ("nt", "ce"):
 
         return self._dlltype(name)
 
 cdll = LibraryLoader(CDLL)
-pydll = LibraryLoader(PyDLL)
-
-if _os.name in ("nt", "ce"):
-    pythonapi = PyDLL("python dll", None, _sys.dllhandle)
-elif _sys.platform == "cygwin":
-    pythonapi = PyDLL("libpython%d.%d.dll" % _sys.version_info[:2])
-else:
-    pythonapi = PyDLL(None)
-
+# not on PyPy
+#pydll = LibraryLoader(PyDLL)
 
 if _os.name in ("nt", "ce"):
     windll = LibraryLoader(WinDLL)

File lib-python/2.7/ctypes/test/test_values.py

 
 import unittest
 from ctypes import *
+from ctypes.test import xfail
 
 import _ctypes_test
 
 
     class Win_ValuesTestCase(unittest.TestCase):
         """This test only works when python itself is a dll/shared library"""
-
+        
+        @xfail
         def test_optimizeflag(self):
             # This test accesses the Py_OptimizeFlag integer, which is
             # exported by the Python dll.
             else:
                 self.assertEqual(opt, 2)
 
+        @xfail
         def test_frozentable(self):
             # Python exports a PyImport_FrozenModules symbol. This is a
             # pointer to an array of struct _frozen entries.  The end of the
             from ctypes import _pointer_type_cache
             del _pointer_type_cache[struct_frozen]
 
+        @xfail
         def test_undefined(self):
             self.assertRaises(ValueError, c_int.in_dll, pydll, "Undefined_Symbol")
 

File lib-python/2.7/test/test_gdbm.py

         self.assertTrue(key in self.g)
         self.assertTrue(self.g.has_key(key))
 
+    def test_unicode_key(self):
+        key = u'ab'
+        value = u'cd'
+        self.g = gdbm.open(filename, 'cf')
+        self.g[key] = value
+        self.g.close()
+        self.g = gdbm.open(filename, 'r')
+        self.assertEquals(self.g[key], value)
+        self.assertTrue(key in self.g)
+        self.assertTrue(self.g.has_key(key))
+
 def test_main():
     run_unittest(TestGdbm)
 

File lib-python/2.7/timeit.py

 import gc
 import sys
 import time
-try:
-    import itertools
-except ImportError:
-    # Must be an older Python version (see timeit() below)
-    itertools = None
 
 __all__ = ["Timer"]
 
 def inner(_it, _timer):
     %(setup)s
     _t0 = _timer()
-    for _i in _it:
+    while _it > 0:
+        _it -= 1
         %(stmt)s
     _t1 = _timer()
     return _t1 - _t0
     def inner(_it, _timer, _func=func):
         setup()
         _t0 = _timer()
-        for _i in _it:
+        while _it > 0:
+            _it -= 1
             _func()
         _t1 = _timer()
         return _t1 - _t0
             else:
                 raise ValueError("setup is neither a string nor callable")
             self.src = src # Save for traceback display
-            code = compile(src, dummy_src_name, "exec")
-            exec code in globals(), ns
-            self.inner = ns["inner"]
+            def make_inner():
+                # PyPy tweak: recompile the source code each time before
+                # calling inner(). There are situations like Issue #1776
+                # where PyPy tries to reuse the JIT code from before,
+                # but that's not going to work: the first thing the
+                # function does is the "-s" statement, which may declare
+                # new classes (here a namedtuple). We end up with
+                # bridges from the inner loop; more and more of them
+                # every time we call inner().
+                code = compile(src, dummy_src_name, "exec")
+                exec code in globals(), ns
+                return ns["inner"]
+            self.make_inner = make_inner
         elif hasattr(stmt, '__call__'):
             self.src = None
             if isinstance(setup, basestring):
                     exec _setup in globals(), ns
             elif not hasattr(setup, '__call__'):
                 raise ValueError("setup is neither a string nor callable")
-            self.inner = _template_func(setup, stmt)
+            inner = _template_func(setup, stmt)
+            self.make_inner = lambda: inner
         else:
             raise ValueError("stmt is neither a string nor callable")
 
         to one million.  The main statement, the setup statement and
         the timer function to be used are passed to the constructor.
         """
-        if itertools:
-            it = itertools.repeat(None, number)
-        else:
-            it = [None] * number
+        inner = self.make_inner()
         gcold = gc.isenabled()
         if '__pypy__' not in sys.builtin_module_names:
             gc.disable()    # only do that on CPython
         try:
-            timing = self.inner(it, self.timer)
+            timing = inner(number, self.timer)
         finally:
             if gcold:
                 gc.enable()

File lib_pypy/_gdbm.py

 
     def _raise_from_errno(self):
         if ffi.errno:
-            raise error(os.strerror(ffi.errno))
-        raise error(lib.gdbm_strerror(lib.gdbm_errno))
+            raise error(ffi.errno, os.strerror(ffi.errno))
+        raise error(lib.gdbm_errno, lib.gdbm_strerror(lib.gdbm_errno))
 
     def __len__(self):
         if self.size < 0:
 
     def _check_closed(self):
         if not self.ll_dbm:
-            raise error("GDBM object has already been closed")
+            raise error(0, "GDBM object has already been closed")
 
     __del__ = close
 
     elif flags[0] == 'n':
         iflags = lib.GDBM_NEWDB
     else:
-        raise error("First flag must be one of 'r', 'w', 'c' or 'n'")
+        raise error(0, "First flag must be one of 'r', 'w', 'c' or 'n'")
     for flag in flags[1:]:
         if flag == 'f':
             iflags |= lib.GDBM_FAST
         elif flag == 'u':
             iflags |= lib.GDBM_NOLOCK
         else:
-            raise error("Flag '%s' not supported" % flag)
+            raise error(0, "Flag '%s' not supported" % flag)
     return gdbm(filename, iflags, mode)
 
 open_flags = "rwcnfsu"

File lib_pypy/cffi/__init__.py

 from .api import FFI, CDefError, FFIError
 from .ffiplatform import VerificationError, VerificationMissing
 
-__version__ = "0.8.2"
-__version_info__ = (0, 8, 2)
+__version__ = "0.8.6"
+__version_info__ = (0, 8, 6)

File lib_pypy/cffi/api.py

             # _cffi_backend.so compiled.
             import _cffi_backend as backend
             from . import __version__
-            assert (backend.__version__ == __version__ or
-                    backend.__version__ == __version__[:3])
+            assert backend.__version__ == __version__
             # (If you insist you can also try to pass the option
             # 'backend=backend_ctypes.CTypesBackend()', but don't
             # rely on it!  It's probably not going to work well.)

File lib_pypy/cffi/vengine_gen.py

                     enumerator, enumerator, enumvalue))
             prnt('    char buf[64];')
             prnt('    if ((%s) < 0)' % enumerator)
-            prnt('        snprintf(buf, 63, "%%ld", (long)(%s));' % enumerator)
+            prnt('        sprintf(buf, "%%ld", (long)(%s));' % enumerator)
             prnt('    else')
-            prnt('        snprintf(buf, 63, "%%lu", (unsigned long)(%s));' %
+            prnt('        sprintf(buf, "%%lu", (unsigned long)(%s));' %
                  enumerator)
-            prnt('    snprintf(out_error, 255,'
+            prnt('    sprintf(out_error,'
                              ' "%s has the real value %s, not %s",')
             prnt('            "%s", buf, "%d");' % (
-                enumerator, enumvalue))
+                enumerator[:100], enumvalue))
             prnt('    return -1;')
             prnt('  }')
         prnt('  return 0;')

File pypy/doc/cpython_differences.rst

File contents unchanged.

File pypy/doc/ctypes-implementation.rst

 Here is a list of the limitations and missing features of the
 current implementation:
 
-* ``ctypes.pythonapi`` lets you access the CPython C API emulation layer
-  of PyPy, at your own risks and without doing anything sensible about
-  the GIL.  Since PyPy 2.3, these functions are also named with an extra
-  "Py", for example ``PyPyInt_FromLong()``.  Basically, don't use this,
-  but it might more or less work in simple cases if you do.  (Obviously,
-  assuming the PyObject pointers you get have any particular fields in
-  any particular order is just going to crash.)
+* ``ctypes.pythonapi`` is missing.  In previous versions, it was present
+  and redirected to the `cpyext` C API emulation layer, but our
+  implementation did not do anything sensible about the GIL and the
+  functions were named with an extra "Py", for example
+  ``PyPyInt_FromLong()``.  It was removed for being unhelpful.
 
 * We copy Python strings instead of having pointers to raw buffers
 

File pypy/doc/faq.rst

 You might be interested in our `benchmarking site`_ and our 
 `jit documentation`_.
 
-Note that the JIT has a very high warm-up cost, meaning that the
-programs are slow at the beginning.  If you want to compare the timings
-with CPython, even relatively simple programs need to run *at least* one
-second, preferrably at least a few seconds.  Large, complicated programs
-need even more time to warm-up the JIT.
+`Your tests are not a benchmark`_: tests tend to be slow under PyPy
+because they run exactly once; if they are good tests, they exercise
+various corner cases in your code.  This is a bad case for JIT
+compilers.  Note also that our JIT has a very high warm-up cost, meaning
+that any program is slow at the beginning.  If you want to compare the
+timings with CPython, even relatively simple programs need to run *at
+least* one second, preferably at least a few seconds.  Large,
+complicated programs need even more time to warm-up the JIT.
 
 .. _`benchmarking site`: http://speed.pypy.org
 
 .. _`jit documentation`: jit/index.html
 
+.. _`your tests are not a benchmark`: http://alexgaynor.net/2013/jul/15/your-tests-are-not-benchmark/
+
 ---------------------------------------------------------------
 Couldn't the JIT dump and reload already-compiled machine code?
 ---------------------------------------------------------------
 
 This is documented (here__ and here__).  It needs 4 GB of RAM to run
 "rpython targetpypystandalone" on top of PyPy, a bit more when running
-on CPython.  If you have less than 4 GB it will just swap forever (or
-fail if you don't have enough swap).  On 32-bit, divide the numbers by
-two.
+on top of CPython.  If you have less than 4 GB free, it will just swap
+forever (or fail if you don't have enough swap).  And we mean *free:*
+if the machine has 4 GB *in total,* then it will swap.
+
+On 32-bit, divide the numbers by two.  (We didn't try recently, but in
+the past it was possible to compile a 32-bit version on a 2 GB Linux
+machine with nothing else running: no Gnome/KDE, for example.)
 
 .. __: http://pypy.org/download.html#building-from-source
 .. __: https://pypy.readthedocs.org/en/latest/getting-started-python.html#translating-the-pypy-python-interpreter

File pypy/doc/jit-hooks.rst

     aborted due to some reason.
 
     The hook will be invoked with the signature:
-    ``hook(jitdriver_name, greenkey, reason)``
+    ``hook(jitdriver_name, greenkey, reason, oplist)``
 
     Reason is a string, the meaning of other arguments is the same
     as attributes on JitLoopInfo object

File pypy/doc/objspace-proxies.rst

 Transparent Proxies
 ================================
 
+.. warning::
+
+    This is a feature that was tried experimentally long ago, and we
+    found no really good use cases.  The basic functionality is still
+    there, but we don't recommend using it.  Some of the examples below
+    might not work any more (e.g. you can't tproxy a list object any
+    more).  The rest can be done by hacking in standard Python.  If
+    anyone is interested in working on tproxy again, he is welcome, but
+    we don't regard this as an interesting extension.
+
 PyPy's Transparent Proxies allow routing of operations on objects
 to a callable.  Application level code can customize objects without
 interfering with the type system - ``type(proxied_list) is list`` holds true

File pypy/doc/stm.rst

 Introduction
 ============
 
-``pypy-stm`` is a variant of the regular PyPy interpreter.  With caveats_
+``pypy-stm`` is a variant of the regular PyPy interpreter.  (This
+version supports Python 2.7; see below for `Python 3`_.)  With caveats_
 listed below, it should be in theory within 20%-50% slower than a
 regular PyPy, comparing the JIT version in both cases (but see below!).
 It is called
   We're busy fixing them as we find them; feel free to `report bugs`_.
 
 * It runs with an overhead as low as 20% on examples like "richards".
-  There are also other examples with higher overheads --up to 10x for
-  "translate.py"-- which we are still trying to understand.  One suspect
-  is our partial GC implementation, see below.
+  There are also other examples with higher overheads --currently up to
+  2x for "translate.py"-- which we are still trying to understand.
+  One suspect is our partial GC implementation, see below.
 
 * Currently limited to 1.5 GB of RAM (this is just a parameter in
   `core.h`__).  Memory overflows are not correctly handled; they cause
 
 * The GC is new; although clearly inspired by PyPy's regular GC, it
   misses a number of optimizations for now.  Programs allocating large
-  numbers of small objects that don't immediately die, as well as
-  programs that modify large lists or dicts, suffer from these missing
-  optimizations.
+  numbers of small objects that don't immediately die (surely a common
+  situation) suffer from these missing optimizations.
 
 * The GC has no support for destructors: the ``__del__`` method is never
   called (including on file objects, which won't be closed for you).
 
 
 
+Python 3
+========
+
+In this document I describe "pypy-stm", which is based on PyPy's Python
+2.7 interpreter.  Supporting Python 3 should take about half an
+afternoon of work.  Obviously, what I *don't* mean is that by tomorrow
+you can have a finished and polished "pypy3-stm" product.  General py3k
+work is still missing; and general stm work is also still missing.  But
+they are rather independent from each other, as usual in PyPy.  The
+required afternoon of work will certainly be done one of these days now
+that the internal interfaces seem to stabilize.
+
+The same is true for other languages implemented in the RPython
+framework, although the amount of work to put there might vary, because
+the STM framework within RPython is currently targeting the PyPy
+interpreter and other ones might have slightly different needs.
+
+
+
 User Guide
 ==========
   
 The last two lines are special; they are an internal marker read by
 ``transactional_memory.print_abort_info()``.
 
-These statistics are not printed out for the main thread, for now.
-
 
 Reference to implementation details
 -----------------------------------

File pypy/doc/whatsnew-head.rst

 x86-64, the JIT backend has a special optimization that lets it emit
 directly a single MOV from a %gs- or %fs-based address. It seems
 actually to give a good boost in performance.
+
+.. branch: fast-gil
+A faster way to handle the GIL, particularly in JIT code. The GIL is
+now a composite of two concepts: a global number (it's just set from
+1 to 0 and back around CALL_RELEASE_GIL), and a real mutex. If there
+are threads waiting to acquire the GIL, one of them is actively
+checking the global number every 0.1 ms to 1 ms.  Overall, JIT loops
+full of external function calls now run a bit faster (if no thread was
+started yet), or a *lot* faster (if threads were started already).
+
+.. branch: jit-get-errno
+Optimize the errno handling in the JIT, notably around external
+function calls. Linux-only.
+
+.. branch: disable_pythonapi
+Remove non-functioning ctypes.pythonapi and ctypes.PyDLL, document this
+incompatibility with cpython. Recast sys.dllhandle to an int.
+
+.. branch: scalar-operations
+Fix performance regression on ufunc(<scalar>, <scalar>) in numpy.

File pypy/doc/windows.rst

 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 Download http://www.gzip.org/zlib/zlib-1.2.3.tar.gz and extract it in
-the base directory.  Then compile::
+the base directory.  Then compile as a static library::
 
     cd zlib-1.2.3
     nmake -f win32\Makefile.msc
-    copy zlib1.dll <somewhere in the PATH>\zlib.dll
+    copy zlib1.lib <somewhere in LIB>
+    copy zlib.h zconf.h <somewhere in INCLUDE>
 
 The bz2 compression library
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Get the same version of bz2 used by python and compile as a static library::
 
     svn export http://svn.python.org/projects/external/bzip2-1.0.6
     cd bzip2-1.0.6
     nmake -f makefile.msc
-    copy bzip.dll <somewhere in the PATH>\bzip.dll
+    copy libbz2.lib <somewhere in LIB>
+    copy bzlib.h <somewhere in INCLUDE>
+
     
 The sqlite3 database library
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 is actually enough for pypy).
 
 Then, copy the file ``win32\bin\release\libexpat.dll`` somewhere in
-your PATH.
+your PATH, ``win32\bin\release\libexpat.lib`` somewhere in LIB, and
+both ``lib\expat.h`` and ``lib\expat_external.h`` somewhere in INCLUDE.
 
 The OpenSSL library
 ~~~~~~~~~~~~~~~~~~~

File pypy/goal/targetpypystandalone.py

File contents unchanged.

File pypy/interpreter/baseobjspace.py

         else:
             # translated case follows.  self.threadlocals is either from
             # 'pypy.interpreter.miscutils' or 'pypy.module.thread.threadlocals'.
-            # the result is assumed to be non-null: enter_thread() was called.
-            return self.threadlocals.get_ec()
+            # the result is assumed to be non-null: enter_thread() was called
+            # by space.startup().
+            ec = self.threadlocals.get_ec()
+            assert ec is not None
+            return ec
 
     def _freeze_(self):
         return True
         """
         return self.unpackiterable(w_iterable, expected_length)
 
+    def listview_no_unpack(self, w_iterable):
+        """ Same as listview() if cheap.  If 'w_iterable' is something like
+        a generator, for example, then return None instead.
+        May return None anyway.
+        """
+        return None
+
     def listview_bytes(self, w_list):
         """ Return a list of unwrapped strings out of a list of strings. If the
         argument is not a list or does not contain only strings, return None.

File pypy/interpreter/executioncontext.py

     """
 
 
+class UserDelCallback(object):
+    def __init__(self, w_obj, callback, descrname):
+        self.w_obj = w_obj
+        self.callback = callback
+        self.descrname = descrname
+        self.next = None
+
 class UserDelAction(AsyncAction):
     """An action that invokes all pending app-level __del__() method.
     This is done as an action instead of immediately when the
 
     def __init__(self, space):
         AsyncAction.__init__(self, space)
-        self.dying_objects = []
+        self.dying_objects = None
+        self.dying_objects_last = None
         self.finalizers_lock_count = 0
         self.enabled_at_app_level = True
         self._invoke_immediately = False
 
     def register_callback(self, w_obj, callback, descrname):
-        self.dying_objects.append((w_obj, callback, descrname))
+        cb = UserDelCallback(w_obj, callback, descrname)
+        if self.dying_objects_last is None:
+            self.dying_objects = cb
+        else:
+            self.dying_objects_last.next = cb
+        self.dying_objects_last = cb
         if not self._invoke_immediately:
             self.fire()
         else:
         # avoid too deep recursions of the kind of __del__ being called
         # while in the middle of another __del__ call.
         pending = self.dying_objects
-        self.dying_objects = []
+        self.dying_objects = None
+        self.dying_objects_last = None
         space = self.space
-        for i in range(len(pending)):
-            w_obj, callback, descrname = pending[i]
-            pending[i] = (None, None, None)
+        while pending is not None:
             try:
-                callback(w_obj)
+                pending.callback(pending.w_obj)
             except OperationError, e:
-                e.write_unraisable(space, descrname, w_obj)
+                e.write_unraisable(space, pending.descrname, pending.w_obj)
                 e.clear(space)   # break up reference cycles
+            pending = pending.next
+        #
+        # Note: 'dying_objects' used to be just a regular list instead
+        # of a chained list.  This was the cause of "leaks" if we have a
+        # program that constantly creates new objects with finalizers.
+        # Here is why: say 'dying_objects' is a long list, and there
+        # are n instances in it.  Then we spend some time in this
+        # function, possibly triggering more GCs, but keeping the list
+        # of length n alive.  Then the list is suddenly freed at the
+        # end, and we return to the user program.  At this point the
+        # GC limit is still very high, because just before, there was
+        # a list of length n alive.  Assume that the program continues
+        # to allocate a lot of instances with finalizers.  The high GC
+        # limit means that it could allocate a lot of instances before
+        # reaching it --- possibly more than n.  So the whole procedure
+        # repeats with higher and higher values of n.
+        #
+        # This does not occur in the current implementation because
+        # there is no list of length n: if n is large, then the GC
+        # will run several times while walking the list, but it will
+        # see lower and lower memory usage, with no lower bound of n.

File pypy/interpreter/generator.py

         return self.send_ex(w_arg)
 
     def send_ex(self, w_arg, operr=None):
+        pycode = self.pycode
+        if jit.we_are_jitted() and should_not_inline(pycode):
+            generatorentry_driver.jit_merge_point(gen=self, w_arg=w_arg,
+                                                  operr=operr, pycode=pycode)
+        return self._send_ex(w_arg, operr)
+
+    def _send_ex(self, w_arg, operr):
         space = self.space
         if self.running:
             raise OperationError(space.w_ValueError,
             if operr is None:
                 operr = OperationError(space.w_StopIteration, space.w_None)
             raise operr
-        # XXX it's not clear that last_instr should be promoted at all
-        # but as long as it is necessary for call_assembler, let's do it early
+
         last_instr = jit.promote(frame.last_instr)
         if last_instr == -1:
             if w_arg and not space.is_w(w_arg, space.w_None):
                                                  "interrupting generator of ")
                     break
                 block = block.previous
+
+
+
+def get_printable_location_genentry(bytecode):
+    return '%s <generator>' % (bytecode.get_repr(),)
+generatorentry_driver = jit.JitDriver(greens=['pycode'],
+                                      reds=['gen', 'w_arg', 'operr'],
+                                      get_printable_location =
+                                          get_printable_location_genentry,
+                                      name='generatorentry')
+
+from pypy.tool.stdlib_opcode import HAVE_ARGUMENT, opmap
+YIELD_VALUE = opmap['YIELD_VALUE']
+
+@jit.elidable_promote()
+def should_not_inline(pycode):
+    # Should not inline generators with more than one "yield",
+    # as an approximative fix (see issue #1782).  There are cases
+    # where it slows things down; for example calls to a simple
+    # generator that just produces a few simple values with a few
+    # consecutive "yield" statements.  It fixes the near-infinite
+    # slow-down in issue #1782, though...
+    count_yields = 0
+    code = pycode.co_code
+    n = len(code)
+    i = 0
+    while i < n:
+        c = code[i]
+        op = ord(c)
+        if op == YIELD_VALUE:
+            count_yields += 1
+        i += 1
+        if op >= HAVE_ARGUMENT:
+            i += 2
+    return count_yields >= 2

File pypy/interpreter/test/test_generator.py

             yield 1
             raise StopIteration
         assert tuple(f()) == (1,)
+
+
+def test_should_not_inline(space):
+    from pypy.interpreter.generator import should_not_inline
+    w_co = space.appexec([], '''():
+        def g(x):
+            yield x + 5
+        return g.func_code
+    ''')
+    assert should_not_inline(w_co) == False
+    w_co = space.appexec([], '''():
+        def g(x):
+            yield x + 5
+            yield x + 6
+        return g.func_code
+    ''')
+    assert should_not_inline(w_co) == True

File pypy/module/__pypy__/__init__.py

         'hidden_applevel'           : 'interp_magic.hidden_applevel',
         'lookup_special'            : 'interp_magic.lookup_special',
         'do_what_I_mean'            : 'interp_magic.do_what_I_mean',
-        'list_strategy'             : 'interp_magic.list_strategy',
         'validate_fd'               : 'interp_magic.validate_fd',
         'resizelist_hint'           : 'interp_magic.resizelist_hint',
         'newlist_hint'              : 'interp_magic.newlist_hint',
         'add_memory_pressure'       : 'interp_magic.add_memory_pressure',
         'newdict'                   : 'interp_dict.newdict',
-        'dictstrategy'              : 'interp_dict.dictstrategy',
+        'strategy'                  : 'interp_magic.strategy',  # dict,set,list
         'set_debug'                 : 'interp_magic.set_debug',
         'locals_to_fast'            : 'interp_magic.locals_to_fast',
         'normalize_exc'             : 'interp_magic.normalize_exc',

File pypy/module/__pypy__/interp_dict.py

 
 from pypy.interpreter.error import OperationError, oefmt
 from pypy.interpreter.gateway import unwrap_spec
-from pypy.objspace.std.dictmultiobject import W_DictMultiObject
 
 @unwrap_spec(type=str)
 def newdict(space, type):
         return space.newdict(strdict=True)
     else:
         raise oefmt(space.w_TypeError, "unknown type of dict %s", type)
-
-def dictstrategy(space, w_obj):
-    """ dictstrategy(dict)
-
-    show the underlaying strategy used by a dict object
-    """
-    if not isinstance(w_obj, W_DictMultiObject):
-        raise OperationError(space.w_TypeError,
-                             space.wrap("expecting dict object"))
-    return space.wrap('%r' % (w_obj.strategy,))

File pypy/module/__pypy__/interp_magic.py

 from pypy.interpreter.gateway import WrappedDefault, unwrap_spec
 from pypy.interpreter.pyframe import PyFrame
 from rpython.rlib.objectmodel import we_are_translated
+from pypy.objspace.std.dictmultiobject import W_DictMultiObject
 from pypy.objspace.std.listobject import W_ListObject
+from pypy.objspace.std.setobject import W_BaseSetObject
 from pypy.objspace.std.typeobject import MethodCache
 from pypy.objspace.std.mapdict import MapAttrCache
 from rpython.rlib import rposix, rgc
 def do_what_I_mean(space):
     return space.wrap(42)
 
-def list_strategy(space, w_list):
-    if isinstance(w_list, W_ListObject):
-        return space.wrap(w_list.strategy._applevel_repr)
+
+def strategy(space, w_obj):
+    """ strategy(dict or list or set)
+
+    Return the underlying strategy currently used by a dict, list or set object
+    """
+    if isinstance(w_obj, W_DictMultiObject):
+        name = w_obj.strategy.__class__.__name__
+    elif isinstance(w_obj, W_ListObject):
+        name = w_obj.strategy.__class__.__name__
+    elif isinstance(w_obj, W_BaseSetObject):
+        name = w_obj.strategy.__class__.__name__
     else:
-        w_msg = space.wrap("Can only get the list strategy of a list")
-        raise OperationError(space.w_TypeError, w_msg)
+        raise OperationError(space.w_TypeError,
+                             space.wrap("expecting dict or list or set object"))
+    return space.wrap(name)
+
 
 @unwrap_spec(fd='c_int')
 def validate_fd(space, fd):

File pypy/module/__pypy__/test/test_special.py

         assert x == 42
 
     def test_list_strategy(self):
-        from __pypy__ import list_strategy
+        from __pypy__ import strategy
 
         l = [1, 2, 3]
-        assert list_strategy(l) == "int"
-        l = list(range(1, 2))
-        assert list_strategy(l) == "int"
+        assert strategy(l) == "IntegerListStrategy"
         l = [b"a", b"b", b"c"]
-        assert list_strategy(l) == "bytes"
-        l = ["a", "b", "c"]
-        assert list_strategy(l) == "unicode"
+        assert strategy(l) == "BytesListStrategy"
+        l = [u"a", u"b", u"c"]
+        assert strategy(l) == "UnicodeListStrategy"
         l = [1.1, 2.2, 3.3]
-        assert list_strategy(l) == "float"
+        assert strategy(l) == "FloatListStrategy"
         l = [1, "b", 3]
-        assert list_strategy(l) == "object"
+        assert strategy(l) == "ObjectListStrategy"
         l = []
-        assert list_strategy(l) == "empty"
+        assert strategy(l) == "EmptyListStrategy"
         o = 5
-        raises(TypeError, list_strategy, 5)
+        raises(TypeError, strategy, 5)
+
+    def test_dict_strategy(self):
+        from __pypy__ import strategy
+
+        d = {}
+        assert strategy(d) == "EmptyDictStrategy"
+        d = {1: None, 5: None}
+        assert strategy(d) == "IntDictStrategy"
+
+    def test_set_strategy(self):
+        from __pypy__ import strategy
+
+        s = set()
+        assert strategy(s) == "EmptySetStrategy"
+        s = set([2, 3, 4])
+        assert strategy(s) == "IntegerSetStrategy"
 
     def test_normalize_exc(self):
         from __pypy__ import normalize_exc

File pypy/module/_cffi_backend/__init__.py

     appleveldefs = {
         }
     interpleveldefs = {
-        '__version__': 'space.wrap("0.8.2")',
+        '__version__': 'space.wrap("0.8.6")',
 
         'load_library': 'libraryobj.load_library',
 

File pypy/module/_cffi_backend/ctypefunc.py

 
 import sys
 
-from rpython.rlib import jit, clibffi, jit_libffi
+from rpython.rlib import jit, clibffi, jit_libffi, rgc
 from rpython.rlib.jit_libffi import (CIF_DESCRIPTION, CIF_DESCRIPTION_P,
     FFI_TYPE, FFI_TYPE_P, FFI_TYPE_PP, SIZE_OF_FFI_ARG)
 from rpython.rlib.objectmodel import we_are_translated, instantiate
         CifDescrBuilder(fvarargs, self.ctitem).rawallocate(ctypefunc)
         return ctypefunc
 
+    @rgc.must_be_light_finalizer
     def __del__(self):
         if self.cif_descr:
             lltype.free(self.cif_descr, flavor='raw')
                     data = rffi.ptradd(buffer, cif_descr.exchange_args[i])
                     flag = get_mustfree_flag(data)
                     if flag == 1:
-                        raw_string = rffi.cast(rffi.CCHARPP, data)[0]
-                        lltype.free(raw_string, flavor='raw')
+                        raw_cdata = rffi.cast(rffi.CCHARPP, data)[0]
+                        lltype.free(raw_cdata, flavor='raw')
             lltype.free(buffer, flavor='raw')
         return w_res
 

File pypy/module/_cffi_backend/test/_backend_test_c.py

 
 def test_version():
     # this test is here mostly for PyPy
-    assert __version__ == "0.8.2"
+    assert __version__ == "0.8.6"

File pypy/module/_io/interp_textio.py

             space.wrap("<_io.TextIOWrapper %s%sencoding=%r>"), w_args
         )
 
-    def isatty_w(self, space):
-        self._check_init(space)
-        return space.call_method(self.w_buffer, "isatty")
-
     def readable_w(self, space):
         self._check_init(space)
         return space.call_method(self.w_buffer, "readable")
         self._check_init(space)
         return space.call_method(self.w_buffer, "seekable")
 
+    def isatty_w(self, space):
+        self._check_init(space)
+        return space.call_method(self.w_buffer, "isatty")
+
     def fileno_w(self, space):
         self._check_init(space)
         return space.call_method(self.w_buffer, "fileno")
     close = interp2app(W_TextIOWrapper.close_w),
 
     line_buffering = interp_attrproperty("line_buffering", W_TextIOWrapper),
-    isatty = interp2app(W_TextIOWrapper.isatty_w),
     readable = interp2app(W_TextIOWrapper.readable_w),
     writable = interp2app(W_TextIOWrapper.writable_w),
     seekable = interp2app(W_TextIOWrapper.seekable_w),
+    isatty = interp2app(W_TextIOWrapper.isatty_w),
     fileno = interp2app(W_TextIOWrapper.fileno_w),
     _dealloc_warn = interp2app(W_TextIOWrapper._dealloc_warn_w),
     name = GetSetProperty(W_TextIOWrapper.name_get_w),

File pypy/module/_io/test/test_textio.py

         t = _io.TextIOWrapper(b)
         assert t.readable()
         assert t.seekable()
+        #
+        class CustomFile(object):
+            def isatty(self): return 'YES'
+            readable = writable = seekable = lambda self: False
+        t = _io.TextIOWrapper(CustomFile())
+        assert t.isatty() == 'YES'
 
     def test_default_implementations(self):
         import _io

File pypy/module/_lsprof/interp_lsprof.py

 from rpython.rlib.rtimer import read_timestamp, _is_64_bit
 from rpython.rtyper.lltypesystem import rffi, lltype
 from rpython.translator.tool.cbuild import ExternalCompilationInfo
-from rpython.conftest import cdir
+from rpython.translator import cdir
 from rpython.rlib.rarithmetic import r_longlong
 
 import time, sys

File pypy/module/_rawffi/alt/interp_funcptr.py

File contents unchanged.

File pypy/module/_socket/__init__.py

     }
 
     interpleveldefs = {
-        'SocketType':  'interp_socket.W_RSocket',
-        'socket'    :  'interp_socket.W_RSocket',
+        'SocketType':  'interp_socket.W_Socket',
+        'socket'    :  'interp_socket.W_Socket',
         'error'     :  'interp_socket.get_error(space, "error")',
         'herror'    :  'interp_socket.get_error(space, "herror")',
         'gaierror'  :  'interp_socket.get_error(space, "gaierror")',

File pypy/module/_socket/interp_func.py

+from rpython.rlib import rsocket
+from rpython.rlib.rsocket import SocketError, INVALID_SOCKET
+
+from pypy.interpreter.error import OperationError
 from pypy.interpreter.gateway import unwrap_spec, WrappedDefault
 from pypy.module._socket.interp_socket import (
-    converted_error, W_RSocket, addr_as_object, fill_from_object, get_error)
-from rpython.rlib import rsocket
-from rpython.rlib.rsocket import SocketError, INVALID_SOCKET
-from pypy.interpreter.error import OperationError
+    converted_error, W_Socket, addr_as_object, fill_from_object, get_error,
+    ipaddr_from_object
+)
+
 
 def gethostname(space):
     """gethostname() -> string
     AF_UNIX if defined on the platform; otherwise, the default is AF_INET.
     """
     try:
-        sock1, sock2 = rsocket.socketpair(family, type, proto, W_RSocket)
+        sock1, sock2 = rsocket.socketpair(family, type, proto)
     except SocketError, e:
         raise converted_error(space, e)
-    return space.newtuple([space.wrap(sock1), space.wrap(sock2)])
+    return space.newtuple([
+        space.wrap(W_Socket(sock1)),
+        space.wrap(W_Socket(sock2))
+    ])
 
 # The following 4 functions refuse all negative numbers, like CPython 2.6.
 # They could also check that the argument is not too large, but CPython 2.6

File pypy/module/_socket/interp_socket.py

+from rpython.rlib import rsocket
+from rpython.rlib.rarithmetic import intmask
+from rpython.rlib.rsocket import (
+    RSocket, AF_INET, SOCK_STREAM, SocketError, SocketErrorWithErrno,
+    RSocketError
+)
+from rpython.rtyper.lltypesystem import lltype, rffi
+
+from pypy.interpreter import gateway
 from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.typedef import TypeDef, interp_attrproperty
+from pypy.interpreter.error import OperationError, oefmt
 from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault
-from rpython.rlib.rarithmetic import intmask
-from rpython.rtyper.lltypesystem import lltype, rffi
-from rpython.rlib import rsocket
-from rpython.rlib.rsocket import RSocket, AF_INET, SOCK_STREAM
-from rpython.rlib.rsocket import SocketError, SocketErrorWithErrno, RSocketError
-from pypy.interpreter.error import OperationError, oefmt
-from pypy.interpreter import gateway
+from pypy.interpreter.typedef import (
+    GetSetProperty, TypeDef, interp_attrproperty, make_weakref_descr
+)
 
 
 # XXX Hack to separate rpython and pypy
     return addr
 
 
-class W_RSocket(W_Root, RSocket):
+class W_Socket(W_Root):
 
     # for _dealloc_warn
     space = None
 
+    def __init__(self, sock):
+        self.sock = sock
+
     def descr_new(space, w_subtype, __args__):
-        sock = space.allocate_instance(W_RSocket, w_subtype)
+        sock = space.allocate_instance(W_Socket, w_subtype)
         return space.wrap(sock)
 
     @unwrap_spec(family=int, type=int, proto=int,
                    w_fileno=None):
         try:
             if not space.is_w(w_fileno, space.w_None):
-                W_RSocket.__init__(self, family, type, proto,
-                                   fd=space.c_filedescriptor_w(w_fileno))
+                sock = RSocket(family, type, proto,
+                               fd=space.c_filedescriptor_w(w_fileno))
             else:
-                W_RSocket.__init__(self, family, type, proto)
+                sock = RSocket(family, type, proto)
+            W_Socket.__init__(self, sock)
             self.space = space
         except SocketError, e:
             raise converted_error(space, e)
     def __del__(self):
         self.clear_all_weakrefs()
         if self.space:
-            self.enqueue_for_destruction(self.space, W_RSocket.destructor,
+            self.enqueue_for_destruction(self.space, W_Socket.destructor,
                                          'internal __del__ of ')
 
     def destructor(self):
-        assert isinstance(self, W_RSocket)
+        assert isinstance(self, W_Socket)
         if self.fd != rsocket.INVALID_SOCKET:
             try:
                 self._dealloc_warn()
             finally:
                 self.close_w(self.space)
+    # --XXX--
+
+    def get_type_w(self, space):
+        return space.wrap(self.sock.type)
+
+    def get_proto_w(self, space):
+        return space.wrap(self.sock.proto)
+
+    def get_family_w(self, space):
+        return space.wrap(self.sock.family)
 
     def _dealloc_warn(self):
         space = self.space
         For IP sockets, the address info is a pair (hostaddr, port).
         """
         try:
-            fd, addr = self.accept()
+            fd, addr = self.sock.accept()
             return space.newtuple([space.wrap(fd),
                                    addr_as_object(addr, fd, space)])
-        except SocketError, e:
+        except SocketError as e:
             raise converted_error(space, e)
 
     # convert an Address into an app-level object
     def addr_as_object(self, space, address):
-        return addr_as_object(address, self.fd, space)
+        return addr_as_object(address, self.sock.fd, space)
 
     # convert an app-level object into an Address
     # based on the current socket's family
     def addr_from_object(self, space, w_address):
-        return addr_from_object(self.family, space, w_address)
+        return addr_from_object(self.sock.family, space, w_address)
 
     def bind_w(self, space, w_addr):
         """bind(address)
         sockets the address is a tuple (ifname, proto [,pkttype [,hatype]])
         """
         try:
-            self.bind(self.addr_from_object(space, w_addr))
-        except SocketError, e:
+            self.sock.bind(self.addr_from_object(space, w_addr))
+        except SocketError as e:
             raise converted_error(space, e)
 
     def close_w(self, space):
         Close the socket.  It cannot be used after this call.
         """
         try:
-            self.close()
+            self.sock.close()
         except SocketError:
             # cpython doesn't return any errors on close
             pass
         is a pair (host, port).
         """
         try:
-            self.connect(self.addr_from_object(space, w_addr))
-        except SocketError, e:
+            self.sock.connect(self.addr_from_object(space, w_addr))
+        except SocketError as e:
             raise converted_error(space, e)
 
     def connect_ex_w(self, space, w_addr):
         """
         try:
             addr = self.addr_from_object(space, w_addr)
-        except SocketError, e:
+        except SocketError as e:
             raise converted_error(space, e)
-        error = self.connect_ex(addr)
+        error = self.sock.connect_ex(addr)
         return space.wrap(error)
 
     def fileno_w(self, space):
 
         Return the integer file descriptor of the socket.
         """
-        return space.wrap(intmask(self.fd))
+        return space.wrap(intmask(self.sock.fd))
 
     def detach_w(self, space):
         """detach()
         info is a pair (hostaddr, port).
         """
         try:
-            addr = self.getpeername()
-            return addr_as_object(addr, self.fd, space)
-        except SocketError, e:
+            addr = self.sock.getpeername()
+            return addr_as_object(addr, self.sock.fd, space)
+        except SocketError as e:
             raise converted_error(space, e)
 
     def getsockname_w(self, space):
         info is a pair (hostaddr, port).
         """
         try:
-            addr = self.getsockname()
-            return addr_as_object(addr, self.fd, space)
-        except SocketError, e:
+            addr = self.sock.getsockname()
+            return addr_as_object(addr, self.sock.fd, space)
+        except SocketError as e:
             raise converted_error(space, e)
 
     @unwrap_spec(level=int, optname=int)
         """
         if w_buflen is None:
             try:
-                return space.wrap(self.getsockopt_int(level, optname))
-            except SocketError, e:
+                return space.wrap(self.sock.getsockopt_int(level, optname))
+            except SocketError as e:
                 raise converted_error(space, e)
         buflen = space.int_w(w_buflen)
-        return space.wrapbytes(self.getsockopt(level, optname, buflen))
+        return space.wrapbytes(self.sock.getsockopt(level, optname, buflen))
 
     def gettimeout_w(self, space):
         """gettimeout() -> timeout
         Returns the timeout in floating seconds associated with socket
         operations. A timeout of None indicates that timeouts on socket
         """
-        timeout = self.gettimeout()
+        timeout = self.sock.gettimeout()
         if timeout < 0.0:
             return space.w_None
         return space.wrap(timeout)
         will allow before refusing new connections.
         """
         try:
-            self.listen(backlog)
-        except SocketError, e:
+            self.sock.listen(backlog)
+        except SocketError as e:
             raise converted_error(space, e)
 
     @unwrap_spec(buffersize='nonnegint', flags=int)
         the remote end is closed and all data is read, return the empty string.
         """
         try:
-            data = self.recv(buffersize, flags)
-        except SocketError, e:
+            data = self.sock.recv(buffersize, flags)
+        except SocketError as e:
             raise converted_error(space, e)
         return space.wrapbytes(data)
 
         Like recv(buffersize, flags) but also return the sender's address info.
         """
         try:
-            data, addr = self.recvfrom(buffersize, flags)
+            data, addr = self.sock.recvfrom(buffersize, flags)
             if addr:
-                w_addr = addr_as_object(addr, self.fd, space)
+                w_addr = addr_as_object(addr, self.sock.fd, space)
             else:
                 w_addr = space.w_None
             return space.newtuple([space.wrapbytes(data), w_addr])
-        except SocketError, e:
+        except SocketError as e:
             raise converted_error(space, e)
 
     @unwrap_spec(data='bufferstr', flags=int)
         sent; this may be less than len(data) if the network is busy.
         """
         try:
-            count = self.send(data, flags)
-        except SocketError, e:
+            count = self.sock.send(data, flags)
+        except SocketError as e:
             raise converted_error(space, e)
         return space.wrap(count)
 
         to tell how much data has been sent.
         """
         try:
-            self.sendall(data, flags, space.getexecutioncontext().checksignals)
-        except SocketError, e:
+            self.sock.sendall(
+                data, flags, space.getexecutioncontext().checksignals)
+        except SocketError as e:
             raise converted_error(space, e)
 
     @unwrap_spec(data='bufferstr')
             w_addr = w_param3
         try:
             addr = self.addr_from_object(space, w_addr)
-            count = self.sendto(data, flags, addr)
-        except SocketError, e:
+            count = self.sock.sendto(data, flags, addr)
+        except SocketError as e:
             raise converted_error(space, e)
         return space.wrap(count)
 
         setblocking(True) is equivalent to settimeout(None);
         setblocking(False) is equivalent to settimeout(0.0).
         """
-        self.setblocking(flag)
+        self.sock.setblocking(flag)
 
     @unwrap_spec(level=int, optname=int)
     def setsockopt_w(self, space, level, optname, w_optval):
         except:
             optval = space.bytes_w(w_optval)
             try:
-                self.setsockopt(level, optname, optval)
-            except SocketError, e:
+                self.sock.setsockopt(level, optname, optval)
+            except SocketError as e:
                 raise converted_error(space, e)
             return
         try:
-            self.setsockopt_int(level, optname, optval)
-        except SocketError, e:
+            self.sock.setsockopt_int(level, optname, optval)
+        except SocketError as e:
             raise converted_error(space, e)
 
     def settimeout_w(self, space, w_timeout):
             if timeout < 0.0:
                 raise OperationError(space.w_ValueError,
                                      space.wrap('Timeout value out of range'))
-        self.settimeout(timeout)
+        self.sock.settimeout(timeout)
 
     @unwrap_spec(nbytes=int, flags=int)
     def recv_into_w(self, space, w_buffer, nbytes=0, flags=0):
         if nbytes == 0 or nbytes > lgt:
             nbytes = lgt
         try:
-            return space.wrap(self.recvinto(rwbuffer, nbytes, flags))
-        except SocketError, e:
+            return space.wrap(self.sock.recvinto(rwbuffer, nbytes, flags))
+        except SocketError as e:
             raise converted_error(space, e)
 
     @unwrap_spec(nbytes=int, flags=int)
         if nbytes == 0 or nbytes > lgt:
             nbytes = lgt
         try:
-            readlgt, addr = self.recvfrom_into(rwbuffer, nbytes, flags)
+            readlgt, addr = self.sock.recvfrom_into(rwbuffer, nbytes, flags)
             if addr:
-                w_addr = addr_as_object(addr, self.fd, space)
+                w_addr = addr_as_object(addr, self.sock.fd, space)
             else:
                 w_addr = space.w_None
             return space.newtuple([space.wrap(readlgt), w_addr])
-        except SocketError, e:
+        except SocketError as e:
             raise converted_error(space, e)
 
     @unwrap_spec(cmd=int)
                     option_ptr.c_keepaliveinterval = space.uint_w(w_interval)
 
                 res = _c.WSAIoctl(
-                    self.fd, cmd, value_ptr, value_size,
+                    self.sock.fd, cmd, value_ptr, value_size,
                     rffi.NULL, 0, recv_ptr, rffi.NULL, rffi.NULL)
                 if res < 0:
                     raise converted_error(space, rsocket.last_error())
         (flag == SHUT_RDWR).
         """
         try:
-            self.shutdown(how)
-        except SocketError, e:
+            self.sock.shutdown(how)
+        except SocketError as e:
             raise converted_error(space, e)
 
     #------------------------------------------------------------
 
 socketmethods = {}
 for methodname in socketmethodnames:
-    method = getattr(W_RSocket, methodname + '_w')
+    method = getattr(W_Socket, methodname + '_w')
     socketmethods[methodname] = interp2app(method)
 
-W_RSocket.typedef = TypeDef("_socket.socket",
+W_Socket.typedef = TypeDef("_socket.socket",
     __doc__ = """\
 socket([family[, type[, proto]]]) -> socket object
 
 shutdown(how) -- shut down traffic in one or both directions
 
  [*] not available on all platforms!""",
-    __new__ = interp2app(W_RSocket.descr_new.im_func),
-    __init__ = interp2app(W_RSocket.descr_init),
-    type = interp_attrproperty('type', W_RSocket),
-    proto = interp_attrproperty('proto', W_RSocket),
-    family = interp_attrproperty('family', W_RSocket),
+    __new__ = interp2app(W_Socket.descr_new.im_func),
+    __init__ = interp2app(W_Socket.descr_init),
+    type = interp_attrproperty('type', W_Socket),
+    proto = interp_attrproperty('proto', W_Socket),
+    family = interp_attrproperty('family', W_Socket),
     ** socketmethods
     )

File pypy/module/_ssl/interp_ssl.py

             lgt = rwbuffer.getlength()
             if num_bytes < 0 or num_bytes > lgt:
                 num_bytes = lgt
-        raw_buf, gc_buf = rffi.alloc_buffer(num_bytes)
-        while True:
-            err = 0
 
-            count = libssl_SSL_read(self.ssl, raw_buf, num_bytes)
-            err = libssl_SSL_get_error(self.ssl, count)
+        with rffi.scoped_alloc_buffer(num_bytes) as buf:
+            while True:
+                err = 0
 
-            if err == SSL_ERROR_WANT_READ:
-                sockstate = check_socket_and_wait_for_timeout(
-                    space, w_socket, False)
-            elif err == SSL_ERROR_WANT_WRITE:
-                sockstate = check_socket_and_wait_for_timeout(
-                    space, w_socket, True)
-            elif (err == SSL_ERROR_ZERO_RETURN and
-                  libssl_SSL_get_shutdown(self.ssl) == SSL_RECEIVED_SHUTDOWN):
-                rffi.keep_buffer_alive_until_here(raw_buf, gc_buf)
-                if space.is_none(w_buf):
-                    return space.wrapbytes('')
+                count = libssl_SSL_read(self.ssl, buf.raw, num_bytes)
+                err = libssl_SSL_get_error(self.ssl, count)
+
+                if err == SSL_ERROR_WANT_READ:
+                    sockstate = check_socket_and_wait_for_timeout(self.space,
+                        self.w_socket, False)
+                elif err == SSL_ERROR_WANT_WRITE:
+                    sockstate = check_socket_and_wait_for_timeout(self.space,
+                        self.w_socket, True)
+                elif (err == SSL_ERROR_ZERO_RETURN and
+                   libssl_SSL_get_shutdown(self.ssl) == SSL_RECEIVED_SHUTDOWN):
+                    if space.is_none(w_buf):
+                        return space.wrapbytes('')
+                    else:
+                        return space.wrap(0)
                 else:
-                    return space.wrap(0)
-            else:
-                sockstate = SOCKET_OPERATION_OK
+                    sockstate = SOCKET_OPERATION_OK
 
-            if sockstate == SOCKET_HAS_TIMED_OUT:
-                raise ssl_error(space, "The read operation timed out")
-            elif sockstate == SOCKET_IS_NONBLOCKING:
-                break
+                if sockstate == SOCKET_HAS_TIMED_OUT:
+                    raise ssl_error(self.space, "The read operation timed out")
+                elif sockstate == SOCKET_IS_NONBLOCKING:
+                    break
 
-            if err == SSL_ERROR_WANT_READ or err == SSL_ERROR_WANT_WRITE:
-                continue
-            else:
-                break
+                if err == SSL_ERROR_WANT_READ or err == SSL_ERROR_WANT_WRITE:
+                    continue
+                else:
+                    break
 
-        if count <= 0:
-            raise _ssl_seterror(space, self, count)
+            if count <= 0:
+                raise _ssl_seterror(self.space, self, count)
 
-        result = rffi.str_from_buffer(raw_buf, gc_buf, num_bytes, count)
-        rffi.keep_buffer_alive_until_here(raw_buf, gc_buf)
+            result = buf.str(count)
+
         if rwbuffer is not None:
             rwbuffer.setslice(0, result)
-            return space.wrap(count)
+            return self.space.wrap(count)
         else:
             return space.wrapbytes(result)
 

File pypy/module/array/interp_array.py

         Convert the array to an array of machine values and return the
         bytes representation.
         """
+        size = self.len
+        if size == 0:
+            return space.wrap('')
         cbuf = self._charbuf_start()
-        s = rffi.charpsize2str(cbuf, self.len * self.itemsize)
+        s = rffi.charpsize2str(cbuf, size * self.itemsize)
         self._charbuf_stop()
         return self.space.wrapbytes(s)
 
 def make_array(mytype):
     W_ArrayBase = globals()['W_ArrayBase']
 
+    unpack_driver = jit.JitDriver(name='unpack_array',
+                                  greens=['tp'],
+                                  reds=['self', 'w_iterator'])
+
     class W_Array(W_ArrayBase):
         itemsize = mytype.bytes
         typecode = mytype.typecode
                 return rffi.cast(mytype.itemtype, item)
             #
             # "regular" case: it fits in an rpython integer (lltype.Signed)
+            # or it is a float
+            return self.item_from_int_or_float(item)
+
+        def item_from_int_or_float(self, item):
             result = rffi.cast(mytype.itemtype, item)
             if mytype.canoverflow:
                 if rffi.cast(lltype.Signed, result) != item:
                                % mytype.bytes)
                     if not mytype.signed:
                         msg = 'un' + msg      # 'signed' => 'unsigned'
-                    raise OperationError(space.w_OverflowError,
-                                         space.wrap(msg))
+                    raise OperationError(self.space.w_OverflowError,
+                                         self.space.wrap(msg))
             return result
 
         def __del__(self):
         def fromsequence(self, w_seq):
             space = self.space
             oldlen = self.len
-            try:
-                new = space.len_w(w_seq)
-                self.setlen(self.len + new)
-            except OperationError:
-                pass
+            newlen = oldlen
 
-            i = 0
-            try:
-                if mytype.typecode == 'u':
-                    myiter = space.unpackiterable
-                else:
-                    myiter = space.listview
-                for w_i in myiter(w_seq):
-                    if oldlen + i >= self.len:
-                        self.setlen(oldlen + i + 1)
-                    self.buffer[oldlen + i] = self.item_w(w_i)
-                    i += 1
-            except OperationError:
-                self.setlen(oldlen + i)
-                raise
-            self.setlen(oldlen + i)
+            # optimized case for arrays of integers or floats
+            if mytype.unwrap == 'int_w':
+                lst = space.listview_int(w_seq)
+            elif mytype.unwrap == 'float_w':
+                lst = space.listview_float(w_seq)
+            else:
+                lst = None
+            if lst is not None:
+                self.setlen(oldlen + len(lst))
+                try:
+                    buf = self.buffer
+                    for num in lst:
+                        buf[newlen] = self.item_from_int_or_float(num)
+                        newlen += 1
+                except OperationError:
+                    self.setlen(newlen)
+                    raise
+                return
+
+            # this is the common case: w_seq is a list or a tuple
+            lst_w = space.listview_no_unpack(w_seq)
+            if lst_w is not None:
+                self.setlen(oldlen + len(lst_w))
+                buf = self.buffer
+                try:
+                    for w_num in lst_w:
+                        # note: self.item_w() might invoke arbitrary code.
+                        # In case it resizes the same array, then strange
+                        # things may happen, but as we don't reload 'buf'
+                        # we know that one is big enough for all items
+                        # (so at least we avoid crashes)
+                        buf[newlen] = self.item_w(w_num)
+                        newlen += 1
+                except OperationError:
+                    if buf == self.buffer:
+                        self.setlen(newlen)
+                    raise
+                return
+
+            self._fromiterable(w_seq)
+
+        def _fromiterable(self, w_seq):
+            # a more careful case if w_seq happens to be a very large
+            # iterable: don't copy the items into some intermediate list
+            w_iterator = self.space.iter(w_seq)
+            tp = self.space.type(w_iterator)
+            while True:
+                unpack_driver.jit_merge_point(tp=tp, self=self,
+                                              w_iterator=w_iterator)
+                space = self.space
+                try:
+                    w_item = space.next(w_iterator)
+                except OperationError, e:
+                    if not e.match(space, space.w_StopIteration):
+                        raise
+                    break  # done
+                self.descr_append(space, w_item)
 
         def extend(self, w_iterable, accept_different_array=False):
             space = self.space
 
         def descr_append(self, space, w_x):
             x = self.item_w(w_x)
-            self.setlen(self.len + 1)
-            self.buffer[self.len - 1] = x
+            index = self.len
+            self.setlen(index + 1)
+            self.buffer[index] = x
 
         # List interface
         def descr_count(self, space, w_val):

File pypy/module/array/test/test_array.py

         raises(ValueError, self.array('i').tounicode)
         assert self.array('u', 'hello').tounicode() == 'hello'
 
+    def test_empty_tostring(self):
+        a = self.array('l')
+        assert a.tostring() == b''
+
     def test_buffer(self):
         a = self.array('h', b'Hi')
         buf = memoryview(a)

File pypy/module/cpyext/api.py

 from rpython.rtyper.lltypesystem import ll2ctypes
 from rpython.rtyper.annlowlevel import llhelper
 from rpython.rlib.objectmodel import we_are_translated
-from rpython.conftest import cdir
+from rpython.translator import cdir
 from rpython.translator.tool.cbuild import ExternalCompilationInfo
 from rpython.translator.gensupp import NameManager
 from rpython.tool.udir import udir

File pypy/module/cpyext/listobject.py

     IndexError exception."""
     if not isinstance(w_list, W_ListObject):
         PyErr_BadInternalCall(space)
-    wrappeditems = w_list.getitems()
-    if index < 0 or index >= len(wrappeditems):
+    if index < 0 or index >= w_list.length():
         raise OperationError(space.w_IndexError, space.wrap(
             "list index out of range"))
-    return borrow_from(w_list, wrappeditems[index])
+    w_item = w_list.getitem(index)
+    return borrow_from(w_list, w_item)
 
 
 @cpython_api([PyObject, PyObject], rffi.INT_real, error=-1)

File pypy/module/cpyext/pystate.py

File contents unchanged.

File pypy/module/cpyext/state.py

File contents unchanged.

File pypy/module/cpyext/test/test_cpyext.py

         import sys
         if sys.platform != "win32" or sys.version_info < (2, 6):
             skip("Windows Python >= 2.6 only")
-        assert sys.dllhandle
-        assert sys.dllhandle.getaddressindll('PyPyErr_NewException')
-        import ctypes # slow
-        PyUnicode_GetDefaultEncoding = ctypes.pythonapi.PyPyUnicode_GetDefaultEncoding
-        PyUnicode_GetDefaultEncoding.restype = ctypes.c_char_p
-        assert PyUnicode_GetDefaultEncoding() == 'ascii'
+        assert isinstance(sys.dllhandle, int)
 
 class AppTestCpythonExtensionBase(LeakCheckingTest):
 

File pypy/module/cpyext/test/test_unicodeobject.py

 
     def test_copy(self, space, api):
         w_x = space.wrap(u"abcd\u0660")
-        target_chunk, _ = rffi.alloc_unicodebuffer(space.int_w(space.len(w_x)))
-        #lltype.malloc(Py_UNICODE, space.int_w(space.len(w_x)), flavor='raw')
+        count1 = space.int_w(space.len(w_x))
+        target_chunk = lltype.malloc(rffi.CWCHARP.TO, count1, flavor='raw')
 
         x_chunk = api.PyUnicode_AS_UNICODE(w_x)
         api.Py_UNICODE_COPY(target_chunk, x_chunk, 4)

File pypy/module/micronumpy/__init__.py

         'scalar' : 'ctors.build_scalar',
         'array': 'ctors.array',
         'zeros': 'ctors.zeros',
-        'empty': 'ctors.zeros',
+        'empty': 'ctors.empty',
         'empty_like': 'ctors.empty_like',
         'fromstring': 'ctors.fromstring',
         'frombuffer': 'ctors.frombuffer',

File pypy/module/micronumpy/base.py

     pass
 
 
-class W_NDimArray(W_Root):
+class W_NumpyObject(W_Root):
+    """Base class for ndarrays and scalars (aka boxes)."""
+    _attrs_ = []
+
+
+class W_NDimArray(W_NumpyObject):
     __metaclass__ = extendabletype
 
     def __init__(self, implementation):
         self.implementation = implementation
 
     @staticmethod
-    def from_shape(space, shape, dtype, order='C', w_instance=None):
+    def from_shape(space, shape, dtype, order='C', w_instance=None, zero=True):
         from pypy.module.micronumpy import concrete
         from pypy.module.micronumpy.strides import calc_strides
         strides, backstrides = calc_strides(shape, dtype.base, order)
         impl = concrete.ConcreteArray(shape, dtype.base, order, strides,
-                                      backstrides)
+                                      backstrides, zero=zero)
         if w_instance:
             return wrap_impl(space, space.type(w_instance), w_instance, impl)
         return W_NDimArray(impl)
             w_val = dtype.coerce(space, space.wrap(0))