Commits

Maciej Fijalkowski committed bc24f91 Merge

merge heads


Files changed (16)

lib_pypy/_ctypes/structure.py

             return
         if '_fields_' not in self.__dict__:
             self._fields_ = []
-            self._names = []
             _set_shape(self, [], self._is_union)
 
     __setattr__ = struct_setattr

pypy/doc/release-2.0.0.rst

+============================
+PyPy 2.0 - Einstein Sandwich
+============================
+
+We're pleased to announce PyPy 2.0. This is a stable release that brings
+a swath of bugfixes, small performance improvements and compatibility fixes.
+
+You can download the PyPy 2.0 release here:
+
+    http://pypy.org/download.html
+
+The two biggest changes since PyPy 1.9 are:
+
+* Stackless is now supported, including greenlets, which means eventlet
+  and gevent should work (but read below about gevent)
+
+* PyPy now contains release 0.6 of `cffi`_ as a builtin module, which
+  is the preferred way of calling C from Python that works well on PyPy
+
+.. _`cffi`: http://cffi.readthedocs.org
+
+What is PyPy?
+=============
+
+PyPy is a very compliant Python interpreter, almost a drop-in replacement for
+CPython 2.7. It's fast (`pypy 2.0 and cpython 2.7.3`_ performance comparison)
+due to its integrated tracing JIT compiler.
+
+This release supports x86 machines running Linux 32/64, Mac OS X 64 or
+Windows 32.  Windows 64 work is still stalled; we would welcome a volunteer
+to handle it. ARM support is on the way and we're expecting to release
+an alpha ARM version shortly.
+
+.. _`pypy 2.0 and cpython 2.7.3`: http://speed.pypy.org
+
+Highlights
+==========
+
+* Stackless, including greenlets, should work. For gevent, you need to check
+  out `pypycore`_ and use the `pypy-hacks`_ branch of gevent.
+
+* cffi is now a module included with PyPy.  (`cffi`_ also exists for
+  CPython; the two versions should be fully compatible.)  It is the
+  preferred way of calling C from Python that works on PyPy.
+
+* Callbacks from C are now JITted, which means XML parsing is much faster.
+
+* A lot of speed improvements in various corners of the language; most of them
+  are small, but a few speed up particular cases a lot.
+
+* The JIT was refactored to emit machine code which manipulates a "frame"
+  that lives on the heap rather than on the stack.  This is what makes
+  Stackless work, and it could bring another future speed-up (not done yet).
+
+* A lot of stability issues fixed.
+
+.. _`pypycore`: https://github.com/gevent-on-pypy/pypycore/
+.. _`pypy-hacks`: https://github.com/schmir/gevent/tree/pypy-hacks
+
+Cheers,
+fijal, arigo and the PyPy team
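
For readers unfamiliar with cffi, here is a minimal sketch of the kind of call the
announcement refers to, assuming the cffi 0.6 API bundled with this release (the
printf example mirrors the cffi documentation and is illustrative only, not part of
this commit):

    from cffi import FFI

    ffi = FFI()
    ffi.cdef("int printf(const char *format, ...);")   # declare the C function to call
    C = ffi.dlopen(None)                                # load the standard C library
    arg = ffi.new("char[]", "world")                    # allocate a C string
    C.printf("hello, %s!\n", arg)                       # call C directly from Python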

pypy/doc/whatsnew-head.rst

 .. this is a revision shortly after release-2.0
 .. startrev: a13c07067613
 
-

pypy/goal/getnightly.py

 
 if sys.platform.startswith('linux'):
     arch = 'linux'
+    cmd = 'wget "%s"'
+    tar = "tar -x -v --wildcards --strip-components=2 -f %s '*/bin/pypy'"
+elif sys.platform.startswith('darwin'):
+    arch = 'osx'
+    cmd = 'curl -O "%s"'
+    tar = "tar -x -v --strip-components=2 -f %s '*/bin/pypy'"
 else:
-    print 'Cannot determine the platform, please update this scrip'
+    print 'Cannot determine the platform, please update this script'
     sys.exit(1)
 
 if sys.maxint == 2**63 - 1:
 tmp = py.path.local.mkdtemp()
 mydir = tmp.chdir()
 print 'Downloading pypy to', tmp
-if os.system('wget "%s"' % url) != 0:
+if os.system(cmd % url) != 0:
     sys.exit(1)
 
 print 'Extracting pypy binary'
 mydir.chdir()
-os.system("tar -x -v --wildcards --strip-components=2 -f %s '*/bin/pypy'" % tmp.join(filename))
-
+os.system(tar % tmp.join(filename))

pypy/module/__pypy__/__init__.py

         from rpython.jit.backend import detect_cpu
         model = detect_cpu.autodetect_main_model_and_size()
         self.extra_interpdef('cpumodel', 'space.wrap(%r)' % model)
+        if self.space.config.translation.jit:
+            features = detect_cpu.getcpufeatures(model)
+            self.extra_interpdef('jit_backend_features',
+                                    'space.wrap(%r)' % features)
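
At the application level the new entry shows up as __pypy__.jit_backend_features.
A short usage sketch, assuming a translated PyPy with the JIT enabled (on other
interpreters the import simply fails, which is why the tests below guard it):

    try:
        from __pypy__ import jit_backend_features   # only present on JIT-enabled PyPy builds
    except ImportError:
        jit_backend_features = []                    # e.g. CPython or a no-JIT build

    if 'singlefloats' in jit_backend_features:
        print 'the JIT backend supports single-precision floats'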

pypy/module/__pypy__/test/test_special.py

         assert list_strategy(l) == "empty"
         o = 5
         raises(TypeError, list_strategy, 5)
+
+
+class AppTestJitFeatures(object):
+    spaceconfig = {"translation.jit": True}
+
+    def test_jit_backend_features(self):
+        from __pypy__ import jit_backend_features
+        supported_types = jit_backend_features
+        assert isinstance(supported_types, list)
+        for x in supported_types:
+            assert x in ['floats', 'singlefloats', 'longlong']

pypy/module/_random/interp_random.py

             elif space.isinstance_w(w_n, space.w_long):
                 w_n = space.abs(w_n)
             else:
-                # XXX not perfectly like CPython
-                w_n = space.abs(space.hash(w_n))
+                n = space.hash_w(w_n)
+                w_n = space.wrap(r_uint(n))
         key = []
         w_one = space.newint(1)
         w_two = space.newint(2)
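
The change above bases the seed on an unsigned, machine-word-sized view of the
object's hash instead of the absolute value of the hash. A small worked sketch of
the same masking the updated test below uses (a hedged illustration; r_uint performs
the equivalent conversion at interp level):

    import sys

    mask = sys.maxint * 2 + 1     # 0xffffffffffffffff on a 64-bit build
    h = hash("spam and eggs")     # hash() may return a negative number
    seed_value = h & mask         # the non-negative value the seed is derived from
    assert 0 <= seed_value <= mask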

pypy/module/_random/test/test_random.py

         rnd1.setstate((-1, ) * 624 + (0, ))
 
     def test_seed(self):
-        import _random
+        import _random, sys
         rnd = _random.Random()
         rnd.seed()
         different_nums = []
+        mask = sys.maxint * 2 + 1
         for obj in ["spam and eggs", 3.14, 1+2j, 'a', tuple('abc')]:
             nums = []
-            for o in [obj, hash(obj), -hash(obj)]:
+            for o in [obj, hash(obj) & mask, -(hash(obj) & mask)]:
                 rnd.seed(o)
                 nums.append([rnd.random() for i in range(100)])
             n1 = nums[0]

pypy/module/pypyjit/test_pypy_c/test_array.py

         """)
 
     def test_array_of_floats(self):
+        try:
+            from __pypy__ import jit_backend_features
+            if 'singlefloats' not in jit_backend_features:
+                py.test.skip("test requires singlefloats support from the JIT backend")
+        except ImportError:
+            pass
         def main():
             from array import array
             img = array('f', [21.5]*1000)

pypy/module/test_lib_pypy/ctypes_tests/test_structures.py

         pt = POINT(y=2, x=1)
         assert (pt.x, pt.y) == (1, 2)
 
+    def test_subclass_initializer(self):
+        class POINT(Structure):
+            _fields_ = [("x", c_int), ("y", c_int)]
+
+        class POSITION(POINT):
+            # A subclass without _fields_
+            pass
+        pos = POSITION(1, 2)
+        assert (pos.x, pos.y) == (1, 2)
+
+
     def test_invalid_field_types(self):
         class POINT(Structure):
             pass
         raises(AttributeError, setattr, X, "_fields_", [])
         Y.__fields__ = []
 
+
 class TestPatologicalCases(BaseCTypesTestChecker):
     def test_structure_overloading_getattr(self):
         class X(Structure):

pypy/objspace/std/stringobject.py

 
     def unicode_w(w_self, space):
         # Use the default encoding.
-        from pypy.objspace.std.unicodetype import unicode_from_string, \
-                decode_object
+        from pypy.objspace.std.unicodetype import (unicode_from_string,
+            decode_object, _get_encoding_and_errors)
         w_defaultencoding = space.call_function(space.sys.get(
                                                 'getdefaultencoding'))
-        from pypy.objspace.std.unicodetype import _get_encoding_and_errors, \
-            unicode_from_string, decode_object
         encoding, errors = _get_encoding_and_errors(space, w_defaultencoding,
                                                     space.w_None)
         if encoding is None and errors is None:
 def str_title__String(space, w_self):
     input = w_self._value
     builder = StringBuilder(len(input))
-    prev_letter=' '
+    prev_letter = ' '
 
     for pos in range(len(input)):
         ch = input[pos]
             space.wrap("rjust() argument 2 must be a single character"))
 
     d = u_arg - len(u_self)
-    if d>0:
+    if d > 0:
         fillchar = fillchar[0]    # annotator hint: it's a single character
         u_self = d * fillchar + u_self
 
             space.wrap("ljust() argument 2 must be a single character"))
 
     d = u_arg - len(u_self)
-    if d>0:
+    if d > 0:
         fillchar = fillchar[0]    # annotator hint: it's a single character
         u_self += d * fillchar
 
     return space.newbool(self.find(sub) >= 0)
 
 def str_find__String_String_ANY_ANY(space, w_self, w_sub, w_start, w_end):
-    (self, start, end) =  _convert_idx_params(space, w_self, w_start, w_end)
+    (self, start, end) = _convert_idx_params(space, w_self, w_start, w_end)
     res = self.find(w_sub._value, start, end)
     return space.wrap(res)
 
 def str_rfind__String_String_ANY_ANY(space, w_self, w_sub, w_start, w_end):
-    (self, start, end) =  _convert_idx_params(space, w_self, w_start, w_end)
+    (self, start, end) = _convert_idx_params(space, w_self, w_start, w_end)
     res = self.rfind(w_sub._value, start, end)
     return space.wrap(res)
 
 
 
 def str_index__String_String_ANY_ANY(space, w_self, w_sub, w_start, w_end):
-    (self, start, end) =  _convert_idx_params(space, w_self, w_start, w_end)
+    (self, start, end) = _convert_idx_params(space, w_self, w_start, w_end)
     res = self.find(w_sub._value, start, end)
     if res < 0:
         raise OperationError(space.w_ValueError,
 
 
 def str_rindex__String_String_ANY_ANY(space, w_self, w_sub, w_start, w_end):
-    (self, start, end) =  _convert_idx_params(space, w_self, w_start, w_end)
+    (self, start, end) = _convert_idx_params(space, w_self, w_start, w_end)
     res = self.rfind(w_sub._value, start, end)
     if res < 0:
         raise OperationError(space.w_ValueError,
         while 1:
             #no sophisticated linebreak support now, '\r' just for passing adapted CPython test
             if u_token[offset-1] == "\n" or u_token[offset-1] == "\r":
-                break;
+                break
             distance += 1
             offset -= 1
             if offset == 0:
         #print '<offset:%d distance:%d tabsize:%d token:%s>' % (offset, distance, u_tabsize, u_token)
         distance = (u_tabsize-distance) % u_tabsize
         if distance == 0:
-            distance=u_tabsize
+            distance = u_tabsize
 
     return distance
 
 
         for token in split:
             #print  "%d#%d -%s-" % (_tabindent(oldtoken,u_tabsize), u_tabsize, token)
-            u_expanded += " " * _tabindent(oldtoken,u_tabsize) + token
+            u_expanded += " " * _tabindent(oldtoken, u_tabsize) + token
             oldtoken = token
 
     return wrapstr(space, u_expanded)
 
 
 def str_splitlines__String_ANY(space, w_self, w_keepends):
-    u_keepends  = space.int_w(w_keepends)  # truth value, but type checked
+    u_keepends = space.int_w(w_keepends)  # truth value, but type checked
     data = w_self._value
     selflen = len(data)
     strs_w = []
     return wrapchar(space, str[ival])
 
 def getitem__String_Slice(space, w_str, w_slice):
-    w = space.wrap
     s = w_str._value
     length = len(s)
     start, stop, step, sl = w_slice.indices4(space, length)
             'interpreter/pyparser/test',
             'interpreter/test',
             'interpreter/test2',
+            'module/test_lib_pypy',
             'objspace/std/test',
     ],
 }

rpython/jit/backend/arm/test/test_fficall.py

+import py
+from rpython.jit.metainterp.test import test_fficall
+from rpython.jit.backend.arm.test.support import JitARMMixin
+
+class TestFfiCall(JitARMMixin, test_fficall.FfiCallTests):
+    # for the individual tests see
+    # ====> ../../../metainterp/test/test_fficall.py
+
+    def _add_libffi_types_to_ll2types_maybe(self):
+        # this is needed by test_guard_not_forced_fails, because it produces a
+        # loop which reads the value of types.* into a variable, then a guard
+        # fails and we switch to blackhole: the problem is that at this point
+        # the blackhole interp has a real integer, but it needs to convert it
+        # back to a lltype pointer (which is handled by ll2ctypes, deeply in
+        # the logic). The workaround is to teach ll2ctypes in advance which
+        # are the addresses of the various types.* structures.
+        # Try to comment this code out and run the test to see how it fails :)
+        from rpython.rtyper.lltypesystem import rffi, lltype, ll2ctypes
+        from rpython.rlib.jit_libffi import types
+        for key, value in types.__dict__.iteritems():
+            if isinstance(value, lltype._ptr):
+                addr = rffi.cast(lltype.Signed, value)
+                ll2ctypes._int2obj[addr] = value

rpython/jit/backend/detect_cpu.py

     mod = __import__(modname, {}, {}, clsname)
     return getattr(mod, clsname)
 
+
+def getcpufeatures(backend_name="auto"):
+    """NOT_RPYTHON"""
+    cpucls = getcpuclass(backend_name)
+    return [attr[len('supports_'):] for attr in dir(cpucls)
+                                if attr.startswith('supports_')]
+
 if __name__ == '__main__':
     print autodetect()
     print getcpuclassname()
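
getcpufeatures() only inspects attribute names: any supports_* flag on the backend
CPU class becomes a feature string. An illustrative sketch of that idiom on a
made-up class (FakeCPU is hypothetical, not part of the codebase):

    class FakeCPU(object):
        supports_floats = True
        supports_longlong = True

    features = [attr[len('supports_'):] for attr in dir(FakeCPU)
                if attr.startswith('supports_')]
    print features   # prints ['floats', 'longlong']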

rpython/jit/backend/test/test_detect_cpu.py

 def test_detect_main_model_and_size_from_platform():
     info = autodetect_main_model_and_size()
     assert detect_main_model_and_size_from_platform() == info
+
+def test_getcpufeatures():
+    features = getcpufeatures()
+    assert isinstance(features, list)
+    for x in features:
+        assert x in ['floats', 'singlefloats', 'longlong']

rpython/memory/gctransform/transform.py

     def finish_helpers(self, backendopt=True):
         if self.translator is not None:
             self.mixlevelannotator.finish_annotate()
-        self.finished_helpers = True
         if self.translator is not None:
             self.mixlevelannotator.finish_rtype()
             if backendopt:
                 self.mixlevelannotator.backend_optimize()
+        self.finished_helpers = True
         # Make sure that the database also sees all finalizers now.
         # It is likely that the finalizers need special support there
         newgcdependencies = self.ll_finalizers_ptrs