Commits

David Schneider committed 53fcebf Merge

merge heads


Files changed (9)

+
+all: pypy-c
+
+pypy-c:
+	@echo "Building PyPy with JIT, it'll take about 40 minutes and 4G of RAM"
+	@sleep 3
+	rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py
 and send us feedback!
 
     the pypy-dev team <pypy-dev@python.org>
+
+Building
+========
+
+Build with::
+
+  rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py
+
+This ends up with a ``pypy-c`` binary in the main pypy directory. We suggest
+using virtualenv with the resulting pypy-c as the interpreter; you can
+find more details about the various installation schemes here:
+
+http://doc.pypy.org/en/latest/getting-started.html#installing-pypy

lib-python/2.7/json/decoder.py

         if nextchar == '}':
             if object_pairs_hook is not None:
                 result = object_pairs_hook(pairs)
-                return result, end
+                return result, end + 1
             pairs = {}
             if object_hook is not None:
                 pairs = object_hook(pairs)
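
The behaviour this fixes can be checked with plain ``json`` calls; a minimal sketch (output noted in comments, matching the new test below):

    import json

    # With the fix, decoding an empty object with an object_pairs_hook
    # consumes the closing brace and returns the hook's result.
    print json.loads("{}", object_pairs_hook=list)   # []

    # Without the "+ 1", the returned index still points at '}', so the
    # pure-Python decoder reports it as "Extra data" and raises ValueError.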

lib-python/2.7/json/tests/test_decode.py

                                     object_pairs_hook=OrderedDict,
                                     object_hook=lambda x: None),
                          OrderedDict(p))
+        self.assertEqual(self.loads("{}", object_pairs_hook=list), [])
 
 
 class TestPyDecode(TestDecode, PyTest): pass

pypy/doc/config/translation.gcrootfinder.txt

 - ``--gcrootfinder=asmgcc``: use assembler hackery to find the
   roots directly from the normal stack.  This is a bit faster,
   but platform specific.  It works so far with GCC or MSVC,
-  on i386 and x86-64.
+  on i386 and x86-64.  It is tested only on Linux (where it is
+  the default), so other platforms (as well as MSVC) may need
+  various fixes before they can be used.
 
 You may have to force the use of the shadowstack root finder if
 you are running into troubles or if you insist on translating

rpython/jit/backend/arm/assembler.py

         ofs = self.cpu.get_ofs_of_frame_field('jf_guard_exc')
         # make sure ofs fits into a register
         assert check_imm_arg(ofs)
-        mc.LDR_ri(r.r0.value, r.fp.value, imm=ofs)
+        self.store_reg(mc, r.r0, r.fp, ofs)
         propagate_exception_descr = rffi.cast(lltype.Signed,
                   cast_instance_to_gcref(self.cpu.propagate_exception_descr))
         # put propagate_exception_descr into frame
         ofs = self.cpu.get_ofs_of_frame_field('jf_descr')
         # make sure ofs fits into a register
         assert check_imm_arg(ofs)
-        mc.gen_load_int(r.r0.value, propagate_exception_descr)
-        mc.STR_ri(r.r0.value, r.fp.value, imm=ofs)
+        mc.gen_load_int(r.r1.value, propagate_exception_descr)
+        self.store_reg(mc, r.r1, r.fp, ofs)
         mc.MOV_rr(r.r0.value, r.fp.value)
         self.gen_func_epilog(mc)
         rawstart = mc.materialize(self.cpu.asmmemmgr, [])

rpython/jit/backend/test/runner_test.py

         a = lltype.malloc(A, 2, flavor='raw')
         a[0] = rffi.cast(rffi.SHORT, 666)
         a[1] = rffi.cast(rffi.SHORT, 777)
-        a_int = rffi.cast(lltype.Signed, a)
+        addr = llmemory.cast_ptr_to_adr(a)
+        a_int = heaptracker.adr2int(addr)
         print 'a_int:', a_int
         self.execute_operation(rop.SETARRAYITEM_RAW,
                                [ConstInt(a_int), ConstInt(0), ConstInt(-7654)],
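
For reference, a minimal sketch of the pointer-to-integer conversion the test now uses, assuming an RPython checkout on the path (the heaptracker import path is an assumption mirroring this test file's own imports):

    from rpython.rtyper.lltypesystem import lltype, rffi, llmemory
    from rpython.jit.codewriter import heaptracker   # assumed import path

    # a raw array of shorts, as in the test above
    A = rffi.CArray(rffi.SHORT)
    a = lltype.malloc(A, 2, flavor='raw')
    a[0] = rffi.cast(rffi.SHORT, 666)
    # instead of rffi.cast(lltype.Signed, a), go through an address:
    addr = llmemory.cast_ptr_to_adr(a)
    a_int = heaptracker.adr2int(addr)   # integer (symbolic when run untranslated)
    lltype.free(a, flavor='raw')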

rpython/jit/backend/x86/rx86.py

 
 def define_modrm_modes(insnname_template, before_modrm, after_modrm=[], regtype='GPR'):
     def add_insn(code, *modrm):
-        args = before_modrm + list(modrm) + after_modrm
+        args = before_modrm + list(modrm)
         methname = insnname_template.replace('*', code)
-        if methname.endswith('_rr') or methname.endswith('_xx'):
+        if (methname.endswith('_rr') or methname.endswith('_xx')
+                or methname.endswith('_ri')):
             args.append('\xC0')
+        args += after_modrm
 
         if regtype == 'XMM':
             insn_func = xmminsn(*args)
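
The reordering above matters because in an x86 instruction the ModRM byte comes before any immediate: for the newly covered register/immediate ('_ri') forms, the fixed '\xC0' register-direct ModRM has to be appended first and the after_modrm arguments (which carry the immediate encoder for the '*i' forms) last. A hedged worked example for a 16-bit register/immediate move to ecx, matching the test expectation below:

    # layout: 0x66 prefix, 0xC7 opcode, ModRM, then the 16-bit immediate
    prefix, opcode = 0x66, 0xC7        # MOV r/m16, imm16
    modrm = 0xC0 | 1                   # register-direct mode, r/m = 1 (ecx)
    imm = 12345                        # 0x3039
    enc = ''.join(chr(b) for b in [prefix, opcode, modrm, imm & 0xFF, imm >> 8])
    assert enc == '\x66\xC7\xC1\x39\x30'   # same bytes the test below expects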

rpython/jit/backend/x86/test/test_regloc.py

 import struct, sys
+from rpython.jit.backend.x86.rx86 import R
 from rpython.jit.backend.x86.regloc import *
 from rpython.jit.backend.x86.test.test_rx86 import CodeBuilder32, CodeBuilder64, assert_encodes_as
 from rpython.jit.backend.x86.assembler import heap
 cb32 = LocationCodeBuilder32
 cb64 = LocationCodeBuilder64
 
+def test_mov_8():
+    assert_encodes_as(cb32, "MOV8_ri", (R.cl, 25), '\xB1\x19')
+
 def test_mov_16():
+    # only 'MOV16_*r' and 'MOV16_*i' are supported
     # 32-bit
     assert_encodes_as(cb32, "MOV16", (ecx, ebx), '\x66\x89\xD9')
-    assert_encodes_as(cb32, "MOV16", (ecx, ImmedLoc(12345)), '\x66\xB9\x39\x30')
-
+    assert_encodes_as(cb32, "MOV16",
+                      (AddressLoc(ecx, ImmedLoc(16), 0, 0), ebx),
+                      '\x66\x89\x59\x10')
     # 64-bit
     assert_encodes_as(cb64, "MOV16", (r8, ebx), '\x66\x41\x89\xD8')  # 11 011 000
     assert_encodes_as(cb64, "MOV16", (ebx, r8), '\x66\x44\x89\xC3')  # 11 000 011
-    assert_encodes_as(cb64, "MOV16", (ecx, ebx), '\x66\x40\x89\xD9')
-    assert_encodes_as(cb64, "MOV16", (ecx, ImmedLoc(12345)), '\x66\xB9\x39\x30')
+    assert_encodes_as(cb64, "MOV16", (ecx, ebx), '\x66\x89\xD9')
     # for the next case we don't pick the most efficient encoding, but well
-    expected = '\x66\x40\xC7\xC1\xC7\xCF'  # could be '\x66\xB9\xC7\xCF'
+    expected = '\x66\xC7\xC1\x39\x30'      # could be '\x66\xB9\x39\x30'
+    assert_encodes_as(cb64, "MOV16", (ecx, ImmedLoc(12345)), expected)
+    # for the next case we don't pick the most efficient encoding, but well
+    expected = '\x66\xC7\xC1\xC7\xCF'      # could be '\x66\xB9\xC7\xCF'
     assert_encodes_as(cb64, "MOV16", (ecx, ImmedLoc(-12345)), expected)
-    assert_encodes_as(cb64, "MOV16", (r9, ImmedLoc(12345)), '\x66\x41\xB9\x39\x30')
+    # for the next case we don't pick the most efficient encoding, but well
+    expected = '\x66\x41\xC7\xC1\x39\x30'  # could be '\x66\x41\xB9\x39\x30'
+    assert_encodes_as(cb64, "MOV16", (r9, ImmedLoc(12345)), expected)
     # for the next case we don't pick the most efficient encoding, but well
     expected = '\x66\x41\xC7\xC1\xC7\xCF'  # could be '\x66\x41\xB9\xC7\xCF'
     assert_encodes_as(cb64, "MOV16", (r9, ImmedLoc(-12345)), expected)
-    assert_encodes_as(cb64, "MOV16", (AddressLoc(r13, ImmedLoc(0), 0, 0), ImmedLoc(12345)), '\x66\x41\xC7\x45\x00\x39\x30')
+    assert_encodes_as(cb64, "MOV16",
+                      (AddressLoc(r13, ImmedLoc(0), 0, 0), ImmedLoc(12345)),
+                      '\x66\x41\xC7\x45\x00\x39\x30')
 
 def test_cmp_16():
+    # only 'CMP16_mi' is supported
     # 32-bit
-    assert_encodes_as(cb32, "CMP16", (ecx, ebx), '\x66\x39\xD9')
-    assert_encodes_as(cb32, "CMP16", (ecx, ImmedLoc(12345)), '\x66\x81\xF9\x39\x30')
-
+    assert_encodes_as(cb32, "CMP16",
+                      (AddressLoc(ecx, ImmedLoc(0), 0, 0), ImmedLoc(21324)),
+                      '\x66\x81\x39\x4c\x53')
+    assert_encodes_as(cb32, "CMP16",
+                      (AddressLoc(esi, ImmedLoc(2), 0, 0), ImmedLoc(-12345)),
+                      '\x66\x81\x7e\x02\xc7\xcf')
     # 64-bit
-    assert_encodes_as(cb64, "CMP16", (r8, ebx), '\x66\x41\x39\xD8')  # 11 011 000
-    assert_encodes_as(cb64, "CMP16", (ebx, r8), '\x66\x44\x39\xC3')  # 11 000 011
-    assert_encodes_as(cb64, "CMP16", (ecx, ebx), '\x66\x40\x39\xD9')
-    assert_encodes_as(cb64, "CMP16", (ecx, ImmedLoc(12345)), '\x66\x40\x81\xF9\x39\x30')
-    assert_encodes_as(cb64, "CMP16", (AddressLoc(r13, ImmedLoc(0), 0, 0), ImmedLoc(12345)), '\x66\x41\x81\x7D\x00\x39\x30')
+    assert_encodes_as(cb64, "CMP16",
+                      (AddressLoc(r13, ImmedLoc(0), 0, 0), ImmedLoc(12345)),
+                      '\x66\x41\x81\x7D\x00\x39\x30')
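
For readers decoding the expected strings by hand, here is an annotated breakdown of the 64-bit CMP16 expectation above (the annotations are mine, not from the backend):

    import struct
    enc = b'\x66\x41\x81\x7D\x00\x39\x30'
    # 0x66       operand-size prefix: 16-bit operand
    # 0x41       REX.B, extending the ModRM r/m field (rbp -> r13)
    # 0x81 /7    CMP r/m16, imm16 (the /7 sits in the ModRM reg field)
    # 0x7D       ModRM: mod=01 (disp8 follows), reg=111 (/7), r/m=101 (r13 via REX.B)
    # 0x00       disp8 = 0; r13 cannot be encoded without a displacement byte
    # 0x39 0x30  the 16-bit immediate, little endian
    assert struct.unpack('<H', enc[5:7])[0] == 12345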
 
 def test_relocation():
     from rpython.rtyper.lltypesystem import lltype, rffi