Commits

Antonio Cuni committed 47af987

merge from trunk: svn merge svn+ssh://codespeak.net/svn/pypy/trunk -r77303:HEAD

Files changed (115)

lib_pypy/_ctypes/function.py

         return self._build_result(restype, resbuffer, argtypes, argsandobjs)
 
     def _getfuncptr(self, argtypes, restype, thisarg=None):
-        if self._ptr is not None:
+        if self._ptr is not None and argtypes is self._argtypes_:
             return self._ptr
         if restype is None or not isinstance(restype, _CDataMeta):
             import ctypes
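
The added condition makes the cached low-level function pointer valid only for the declared argtypes. A minimal sketch of the rule, with a hypothetical make_funcptr helper standing in for the real _ctypes internals:

    def _getfuncptr(self, argtypes, restype, thisarg=None):
        # Reuse the cached pointer only when it was built for exactly the
        # declared argtypes object; a call that passes any other argtypes
        # list must rebuild the pointer instead of hitting the cache.
        if self._ptr is not None and argtypes is self._argtypes_:
            return self._ptr
        return make_funcptr(argtypes, restype)   # hypothetical helper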

pypy/annotation/policy.py

-# base annotation policy for overrides and specialization
+# base annotation policy for specialization
 from pypy.annotation.specialize import default_specialize as default
 from pypy.annotation.specialize import specialize_argvalue, specialize_argtype, specialize_arglistitemtype
 from pypy.annotation.specialize import memo
         if directive is None:
             return pol.default_specialize
 
-        # specialize|override:name[(args)]
+        # specialize[(args)]
         directive_parts = directive.split('(', 1)
         if len(directive_parts) == 1:
             [name] = directive_parts
         except AttributeError:
             raise AttributeError("%r specialize tag not defined in annotation"
                                  "policy %s" % (name, pol))
-        if directive.startswith('override:'):
-            # different signature: override__xyz(*args_s)
-            if parms:
-                raise Exception, "override:* specialisations don't support parameters"
-            def specialize_override(funcdesc, args_s):
-                funcdesc.overridden = True
-                return specializer(*args_s)
-            return specialize_override
         else:
             if not parms:
                 return specializer
         from pypy.rpython.annlowlevel import LowLevelAnnotatorPolicy
         return LowLevelAnnotatorPolicy.specialize__ll_and_arg(*args)
 
-    def override__ignore(pol, *args):
-        bk = getbookkeeper()
-        return bk.immutablevalue(None)
-
 class StrictAnnotatorPolicy(AnnotatorPolicy):
     allow_someobjects = False

pypy/annotation/test/test_annrpython.py

         s = a.build_types(f, [list])
         assert s.classdef is a.bookkeeper.getuniqueclassdef(IndexError)  # KeyError ignored because l is a list
 
-    def test_overrides(self):
-        excs = []
-        def record_exc(e):
-            """NOT_RPYTHON"""
-            excs.append(sys.exc_info)
-        record_exc._annspecialcase_ = "override:record_exc"
-        def g():
-            pass
-        def f():
-            try:
-                g()
-            except Exception, e:
-                record_exc(e)
-        class MyAnnotatorPolicy(policy.AnnotatorPolicy):
-
-            def override__record_exc(pol, s_e):
-                return a.bookkeeper.immutablevalue(None)
-            
-        a = self.RPythonAnnotator(policy=MyAnnotatorPolicy())
-        s = a.build_types(f, [])
-        assert s.const is None
-
     def test_freeze_protocol(self):
         class Stuff:
             def __init__(self, flag):

pypy/config/test/test_pypyoption.py

     assert not conf.translation.backendopt.none
     conf = get_pypy_config()
     set_opt_level(conf, 'mem')
-    assert conf.translation.gc == 'markcompact'
+    assert conf.translation.gcremovetypeptr
     assert not conf.translation.backendopt.none
 
 def test_set_pypy_opt_level():

pypy/config/translationoption.py

 DEFL_CLEVER_MALLOC_REMOVAL_INLINE_THRESHOLD = 32.4
 DEFL_LOW_INLINE_THRESHOLD = DEFL_INLINE_THRESHOLD / 2.0
 
+DEFL_GC = "minimark"
+
 IS_64_BITS = sys.maxint > 2147483647
 
 PLATFORMS = [
     # JIT generation: use -Ojit to enable it
     BoolOption("jit", "generate a JIT",
                default=False,
-               suggests=[("translation.gc", "hybrid"),
+               suggests=[("translation.gc", DEFL_GC),
                          ("translation.gcrootfinder", "asmgcc"),
                          ("translation.list_comprehension_operations", True)]),
     ChoiceOption("jit_backend", "choose the backend for the JIT",
     '0':    'boehm       nobackendopt',
     '1':    'boehm       lowinline',
     'size': 'boehm       lowinline     remove_asserts',
-    'mem':  'markcompact lowinline     remove_asserts    removetypeptr',
-    '2':    'hybrid      extraopts',
-    '3':    'hybrid      extraopts     remove_asserts',
-    'jit':  'hybrid      extraopts     jit',
+    'mem':  DEFL_GC + '  lowinline     remove_asserts    removetypeptr',
+    '2':    DEFL_GC + '  extraopts',
+    '3':    DEFL_GC + '  extraopts     remove_asserts',
+    'jit':  DEFL_GC + '  extraopts     jit',
     }
 
 def final_check_config(config):

pypy/doc/docindex.txt

 
 PyPy's own tests `summary`_, daily updated, run through BuildBot infrastructure.
 You can also find CPython's compliance tests run with compiled ``pypy-c``
-exeuctables there.
+executables there.
 
 information dating from early 2007: 
 

pypy/interpreter/astcompiler/test/test_compiler.py

         co_expr = compile(evalexpr, '<evalexpr>', 'eval')
         space = self.space
         pyco_expr = PyCode._from_code(space, co_expr)
-        w_res = pyco_expr.exec_code(space, w_dict, w_dict)
+        w_res = pyco_expr.exec_host_bytecode(space, w_dict, w_dict)
         res = space.str_w(space.repr(w_res))
         if not isinstance(expected, float):
             assert res == repr(expected)

pypy/interpreter/baseobjspace.py

 from pypy.rlib.timer import DummyTimer, Timer
 from pypy.rlib.rarithmetic import r_uint
 from pypy.rlib import jit
+from pypy.tool.sourcetools import func_with_new_name
 import os, sys, py
 
 __all__ = ['ObjSpace', 'OperationError', 'Wrappable', 'W_Root']
                                 (i, plural)))
         return items
 
+    unpackiterable_unroll = jit.unroll_safe(func_with_new_name(unpackiterable,
+                                            'unpackiterable_unroll'))
+
     def fixedview(self, w_iterable, expected_length=-1):
         """ A fixed list view of w_iterable. Don't modify the result
         """
         return make_sure_not_resized(self.unpackiterable(w_iterable,
                                                          expected_length)[:])
 
+    fixedview_unroll = fixedview
+
     def listview(self, w_iterable, expected_length=-1):
         """ A non-fixed view of w_iterable. Don't modify the result
         """

pypy/interpreter/function.py

         self.w_class = w_class         # possibly space.w_None
 
     def descr_method__new__(space, w_subtype, w_function, w_instance, w_class=None):
-        if space.is_w( w_instance, space.w_None ):
+        if space.is_w(w_instance, space.w_None):
             w_instance = None
+        if w_instance is None and space.is_w(w_class, space.w_None):
+            raise OperationError(space.w_TypeError,
+                                 space.wrap("unbound methods must have class"))
         method = space.allocate_instance(Method, w_subtype)
         Method.__init__(method, space, w_function, w_instance, w_class)
         return space.wrap(method)

pypy/interpreter/pycode.py

                          tuple(self.co_freevars),
                          tuple(self.co_cellvars) )
 
+    def exec_host_bytecode(self, w_dict, w_globals, w_locals):
+        from pypy.interpreter.pyframe import CPythonFrame
+        frame = CPythonFrame(self.space, self, w_globals, None)
+        frame.setdictscope(w_locals)
+        return frame.run()
+
     def dump(self):
         """A dis.dis() dump of the code object."""
         co = self._to_code()

pypy/interpreter/pyframe.py

 from pypy.rlib.rarithmetic import intmask
 from pypy.rlib import jit, rstack
 from pypy.tool import stdlib_opcode
+from pypy.tool.stdlib_opcode import host_bytecode_spec
 
 # Define some opcodes used
 g = globals()
         # the following 'assert' is an annotation hint: it hides from
         # the annotator all methods that are defined in PyFrame but
         # overridden in the {,Host}FrameClass subclasses of PyFrame.
-        assert isinstance(self, self.space.FrameClass)
+        assert (isinstance(self, self.space.FrameClass) or
+                not self.space.config.translating)
         executioncontext = self.space.getexecutioncontext()
         executioncontext.enter(self)
         try:
             return space.wrap(self.builtin is not space.builtin)
         return space.w_False
 
+class CPythonFrame(PyFrame):
+    """
+    Execution of host (CPython) opcodes.
+    """
+
+    bytecode_spec = host_bytecode_spec
+    opcode_method_names = host_bytecode_spec.method_names
+    opcodedesc = host_bytecode_spec.opcodedesc
+    opdescmap = host_bytecode_spec.opdescmap
+    HAVE_ARGUMENT = host_bytecode_spec.HAVE_ARGUMENT
+
+
 # ____________________________________________________________
 
 def get_block_class(opname):
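
Together with the new exec_host_bytecode() in pycode.py above, this gives a path for running host-compiled (CPython) code objects: the frame's opcode tables come from host_bytecode_spec instead of the target bytecode spec. A hedged usage sketch, mirroring the updated astcompiler test:

    co_expr = compile("6*7", '<evalexpr>', 'eval')    # host code object
    pyco_expr = PyCode._from_code(space, co_expr)     # wrap it as a PyCode
    # exec_host_bytecode() builds a CPythonFrame, so the host opcodes are
    # dispatched through host_bytecode_spec rather than the target spec
    w_res = pyco_expr.exec_host_bytecode(space, w_dict, w_dict)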

pypy/interpreter/pyopcode.py

 
     def UNPACK_SEQUENCE(self, itemcount, next_instr):
         w_iterable = self.popvalue()
-        items = self.space.fixedview(w_iterable, itemcount)
+        items = self.space.fixedview_unroll(w_iterable, itemcount)
         self.pushrevvalues(itemcount, items)
 
     def STORE_ATTR(self, nameindex, next_instr):

pypy/interpreter/pyparser/pytokenize.py

 __all__ = [ "tokenize" ]
 
 # ______________________________________________________________________
-# Automatically generated DFA's (with one or two hand tweeks):
-pseudoStatesAccepts = [True, True, True, True, True, True, True, True,
-                       True, True, False, True, True, True, False, False,
-                       False, False, True, False, False, True, True, False,
-                       True, False, True, False, True, False, True, False,
-                       False, False, True, False, False, False, True]
+# Automatically generated DFA's
 
-pseudoStates = [
-    {'\t': 0, '\n': 13, '\x0c': 0, '\r': 14, ' ': 0, '!': 10,
-     '"': 16, '#': 18, '%': 12, '&': 12,
-     "'": 15, '(': 13, ')': 13, '*': 7,
-     '+': 12, ',': 13, '-': 12, '.': 6,
-     '/': 11, '0': 4, '1': 5, '2': 5,
-     '3': 5, '4': 5, '5': 5, '6': 5,
-     '7': 5, '8': 5, '9': 5, ':': 13,
-     ';': 13, '<': 9, '=': 12, '>': 8,
-     '@': 13, 'A': 1,
-     'B': 1, 'C': 1, 'D': 1, 'E': 1,
-     'F': 1, 'G': 1, 'H': 1, 'I': 1,
-     'J': 1, 'K': 1, 'L': 1, 'M': 1,
-     'N': 1, 'O': 1, 'P': 1, 'Q': 1,
-     'R': 2, 'S': 1, 'T': 1, 'U': 3,
-     'V': 1, 'W': 1, 'X': 1, 'Y': 1,
-     'Z': 1, '[': 13, '\\': 17, ']': 13,
-     '^': 12, '_': 1, '`': 13, 'a': 1,
-     'b': 1, 'c': 1, 'd': 1, 'e': 1,
-     'f': 1, 'g': 1, 'h': 1, 'i': 1,
-     'j': 1, 'k': 1, 'l': 1, 'm': 1,
-     'n': 1, 'o': 1, 'p': 1, 'q': 1,
-     'r': 2, 's': 1, 't': 1, 'u': 3,
-     'v': 1, 'w': 1, 'x': 1, 'y': 1,
-     'z': 1, '{': 13, '|': 12, '}': 13,
-     '~': 13},
-
+accepts = [False, True, True, True, True, True, True, True,
+           True, True, False, True, True, True, False, False,
+           False, False, True, True, True, False, True,
+           False, True, False, True, False, False, True,
+           False, False, False, False, True, False, False,
+           False, True]
+states = [
+    # 0
+    {'\t': 0, '\n': 13, '\x0c': 0,
+     '\r': 14, ' ': 0, '!': 10, '"': 16,
+     '#': 18, '%': 12, '&': 12, "'": 15,
+     '(': 13, ')': 13, '*': 7, '+': 12,
+     ',': 13, '-': 12, '.': 6, '/': 11,
+     '0': 4, '1': 5, '2': 5, '3': 5,
+     '4': 5, '5': 5, '6': 5, '7': 5,
+     '8': 5, '9': 5, ':': 13, ';': 13,
+     '<': 9, '=': 12, '>': 8, '@': 13,
+     'A': 1, 'B': 1, 'C': 1, 'D': 1,
+     'E': 1, 'F': 1, 'G': 1, 'H': 1,
+     'I': 1, 'J': 1, 'K': 1, 'L': 1,
+     'M': 1, 'N': 1, 'O': 1, 'P': 1,
+     'Q': 1, 'R': 2, 'S': 1, 'T': 1,
+     'U': 3, 'V': 1, 'W': 1, 'X': 1,
+     'Y': 1, 'Z': 1, '[': 13, '\\': 17,
+     ']': 13, '^': 12, '_': 1, '`': 13,
+     'a': 1, 'b': 1, 'c': 1, 'd': 1,
+     'e': 1, 'f': 1, 'g': 1, 'h': 1,
+     'i': 1, 'j': 1, 'k': 1, 'l': 1,
+     'm': 1, 'n': 1, 'o': 1, 'p': 1,
+     'q': 1, 'r': 2, 's': 1, 't': 1,
+     'u': 3, 'v': 1, 'w': 1, 'x': 1,
+     'y': 1, 'z': 1, '{': 13, '|': 12,
+     '}': 13, '~': 13},
+    # 1
     {'0': 1, '1': 1, '2': 1, '3': 1,
      '4': 1, '5': 1, '6': 1, '7': 1,
      '8': 1, '9': 1, 'A': 1, 'B': 1,
      'p': 1, 'q': 1, 'r': 1, 's': 1,
      't': 1, 'u': 1, 'v': 1, 'w': 1,
      'x': 1, 'y': 1, 'z': 1},
-
+    # 2
     {'"': 16, "'": 15, '0': 1, '1': 1,
      '2': 1, '3': 1, '4': 1, '5': 1,
      '6': 1, '7': 1, '8': 1, '9': 1,
      'r': 1, 's': 1, 't': 1, 'u': 1,
      'v': 1, 'w': 1, 'x': 1, 'y': 1,
      'z': 1},
-
+    # 3
     {'"': 16, "'": 15, '0': 1, '1': 1,
      '2': 1, '3': 1, '4': 1, '5': 1,
      '6': 1, '7': 1, '8': 1, '9': 1,
      'r': 2, 's': 1, 't': 1, 'u': 1,
      'v': 1, 'w': 1, 'x': 1, 'y': 1,
      'z': 1},
-
-    {'.': 24, '0': 22, '1': 22, '2': 22,
-     '3': 22, '4': 22, '5': 22, '6': 22,
-     '7': 22, '8': 23, '9': 23, 'E': 25,
-     'J': 13, 'L': 13, 'X': 21, 'e': 25,
-     'j': 13, 'l': 13, 'x': 21},
-
-    {'.': 24, '0': 5, '1': 5, '2': 5,
+    # 4
+    {'.': 22, '0': 20, '1': 20, '2': 20,
+     '3': 20, '4': 20, '5': 20, '6': 20,
+     '7': 20, '8': 21, '9': 21, 'E': 23,
+     'J': 13, 'L': 13, 'X': 19, 'e': 23,
+     'j': 13, 'l': 13, 'x': 19},
+    # 5
+    {'.': 22, '0': 5, '1': 5, '2': 5,
      '3': 5, '4': 5, '5': 5, '6': 5,
-     '7': 5, '8': 5, '9': 5, 'E': 25,
-     'J': 13, 'L': 13, 'e': 25, 'j': 13,
+     '7': 5, '8': 5, '9': 5, 'E': 23,
+     'J': 13, 'L': 13, 'e': 23, 'j': 13,
      'l': 13},
-
-    {'0': 26, '1': 26, '2': 26, '3': 26,
-     '4': 26, '5': 26, '6': 26, '7': 26,
-     '8': 26, '9': 26},
-
+    # 6
+    {'0': 24, '1': 24, '2': 24, '3': 24,
+     '4': 24, '5': 24, '6': 24, '7': 24,
+     '8': 24, '9': 24},
+    # 7
     {'*': 12, '=': 13},
-
+    # 8
     {'=': 13, '>': 12},
-
-    {'=': 13, '<': 12, '>': 13},
-
+    # 9
+    {'<': 12, '=': 13, '>': 13},
+    # 10
     {'=': 13},
-
-    {'=': 13, '/': 12},
-
+    # 11
+    {'/': 12, '=': 13},
+    # 12
     {'=': 13},
-
+    # 13
     {},
-
+    # 14
     {'\n': 13},
-
-    {automata.DEFAULT: 19, '\n': 27, '\\': 29, "'": 28},
-
-    {automata.DEFAULT: 20, '"': 30, '\n': 27, '\\': 31},
-
+    # 15
+    {automata.DEFAULT: 28, '\n': 25, "'": 26, '\\': 27},
+    # 16
+    {automata.DEFAULT: 31, '\n': 25, '"': 29, '\\': 30},
+    # 17
     {'\n': 13, '\r': 14},
-
-    {automata.DEFAULT: 18, '\n': 27, '\r': 27},
-
-    {automata.DEFAULT: 19, '\n': 27, '\\': 29, "'": 13},
-
-    {automata.DEFAULT: 20, '"': 13, '\n': 27, '\\': 31},
-                
-    {'0': 21, '1': 21, '2': 21, '3': 21,
-     '4': 21, '5': 21, '6': 21, '7': 21,
-     '8': 21, '9': 21, 'A': 21, 'B': 21,
-     'C': 21, 'D': 21, 'E': 21, 'F': 21,
-     'L': 13, 'a': 21, 'b': 21, 'c': 21,
-     'd': 21, 'e': 21, 'f': 21, 'l': 13},
-    
-    {'.': 24, '0': 22, '1': 22, '2': 22,
-     '3': 22, '4': 22, '5': 22, '6': 22,
-     '7': 22, '8': 23, '9': 23, 'E': 25,
-     'J': 13, 'L': 13, 'e': 25, 'j': 13,
+    # 18
+    {automata.DEFAULT: 18, '\n': 25, '\r': 25},
+    # 19
+    {'0': 19, '1': 19, '2': 19, '3': 19,
+     '4': 19, '5': 19, '6': 19, '7': 19,
+     '8': 19, '9': 19, 'A': 19, 'B': 19,
+     'C': 19, 'D': 19, 'E': 19, 'F': 19,
+     'L': 13, 'a': 19, 'b': 19, 'c': 19,
+     'd': 19, 'e': 19, 'f': 19, 'l': 13},
+    # 20
+    {'.': 22, '0': 20, '1': 20, '2': 20,
+     '3': 20, '4': 20, '5': 20, '6': 20,
+     '7': 20, '8': 21, '9': 21, 'E': 23,
+     'J': 13, 'L': 13, 'e': 23, 'j': 13,
      'l': 13},
-    
-    {'.': 24, '0': 23, '1': 23, '2': 23,
-     '3': 23, '4': 23, '5': 23, '6': 23,
-     '7': 23, '8': 23, '9': 23, 'E': 25,
-     'J': 13, 'e': 25, 'j': 13},
-    
+    # 21
+    {'.': 22, '0': 21, '1': 21, '2': 21,
+     '3': 21, '4': 21, '5': 21, '6': 21,
+     '7': 21, '8': 21, '9': 21, 'E': 23,
+     'J': 13, 'e': 23, 'j': 13},
+    # 22
+    {'0': 22, '1': 22, '2': 22, '3': 22,
+     '4': 22, '5': 22, '6': 22, '7': 22,
+     '8': 22, '9': 22, 'E': 32, 'J': 13,
+     'e': 32, 'j': 13},
+    # 23
+    {'+': 33, '-': 33, '0': 34, '1': 34,
+     '2': 34, '3': 34, '4': 34, '5': 34,
+     '6': 34, '7': 34, '8': 34, '9': 34},
+    # 24
     {'0': 24, '1': 24, '2': 24, '3': 24,
      '4': 24, '5': 24, '6': 24, '7': 24,
      '8': 24, '9': 24, 'E': 32, 'J': 13,
      'e': 32, 'j': 13},
-    
-    {'+': 33, '-': 33, '0': 34, '1': 34,
-     '2': 34, '3': 34, '4': 34, '5': 34,
-     '6': 34, '7': 34, '8': 34, '9': 34},
-    
-    {'0': 26, '1': 26, '2': 26, '3': 26,
-     '4': 26, '5': 26, '6': 26, '7': 26,
-     '8': 26, '9': 26, 'E': 32, 'J': 13,
-     'e': 32, 'j': 13},
-    
+    # 25
     {},
-
+    # 26
     {"'": 13},
-
+    # 27
     {automata.DEFAULT: 35, '\n': 13, '\r': 14},
-
+    # 28
+    {automata.DEFAULT: 28, '\n': 25, "'": 13, '\\': 27},
+    # 29
     {'"': 13},
-
+    # 30
     {automata.DEFAULT: 36, '\n': 13, '\r': 14},
-
+    # 31
+    {automata.DEFAULT: 31, '\n': 25, '"': 13, '\\': 30},
+    # 32
     {'+': 37, '-': 37, '0': 38, '1': 38,
      '2': 38, '3': 38, '4': 38, '5': 38,
      '6': 38, '7': 38, '8': 38, '9': 38},
-    
-    
+    # 33
     {'0': 34, '1': 34, '2': 34, '3': 34,
      '4': 34, '5': 34, '6': 34, '7': 34,
      '8': 34, '9': 34},
-    
+    # 34
     {'0': 34, '1': 34, '2': 34, '3': 34,
      '4': 34, '5': 34, '6': 34, '7': 34,
      '8': 34, '9': 34, 'J': 13, 'j': 13},
-    
-    {automata.DEFAULT: 35, '\n': 27, '\\': 29, "'": 13},
-    
-    {automata.DEFAULT: 36, '"': 13, '\n': 27, '\\': 31},
-    
+    # 35
+    {automata.DEFAULT: 35, '\n': 25, "'": 13, '\\': 27},
+    # 36
+    {automata.DEFAULT: 36, '\n': 25, '"': 13, '\\': 30},
+    # 37
     {'0': 38, '1': 38, '2': 38, '3': 38,
      '4': 38, '5': 38, '6': 38, '7': 38,
      '8': 38, '9': 38},
-
+    # 38
     {'0': 38, '1': 38, '2': 38, '3': 38,
      '4': 38, '5': 38, '6': 38, '7': 38,
      '8': 38, '9': 38, 'J': 13, 'j': 13},
     ]
+pseudoDFA = automata.DFA(states, accepts)
 
-pseudoDFA = automata.DFA(pseudoStates, pseudoStatesAccepts)
-
-double3StatesAccepts = [False, False, False, False, False, True]
-double3States = [
+accepts = [False, False, False, False, False, True]
+states = [
+    # 0
     {automata.DEFAULT: 0, '"': 1, '\\': 2},
+    # 1
     {automata.DEFAULT: 4, '"': 3, '\\': 2},
+    # 2
     {automata.DEFAULT: 4},
+    # 3
     {automata.DEFAULT: 4, '"': 5, '\\': 2},
+    # 4
     {automata.DEFAULT: 4, '"': 1, '\\': 2},
+    # 5
     {automata.DEFAULT: 4, '"': 5, '\\': 2},
     ]
-double3DFA = automata.NonGreedyDFA(double3States, double3StatesAccepts)
+double3DFA = automata.NonGreedyDFA(states, accepts)
 
-single3StatesAccepts = [False, False, False, False, False, True]
-single3States = [
-    {automata.DEFAULT: 0, '\\': 2, "'": 1},
-    {automata.DEFAULT: 4, '\\': 2, "'": 3},
+accepts = [False, False, False, False, False, True]
+states = [
+    # 0
+    {automata.DEFAULT: 0, "'": 1, '\\': 2},
+    # 1
+    {automata.DEFAULT: 4, "'": 3, '\\': 2},
+    # 2
     {automata.DEFAULT: 4},
-    {automata.DEFAULT: 4, '\\': 2, "'": 5},
-    {automata.DEFAULT: 4, '\\': 2, "'": 1},
-    {automata.DEFAULT: 4, '\\': 2, "'": 5},
+    # 3
+    {automata.DEFAULT: 4, "'": 5, '\\': 2},
+    # 4
+    {automata.DEFAULT: 4, "'": 1, '\\': 2},
+    # 5
+    {automata.DEFAULT: 4, "'": 5, '\\': 2},
     ]
-single3DFA = automata.NonGreedyDFA(single3States, single3StatesAccepts)
+single3DFA = automata.NonGreedyDFA(states, accepts)
 
-singleStatesAccepts = [False, True, False]
-singleStates = [
-    {automata.DEFAULT: 0, '\\': 2, "'": 1},
+accepts = [False, True, False, False]
+states = [
+    # 0
+    {automata.DEFAULT: 0, "'": 1, '\\': 2},
+    # 1
     {},
-    {automata.DEFAULT: 0},
+    # 2
+    {automata.DEFAULT: 3},
+    # 3
+    {automata.DEFAULT: 3, "'": 1, '\\': 2},
     ]
-singleDFA = automata.DFA(singleStates, singleStatesAccepts)
+singleDFA = automata.DFA(states, accepts)
 
-doubleStatesAccepts = [False, True, False]
-doubleStates = [
+accepts = [False, True, False, False]
+states = [
+    # 0
     {automata.DEFAULT: 0, '"': 1, '\\': 2},
+    # 1
     {},
-    {automata.DEFAULT: 0},
+    # 2
+    {automata.DEFAULT: 3},
+    # 3
+    {automata.DEFAULT: 3, '"': 1, '\\': 2},
     ]
-doubleDFA = automata.DFA(doubleStates, doubleStatesAccepts)
+doubleDFA = automata.DFA(states, accepts)
+
+
+#_______________________________________________________________________
+# End of automatically generated DFA's
 
 endDFAs = {"'" : singleDFA,
            '"' : doubleDFA,

pypy/interpreter/test/test_function.py

         raises(TypeError, m, MyInst(None))
         raises(TypeError, m, MyInst(42))
 
+    def test_invalid_creation(self):
+        import new
+        def f(): pass
+        raises(TypeError, new.instancemethod, f, None)
+
 
 class TestMethod: 
     def setup_method(self, method):

pypy/jit/backend/llgraph/llimpl.py

     uni = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), string)
     uni.chars[index] = unichr(newvalue)
 
+def do_copystrcontent(src, dst, srcstart, dststart, length):
+    src = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), src)
+    dst = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), dst)
+    assert 0 <= srcstart <= srcstart + length <= len(src.chars)
+    assert 0 <= dststart <= dststart + length <= len(dst.chars)
+    rstr.copy_string_contents(src, dst, srcstart, dststart, length)
+
+def do_copyunicodecontent(src, dst, srcstart, dststart, length):
+    src = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), src)
+    dst = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), dst)
+    assert 0 <= srcstart <= srcstart + length <= len(src.chars)
+    assert 0 <= dststart <= dststart + length <= len(dst.chars)
+    rstr.copy_unicode_contents(src, dst, srcstart, dststart, length)
+
 # ---------- call ----------
 
 _call_args_i = []

pypy/jit/backend/llsupport/gc.py

 
 
 # ____________________________________________________________
-# All code below is for the hybrid GC
+# All code below is for the hybrid or minimark GC
 
 
 class GcRefList:
 
     def alloc_gcref_list(self, n):
         # Important: the GRREF_LISTs allocated are *non-movable*.  This
-        # requires support in the gc (only the hybrid GC supports it so far).
+        # requires support in the gc (hybrid GC or minimark GC so far).
         if we_are_translated():
             list = rgc.malloc_nonmovable(self.GCREF_LIST, n)
             assert list, "malloc_nonmovable failed!"
         self.translator = translator
         self.llop1 = llop1
 
-        # we need the hybrid GC for GcRefList.alloc_gcref_list() to work
-        if gcdescr.config.translation.gc != 'hybrid':
+        # we need the hybrid or minimark GC for GcRefList.alloc_gcref_list()
+        # to work
+        if gcdescr.config.translation.gc not in ('hybrid', 'minimark'):
             raise NotImplementedError("--gc=%s not implemented with the JIT" %
                                       (gcdescr.config.translation.gc,))
 
         self.gcheaderbuilder = GCHeaderBuilder(self.HDRPTR.TO)
         (self.array_basesize, _, self.array_length_ofs) = \
              symbolic.get_array_token(lltype.GcArray(lltype.Signed), True)
-        min_ns = self.GCClass.TRANSLATION_PARAMS['min_nursery_size']
-        self.max_size_of_young_obj = self.GCClass.get_young_fixedsize(min_ns)
+        self.max_size_of_young_obj = self.GCClass.JIT_max_size_of_young_obj()
 
         # make a malloc function, with three arguments
         def malloc_basic(size, tid):
                 v = op.getarg(2)
                 if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and
                                              bool(v.value)): # store a non-NULL
+                    # XXX detect when we should produce a
+                    # write_barrier_from_array
                     self._gen_write_barrier(newops, op.getarg(0), v)
                     op = op.copy_and_change(rop.SETARRAYITEM_RAW)
             # ----------

pypy/jit/backend/llsupport/test/test_gc.py

 
 
 class TestFramework:
+    gc = 'hybrid'
 
     def setup_method(self, meth):
         class config_:
             class translation:
-                gc = 'hybrid'
+                gc = self.gc
                 gcrootfinder = 'asmgcc'
                 gctransformer = 'framework'
                 gcremovetypeptr = False
         assert operations[1].getarg(1) == v_index
         assert operations[1].getarg(2) == v_value
         assert operations[1].getdescr() == array_descr
+
+
+class TestFrameworkMiniMark(TestFramework):
+    gc = 'minimark'

pypy/jit/backend/test/conftest.py

-import py, random
-
-option = py.test.config.option
-
-def pytest_addoption(parser):
-    group = parser.getgroup('random test options')
-    group.addoption('--random-seed', action="store", type="int",
-                    default=random.randrange(0, 10000),
-                    dest="randomseed",
-                    help="choose a fixed random seed")
-    group.addoption('--backend', action="store",
-                    default='llgraph',
-                    choices=['llgraph', 'x86'],
-                    dest="backend",
-                    help="select the backend to run the functions with")
-    group.addoption('--block-length', action="store", type="int",
-                    default=30,
-                    dest="block_length",
-                    help="insert up to this many operations in each test")
-    group.addoption('--n-vars', action="store", type="int",
-                    default=10,
-                    dest="n_vars",
-                    help="supply this many randomly-valued arguments to "
-                         "the function")
-    group.addoption('--repeat', action="store", type="int",
-                    default=15,
-                    dest="repeat",
-                    help="run the test this many times"),
-    group.addoption('--output', '-O', action="store", type="str",
-                    default="", dest="output",
-                    help="dump output to a file")
-

pypy/jit/backend/test/runner_test.py

         r = self.execute_operation(rop.STRGETITEM, [s_box, BoxInt(4)], 'int')
         assert r.value == 153
 
+    def test_copystrcontent(self):
+        s_box = self.alloc_string("abcdef")
+        for s_box in [s_box, s_box.constbox()]:
+            for srcstart_box in [BoxInt(2), ConstInt(2)]:
+                for dststart_box in [BoxInt(3), ConstInt(3)]:
+                    for length_box in [BoxInt(4), ConstInt(4)]:
+                        for r_box_is_const in [False, True]:
+                            r_box = self.alloc_string("!???????!")
+                            if r_box_is_const:
+                                r_box = r_box.constbox()
+                                self.execute_operation(rop.COPYSTRCONTENT,
+                                                       [s_box, r_box,
+                                                        srcstart_box,
+                                                        dststart_box,
+                                                        length_box], 'void')
+                                assert self.look_string(r_box) == "!??cdef?!"
+
     def test_do_unicode_basic(self):
         u = self.cpu.bh_newunicode(5)
         self.cpu.bh_unicodesetitem(u, 4, 123)
         s_box = BoxPtr(lltype.cast_opaque_ptr(llmemory.GCREF, s))
         return s_box
 
+    def look_string(self, string_box):
+        s = string_box.getref(lltype.Ptr(rstr.STR))
+        return ''.join(s.chars)
+
     def alloc_unicode(self, unicode):
         u = rstr.mallocunicode(len(unicode))
         for i in range(len(unicode)):

pypy/jit/backend/test/test_ll_random.py

         v_string = self.get_string(builder, r)
         builder.do(self.opnum, [v_string])
 
+class AbstractCopyContentOperation(AbstractStringOperation):
+    def produce_into(self, builder, r):
+        v_srcstring = self.get_string(builder, r)
+        v_dststring = self.get_string(builder, r)
+        if v_srcstring.value == v_dststring.value:    # because it's not a
+            raise test_random.CannotProduceOperation  # memmove(), but memcpy()
+        srclen = len(v_srcstring.getref(self.ptr).chars)
+        dstlen = len(v_dststring.getref(self.ptr).chars)
+        v_length = builder.get_index(min(srclen, dstlen), r)
+        v_srcstart = builder.get_index(srclen - v_length.value + 1, r)
+        v_dststart = builder.get_index(dstlen - v_length.value + 1, r)
+        builder.do(self.opnum, [v_srcstring, v_dststring,
+                                v_srcstart, v_dststart, v_length])
+
 class StrGetItemOperation(AbstractGetItemOperation, _StrOperation):
     pass
 
 class UnicodeLenOperation(AbstractStringLenOperation, _UnicodeOperation):
     pass
 
+class CopyStrContentOperation(AbstractCopyContentOperation, _StrOperation):
+    pass
+
+class CopyUnicodeContentOperation(AbstractCopyContentOperation,
+                                  _UnicodeOperation):
+    pass
+
 
 # there are five options in total:
 # 1. non raising call and guard_no_exception
     OPERATIONS.append(UnicodeSetItemOperation(rop.UNICODESETITEM))
     OPERATIONS.append(StrLenOperation(rop.STRLEN))
     OPERATIONS.append(UnicodeLenOperation(rop.UNICODELEN))
+    OPERATIONS.append(CopyStrContentOperation(rop.COPYSTRCONTENT))
+    #OPERATIONS.append(CopyUnicodeContentOperation(rop.COPYUNICODECONTENT))
 
 for i in range(2):
     OPERATIONS.append(GuardClassOperation(rop.GUARD_CLASS))

pypy/jit/backend/test/test_random.py

 import py, sys
 from pypy.rlib.rarithmetic import intmask, LONG_BIT
 from pypy.rpython.lltypesystem import llmemory
-from pypy.jit.backend.test import conftest as demo_conftest
+from pypy.jit.backend import conftest as demo_conftest
 from pypy.jit.metainterp.history import BasicFailDescr, TreeLoop
 from pypy.jit.metainterp.history import BoxInt, ConstInt, LoopToken
 from pypy.jit.metainterp.history import BoxPtr, ConstPtr
             elif isinstance(v, ConstFloat):
                 args.append('ConstFloat(%r)' % v.value)
             elif isinstance(v, ConstInt):
-                args.append('ConstInt(%d)' % v.value)
+                args.append('ConstInt(%s)' % v.value)
             else:
                 raise NotImplementedError(v)
         if op.getdescr() is None:
             except AttributeError:
                 descrstr = ', descr=...'
         print >>s, '        ResOperation(rop.%s, [%s], %s%s),' % (
-            opname[op.opnum], ', '.join(args), names[op.result], descrstr)
+            opname[op.getopnum()], ', '.join(args), names[op.result], descrstr)
         #if getattr(op, 'suboperations', None) is not None:
         #    subops.append(op)
 
                                                                        v.value)
         print >>s, '    op = cpu.execute_token(looptoken)'
         if self.should_fail_by is None:
-            fail_args = self.loop.operations[-1].args
+            fail_args = self.loop.operations[-1].getarglist()
         else:
             fail_args = self.should_fail_by.getfailargs()
         for i, v in enumerate(fail_args):

pypy/jit/backend/x86/assembler.py

         self.malloc_fixedsize_slowpath1 = 0
         self.malloc_fixedsize_slowpath2 = 0
         self.pending_guard_tokens = None
+        self.memcpy_addr = 0
         self.setup_failure_recovery()
         self._debug = False
         self.debug_counter_descr = cpu.fielddescrof(DEBUG_COUNTER, 'i')
                 ll_new_unicode = gc_ll_descr.get_funcptr_for_newunicode()
                 self.malloc_unicode_func_addr = rffi.cast(lltype.Signed,
                                                           ll_new_unicode)
+            self.memcpy_addr = self.cpu.cast_ptr_to_int(codebuf.memcpy_fn)
             self.mc = MachineCodeBlockWrapper(self, self.mc_size, self.cpu.profile_agent)
             self._build_failure_recovery(False)
             self._build_failure_recovery(True)
             f = open_file_as_stream(output_log, "w")
             for i in range(len(self.loop_run_counters)):
                 name, struct = self.loop_run_counters[i]
-                f.write(str(struct.i) + " " * (8 - len(str(struct.i))) + name + "\n")
+                f.write(str(name) + ":" +  str(struct.i) + "\n")
             f.close()
 
     def _build_float_constants(self):
         if self._debug:
             struct = lltype.malloc(DEBUG_COUNTER, flavor='raw')
             struct.i = 0
-            self.loop_run_counters.append((funcname, struct))
+            self.loop_run_counters.append((len(self.loop_run_counters), struct))
         return funcname
         
     def patch_jump_for_descr(self, faildescr, adr_new_target):
         self.regalloc_perform_with_guard(None, guard_op, faillocs, arglocs,
                                          resloc, current_depths)
 
-    def load_effective_addr(self, sizereg, baseofs, scale, result):
-        self.mc.LEA(result, addr_add(imm(0), sizereg, baseofs, scale))
+    def load_effective_addr(self, sizereg, baseofs, scale, result, frm=imm(0)):
+        self.mc.LEA(result, addr_add(frm, sizereg, baseofs, scale))
 
     def _unaryop(asmop):
         def genop_unary(self, op, arglocs, resloc):
         if self.cpu.vtable_offset is not None:
             assert isinstance(loc, RegLoc)
             assert isinstance(loc_vtable, ImmedLoc)
-            self.mc.MOV_mi((loc.value, self.cpu.vtable_offset), loc_vtable.value)
+            self.mc.MOV(mem(loc, self.cpu.vtable_offset), loc_vtable)
 
     # XXX genop_new is abused for all varsized mallocs with Boehm, for now
     # (instead of genop_new_array, genop_newstr, genop_newunicode)
         jz_location = self.mc.get_relative_pos()
         # the following is supposed to be the slow path, so whenever possible
         # we choose the most compact encoding over the most efficient one.
+        # XXX improve a bit, particularly for IS_X86_64.
         for i in range(len(arglocs)-1, -1, -1):
             loc = arglocs[i]
             if isinstance(loc, RegLoc):
         offset = self.mc.get_relative_pos() - jmp_adr
         assert 0 < offset <= 127
         self.mc.overwrite(jmp_adr-1, [chr(offset)])
+        # on 64-bits, 'tid' is a value that fits in 31 bits
         self.mc.MOV_mi((eax.value, 0), tid)
         self.mc.MOV(heap(nursery_free_adr), edx)
         

pypy/jit/backend/x86/codebuf.py

 
 import os, sys
-from pypy.rpython.lltypesystem import lltype, rffi
+from pypy.rpython.lltypesystem import lltype, llmemory, rffi
 from pypy.translator.tool.cbuild import ExternalCompilationInfo
 from pypy.jit.backend.x86.rx86 import X86_32_CodeBuilder, X86_64_CodeBuilder
 from pypy.jit.backend.x86.regloc import LocationCodeBuilder
             # Hack to make sure that mcs are not within 32-bits of one
             # another for testing purposes
             from pypy.rlib.rmmap import hint
-            hint.pos += 0xFFFFFFFF
+            hint.pos += 0x80000000 - map_size
             
         self._init(data, map_size)
 
 
 # ____________________________________________________________
 
+memcpy_fn = rffi.llexternal('memcpy', [llmemory.Address, llmemory.Address,
+                                       rffi.SIZE_T], lltype.Void,
+                            sandboxsafe=True, _nowrapper=True)
+
+# ____________________________________________________________
+
 if sys.platform == 'win32':
     ensure_sse2_floats = lambda : None
 else:

pypy/jit/backend/x86/regalloc.py

         assert tmpreg not in nonfloatlocs
         assert xmmtmp not in floatlocs
         # note: we need to make a copy of inputargs because possibly_free_vars
-        # is also used on op.args, which is a non-resizable list
+        # is also used on op args, which is a non-resizable list
         self.possibly_free_vars(list(inputargs))
         return nonfloatlocs, floatlocs
 
             if reg not in used:
                 self.xrm.free_regs.append(reg)
         # note: we need to make a copy of inputargs because possibly_free_vars
-        # is also used on op.args, which is a non-resizable list
+        # is also used on op args, which is a non-resizable list
         self.possibly_free_vars(list(inputargs))
         self.rm._check_invariants()
         self.xrm._check_invariants()
 
     consider_unicodegetitem = consider_strgetitem
 
+    def consider_copystrcontent(self, op):
+        # compute the source address
+        args = op.getarglist()
+        base_loc = self.rm.make_sure_var_in_reg(args[0], args)
+        ofs_loc = self.rm.make_sure_var_in_reg(args[2], args)
+        assert args[0] is not args[1]    # forbidden case of aliasing
+        self.rm.possibly_free_var(args[0])
+        if args[3] is not args[2] is not args[4]:  # MESS MESS MESS: don't free
+            self.rm.possibly_free_var(args[2])     # it if ==args[3] or args[4]
+        srcaddr_box = TempBox()
+        forbidden_vars = [args[1], args[3], args[4], srcaddr_box]
+        srcaddr_loc = self.rm.force_allocate_reg(srcaddr_box, forbidden_vars)
+        self._gen_address_inside_string(base_loc, ofs_loc, srcaddr_loc)
+        # compute the destination address
+        base_loc = self.rm.make_sure_var_in_reg(args[1], forbidden_vars)
+        ofs_loc = self.rm.make_sure_var_in_reg(args[3], forbidden_vars)
+        self.rm.possibly_free_var(args[1])
+        if args[3] is not args[4]:     # more of the MESS described above
+            self.rm.possibly_free_var(args[3])
+        forbidden_vars = [args[4], srcaddr_box]
+        dstaddr_box = TempBox()
+        dstaddr_loc = self.rm.force_allocate_reg(dstaddr_box, forbidden_vars)
+        self._gen_address_inside_string(base_loc, ofs_loc, dstaddr_loc)
+        # call memcpy()
+        length_loc = self.loc(args[4])
+        self.rm.before_call()
+        self.xrm.before_call()
+        self.assembler._emit_call(imm(self.assembler.memcpy_addr),
+                                  [dstaddr_loc, srcaddr_loc, length_loc])
+        self.rm.possibly_free_var(args[4])
+        self.rm.possibly_free_var(dstaddr_box)
+        self.rm.possibly_free_var(srcaddr_box)
+
+    def _gen_address_inside_string(self, baseloc, ofsloc, resloc):
+        cpu = self.assembler.cpu
+        ofs_items, itemsize, _ = symbolic.get_array_token(rstr.STR,
+                                                  self.translate_support_code)
+        assert itemsize == 1
+        self.assembler.load_effective_addr(ofsloc, ofs_items, 0,
+                                           resloc, baseloc)
+
     def consider_jump(self, op):
         assembler = self.assembler
         assert self.jump_target_descr is None
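
In outline, consider_copystrcontent turns a COPYSTRCONTENT operation into two address computations plus a call to the memcpy declared in codebuf.py; roughly (illustrative pseudo-code, not the emitted machine code):

    # args = [src_str, dst_str, srcstart, dststart, length]
    ofs_items, itemsize, _ = symbolic.get_array_token(rstr.STR, ...)
    assert itemsize == 1                       # STR stores one byte per char
    srcaddr = src_str + ofs_items + srcstart   # via LEA (load_effective_addr)
    dstaddr = dst_str + ofs_items + dststart   # via LEA
    memcpy(dstaddr, srcaddr, length)           # call through memcpy_addr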

pypy/jit/backend/x86/regloc.py

     def value_a(self): raise AssertionError("value_a undefined")
     def value_m(self): raise AssertionError("value_m undefined")
 
+    def find_unused_reg(self): return eax
+
 class StackLoc(AssemblerLocation):
     _immutable_ = True
     def __init__(self, position, ebp_offset, num_words, type):
     def assembler(self):
         return '%' + repr(self)
 
+    def find_unused_reg(self):
+        if self.value == eax.value:
+            return edx
+        else:
+            return eax
+
 class ImmedLoc(AssemblerLocation):
     _immutable_ = True
     width = WORD
                 self._location_code = 'a'
                 self.loc_a = (base_loc.value, scaled_loc.value, scale, static_offset)
 
+    def __repr__(self):
+        dict = {'j': 'value', 'a': 'loc_a', 'm': 'loc_m'}
+        attr = dict.get(self._location_code, '?')
+        info = getattr(self, attr, '?')
+        return '<AddressLoc %r: %s>' % (self._location_code, info)
+
     def location_code(self):
         return self._location_code
 
     def value_m(self):
         return self.loc_m
 
+    def find_unused_reg(self):
+        if self._location_code == 'm':
+            if self.loc_m[0] == eax.value:
+                return edx
+        elif self._location_code == 'a':
+            if self.loc_a[0] == eax.value:
+                if self.loc_a[1] == edx.value:
+                    return ecx
+                return edx
+            if self.loc_a[1] == eax.value:
+                if self.loc_a[0] == edx.value:
+                    return ecx
+                return edx
+        return eax
+
 class ConstFloatLoc(AssemblerLocation):
     # XXX: We have to use this class instead of just AddressLoc because
     # AddressLoc is "untyped" and also we to have need some sort of unique
         self.value = address
         self.const_id = const_id
 
+    def __repr__(self):
+        return '<ConstFloatLoc(%s, %s)>' % (self.value, self.const_id)
+
     def _getregkey(self):
         # XXX: 1000 is kind of magic: We just don't want to be confused
         # with any registers
     _scratch_register_value = 0
 
     def _binaryop(name):
+
+        def insn_with_64_bit_immediate(self, loc1, loc2):
+            # These are the worst cases:
+            val2 = loc2.value_i()
+            code1 = loc1.location_code()
+            if (code1 == 'j'
+                or (code1 == 'm' and not rx86.fits_in_32bits(loc1.value_m()[1]))
+                or (code1 == 'a' and not rx86.fits_in_32bits(loc1.value_a()[3]))):
+                # INSN_ji, and both operands are 64-bit; or INSN_mi or INSN_ai
+                # and the constant offset in the address is 64-bit.
+                # Hopefully this doesn't happen too often
+                freereg = loc1.find_unused_reg()
+                self.PUSH_r(freereg.value)
+                self.MOV_ri(freereg.value, val2)
+                INSN(self, loc1, freereg)
+                self.POP_r(freereg.value)
+            else:
+                # For this case, we should not need the scratch register more than here.
+                self._load_scratch(val2)
+                INSN(self, loc1, X86_64_SCRATCH_REG)
+
+        def invoke(self, codes, val1, val2):
+            methname = name + "_" + codes
+            _rx86_getattr(self, methname)(val1, val2)
+        invoke._annspecialcase_ = 'specialize:arg(1)'
+
         def INSN(self, loc1, loc2):
             code1 = loc1.location_code()
             code2 = loc2.location_code()
             if loc1 is X86_64_SCRATCH_REG and not name.startswith("MOV"):
                 assert code2 not in ('j', 'i')
 
-            for possible_code1 in unrolling_location_codes:
-                if code1 == possible_code1:
-                    for possible_code2 in unrolling_location_codes:
-                        if code2 == possible_code2:
+            for possible_code2 in unrolling_location_codes:
+                if code2 == possible_code2:
+                    val2 = getattr(loc2, "value_" + possible_code2)()
+                    #
+                    # Fake out certain operations for x86_64
+                    if self.WORD == 8 and possible_code2 == 'i' and not rx86.fits_in_32bits(val2):
+                        insn_with_64_bit_immediate(self, loc1, loc2)
+                        return
+                    #
+                    # Regular case
+                    for possible_code1 in unrolling_location_codes:
+                        if code1 == possible_code1:
                             val1 = getattr(loc1, "value_" + possible_code1)()
-                            val2 = getattr(loc2, "value_" + possible_code2)()
-                            # Fake out certain operations for x86_64
-                            if self.WORD == 8 and possible_code2 == 'i' and not rx86.fits_in_32bits(val2):
-                                if possible_code1 == 'j':
-                                    # This is the worst case: INSN_ji, and both operands are 64-bit
-                                    # Hopefully this doesn't happen too often
-                                    self.PUSH_r(eax.value)
-                                    self.MOV_ri(eax.value, val1)
-                                    self.MOV_ri(X86_64_SCRATCH_REG.value, val2)
-                                    methname = name + "_mr"
-                                    _rx86_getattr(self, methname)((eax.value, 0), X86_64_SCRATCH_REG.value)
-                                    self.POP_r(eax.value)
-                                else:
-                                    self.MOV_ri(X86_64_SCRATCH_REG.value, val2)
-                                    methname = name + "_" + possible_code1 + "r"
-                                    _rx86_getattr(self, methname)(val1, X86_64_SCRATCH_REG.value)
-                            elif self.WORD == 8 and possible_code1 == 'j':
-                                reg_offset = self._addr_as_reg_offset(val1)
-                                methname = name + "_" + "m" + possible_code2
-                                _rx86_getattr(self, methname)(reg_offset, val2)
+                            # More faking out of certain operations for x86_64
+                            if self.WORD == 8 and possible_code1 == 'j':
+                                val1 = self._addr_as_reg_offset(val1)
+                                invoke(self, "m" + possible_code2, val1, val2)
                             elif self.WORD == 8 and possible_code2 == 'j':
-                                reg_offset = self._addr_as_reg_offset(val2)
-                                methname = name + "_" + possible_code1 + "m"
-                                _rx86_getattr(self, methname)(val1, reg_offset)
+                                val2 = self._addr_as_reg_offset(val2)
+                                invoke(self, possible_code1 + "m", val1, val2)
+                            elif possible_code1 == 'm' and not rx86.fits_in_32bits(val1[1]):
+                                val1 = self._fix_static_offset_64_m(val1)
+                                invoke(self, "a" + possible_code2, val1, val2)
+                            elif possible_code2 == 'm' and not rx86.fits_in_32bits(val2[1]):
+                                val2 = self._fix_static_offset_64_m(val2)
+                                invoke(self, possible_code1 + "a", val1, val2)
                             else:
-                                methname = name + "_" + possible_code1 + possible_code2
-                                _rx86_getattr(self, methname)(val1, val2)
+                                if possible_code1 == 'a' and not rx86.fits_in_32bits(val1[3]):
+                                    val1 = self._fix_static_offset_64_a(val1)
+                                if possible_code2 == 'a' and not rx86.fits_in_32bits(val2[3]):
+                                    val2 = self._fix_static_offset_64_a(val2)
+                                invoke(self, possible_code1 + possible_code2, val1, val2)
+                            return
 
         return func_with_new_name(INSN, "INSN_" + name)
 
                 if code == possible_code:
                     val = getattr(loc, "value_" + possible_code)()
                     if self.WORD == 8 and possible_code == 'i' and not rx86.fits_in_32bits(val):
-                        self.MOV_ri(X86_64_SCRATCH_REG.value, val)
+                        self._load_scratch(val)
                         _rx86_getattr(self, name + "_r")(X86_64_SCRATCH_REG.value)
                     else:
                         methname = name + "_" + possible_code
                             _rx86_getattr(self, name + "_l")(val)
                         else:
                             assert self.WORD == 8
-                            self.MOV_ri(X86_64_SCRATCH_REG.value, val)
+                            self._load_scratch(val)
                             _rx86_getattr(self, name + "_r")(X86_64_SCRATCH_REG.value)
                     else:
                         methname = name + "_" + possible_code
         self.MOV_ri(X86_64_SCRATCH_REG.value, addr)
         return (X86_64_SCRATCH_REG.value, 0)
 
+    def _fix_static_offset_64_m(self, (basereg, static_offset)):
+        # For cases where an AddressLoc has the location_code 'm', but
+        # where the static offset does not fit in 32-bits.  We have to fall
+        # back to the X86_64_SCRATCH_REG.  Note that this returns a location
+        # encoded as mode 'a'.  These are all possibly rare cases; don't try
+        # to reuse a past value of the scratch register at all.
+        self._scratch_register_known = False
+        self.MOV_ri(X86_64_SCRATCH_REG.value, static_offset)
+        return (basereg, X86_64_SCRATCH_REG.value, 0, 0)
+
+    def _fix_static_offset_64_a(self, (basereg, scalereg,
+                                       scale, static_offset)):
+        # For cases where an AddressLoc has the location_code 'a', but
+        # where the static offset does not fit in 32-bits.  We have to fall
+        # back to the X86_64_SCRATCH_REG.  In one case it is even more
+        # annoying.  These are all possibly rare cases; don't try to reuse a
+        # past value of the scratch register at all.
+        self._scratch_register_known = False
+        self.MOV_ri(X86_64_SCRATCH_REG.value, static_offset)
+        #
+        if basereg != rx86.NO_BASE_REGISTER:
+            self.LEA_ra(X86_64_SCRATCH_REG.value,
+                        (basereg, X86_64_SCRATCH_REG.value, 0, 0))
+        return (X86_64_SCRATCH_REG.value, scalereg, scale, 0)
+
+    def _load_scratch(self, value):
+        if (self._scratch_register_known
+            and value == self._scratch_register_value):
+            return
+        if self._reuse_scratch_register:
+            self._scratch_register_known = True
+            self._scratch_register_value = value
+        self.MOV_ri(X86_64_SCRATCH_REG.value, value)
+
     def begin_reuse_scratch_register(self):
         # Flag the beginning of a block where it is okay to reuse the value
         # of the scratch register. In theory we shouldn't have to do this if

pypy/jit/backend/x86/runner.py

 
     def execute_token(self, executable_token):
         addr = executable_token._x86_bootstrap_code
+        #llop.debug_print(lltype.Void, ">>>> Entering", addr)
         func = rffi.cast(lltype.Ptr(self.BOOTSTRAP_TP), addr)
+        #llop.debug_print(lltype.Void, "<<<< Back")
         fail_index = self._execute_call(func)
         return self.get_fail_descr_from_number(fail_index)
 
             LLInterpreter.current_interpreter = self.debug_ll_interpreter
         res = 0
         try:
-            #llop.debug_print(lltype.Void, ">>>> Entering",
-            #                 rffi.cast(lltype.Signed, func))
             res = func()
-            #llop.debug_print(lltype.Void, "<<<< Back")
         finally:
             if not self.translate_support_code:
                 LLInterpreter.current_interpreter = prev_interpreter

pypy/jit/backend/x86/rx86.py

     LEA32_rb = insn(rex_w, '\x8D', register(1,8),stack_bp(2,force_32bits=True))
     LEA_ra = insn(rex_w, '\x8D', register(1, 8), mem_reg_plus_scaled_reg_plus_const(2))
     LEA_rm = insn(rex_w, '\x8D', register(1, 8), mem_reg_plus_const(2))
+    LEA_rj = insn(rex_w, '\x8D', register(1, 8), '\x05', immediate(2))
 
     CALL_l = insn('\xE8', relative(1))
     CALL_r = insn(rex_nw, '\xFF', register(1), chr(0xC0 | (2<<3)))

pypy/jit/backend/x86/test/conftest.py

 def pytest_runtest_setup(item):
     if cpu not in ('x86', 'x86_64'):
         py.test.skip("x86/x86_64 tests skipped: cpu is %r" % (cpu,))
+    if cpu == 'x86_64':
+        from pypy.rpython.lltypesystem import ll2ctypes
+        ll2ctypes.do_allocation_in_far_regions()

pypy/jit/backend/x86/test/test_regloc.py

     expected_ofs = pos_addr - (neg_addr+5)
     assert s.getvalue() == '\xE9' + struct.pack("<i", expected_ofs)
 
-def test_reuse_scratch_register():
-    if not IS_X86_64:
-        py.test.skip()
 
-    base_addr = 0xFEDCBA9876543210
-    cb = LocationCodeBuilder64()
-    cb.begin_reuse_scratch_register()
-    cb.MOV(ecx, heap(base_addr))
-    cb.MOV(ecx, heap(base_addr + 8))
-    cb.end_reuse_scratch_register()
+class Test64Bits:
 
-    expected_instructions = (
-            # mov r11, 0xFEDCBA9876543210
-            '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE' +
-            # mov rcx, [r11]
-            '\x49\x8B\x0B' +
-            # mov rcx, [r11+8]
-            '\x49\x8B\x4B\x08'
-    )
-    assert cb.getvalue() == expected_instructions
+    def setup_class(cls):
+        if not IS_X86_64:
+            py.test.skip()
+
+    def test_reuse_scratch_register(self):
+        base_addr = 0xFEDCBA9876543210
+        cb = LocationCodeBuilder64()
+        cb.begin_reuse_scratch_register()
+        cb.MOV(ecx, heap(base_addr))
+        cb.MOV(ecx, heap(base_addr + 8))
+        cb.end_reuse_scratch_register()
+
+        expected_instructions = (
+                # mov r11, 0xFEDCBA9876543210
+                '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE' +
+                # mov rcx, [r11]
+                '\x49\x8B\x0B' +
+                # mov rcx, [r11+8]
+                '\x49\x8B\x4B\x08'
+        )
+        assert cb.getvalue() == expected_instructions
+
+    # ------------------------------------------------------------
+
+    def test_64bit_address_1(self):
+        base_addr = 0xFEDCBA9876543210
+        cb = LocationCodeBuilder64()
+        cb.CMP(ecx, AddressLoc(ImmedLoc(0), ImmedLoc(0), 0, base_addr))
+        # this case is a CMP_rj
+        #
+        expected_instructions = (
+                # mov r11, 0xFEDCBA9876543210
+                '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE'
+                # cmp rcx, [r11]
+                '\x49\x3B\x0B'
+        )
+        assert cb.getvalue() == expected_instructions
+
+    def test_64bit_address_2(self):
+        base_addr = 0xFEDCBA9876543210
+        cb = LocationCodeBuilder64()
+        cb.MOV(ecx, AddressLoc(ImmedLoc(0), edx, 3, base_addr))
+        # this case is a CMP_ra
+        #
+        expected_instructions = (
+                # mov r11, 0xFEDCBA9876543210
+                '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE'
+                # mov rcx, [r11+8*rdx]
+                '\x49\x8B\x0C\xD3'
+        )
+        assert cb.getvalue() == expected_instructions
+
+    def test_64bit_address_3(self):
+        base_addr = 0xFEDCBA9876543210
+        cb = LocationCodeBuilder64()
+        cb.MOV(ecx, AddressLoc(edx, ImmedLoc(0), 0, base_addr))
+        # this case is a CMP_rm
+        #
+        expected_instructions = (
+                # mov r11, 0xFEDCBA9876543210
+                '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE'
+                # mov rcx, [rdx+r11]
+                '\x4A\x8B\x0C\x1A'
+        )
+        assert cb.getvalue() == expected_instructions
+
+    def test_64bit_address_4(self):
+        base_addr = 0xFEDCBA9876543210
+        cb = LocationCodeBuilder64()
+        cb.begin_reuse_scratch_register()
+        assert cb._reuse_scratch_register is True
+        assert cb._scratch_register_known is False
+        cb.MOV(ecx, AddressLoc(edx, esi, 2, base_addr))
+        assert cb._reuse_scratch_register is True
+        assert cb._scratch_register_known is False
+        # this case is a CMP_ra
+        #
+        expected_instructions = (
+                # mov r11, 0xFEDCBA9876543210
+                '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE'
+                # lea r11, [rdx+r11]
+                '\x4E\x8D\x1C\x1A'
+                # mov rcx, [r11+4*rsi]
+                '\x49\x8B\x0C\xB3'
+        )
+        assert cb.getvalue() == expected_instructions
+
+    # ------------------------------------------------------------
+
+    def test_MOV_immed32_into_64bit_address_1(self):
+        immed = -0x01234567
+        base_addr = 0xFEDCBA9876543210
+        cb = LocationCodeBuilder64()
+        cb.MOV(AddressLoc(ImmedLoc(0), ImmedLoc(0), 0, base_addr),
+               ImmedLoc(immed))
+        # this case is a MOV_ji
+        #
+        expected_instructions = (
+                # mov r11, 0xFEDCBA9876543210
+                '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE'
+                # mov [r11], -0x01234567
+                '\x49\xC7\x03\x99\xBA\xDC\xFE'
+        )
+        assert cb.getvalue() == expected_instructions
+
+    def test_MOV_immed32_into_64bit_address_2(self):
+        immed = -0x01234567
+        base_addr = 0xFEDCBA9876543210
+        cb = LocationCodeBuilder64()
+        cb.MOV(AddressLoc(ImmedLoc(0), edx, 3, base_addr),
+               ImmedLoc(immed))
+        # this case is a MOV_ai
+        #
+        expected_instructions = (
+                # mov r11, 0xFEDCBA9876543210
+                '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE'
+                # mov [r11+8*rdx], -0x01234567
+                '\x49\xC7\x04\xD3\x99\xBA\xDC\xFE'
+        )
+        assert cb.getvalue() == expected_instructions
+
+    def test_MOV_immed32_into_64bit_address_3(self):
+        immed = -0x01234567
+        base_addr = 0xFEDCBA9876543210
+        cb = LocationCodeBuilder64()
+        cb.MOV(AddressLoc(edx, ImmedLoc(0), 0, base_addr),
+               ImmedLoc(immed))
+        # this case is a MOV_mi
+        #
+        expected_instructions = (
+                # mov r11, 0xFEDCBA9876543210
+                '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE'
+                # mov [rdx+r11], -0x01234567
+                '\x4A\xC7\x04\x1A\x99\xBA\xDC\xFE'
+        )
+        assert cb.getvalue() == expected_instructions
+
+    def test_MOV_immed32_into_64bit_address_4(self):
+        immed = -0x01234567
+        base_addr = 0xFEDCBA9876543210
+        cb = LocationCodeBuilder64()
+        cb.MOV(AddressLoc(edx, esi, 2, base_addr), ImmedLoc(immed))
+        # this case is a MOV_ai
+        #
+        expected_instructions = (
+                # mov r11, 0xFEDCBA9876543210
+                '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE'
+                # lea r11, [rdx+r11]
+                '\x4E\x8D\x1C\x1A'
+                # mov [r11+4*rsi], -0x01234567
+                '\x49\xC7\x04\xB3\x99\xBA\xDC\xFE'
+        )
+        assert cb.getvalue() == expected_instructions
+
+    # ------------------------------------------------------------
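+    # The four tests below store a full 64-bit immediate instead.  Such an
+    # immediate cannot be encoded inline in a memory-store MOV, so the
+    # generated code pushes a general-purpose register that the address does
+    # not use, loads the immediate into it, stores it through r11, and pops
+    # it again (hence the push/pop pairs in the expected byte strings).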
+
+    def test_MOV_immed64_into_64bit_address_1(self):
+        immed = 0x0123456789ABCDEF
+        base_addr = 0xFEDCBA9876543210
+        cb = LocationCodeBuilder64()
+        cb.MOV(AddressLoc(ImmedLoc(0), ImmedLoc(0), 0, base_addr),
+               ImmedLoc(immed))
+        # this case is a MOV_ji
+        #
+        expected_instructions = (
+                # push rax
+                '\x50'
+                # mov rax, 0x0123456789ABCDEF
+                '\x48\xB8\xEF\xCD\xAB\x89\x67\x45\x23\x01'
+                # mov r11, 0xFEDCBA9876543210
+                '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE'
+                # mov [r11], rax
+                '\x49\x89\x03'
+                # pop rax
+                '\x58'
+        )
+        assert cb.getvalue() == expected_instructions
+
+    def test_MOV_immed64_into_64bit_address_2(self):
+        immed = 0x0123456789ABCDEF
+        base_addr = 0xFEDCBA9876543210
+        cb = LocationCodeBuilder64()
+        cb.MOV(AddressLoc(ImmedLoc(0), edx, 3, base_addr),
+               ImmedLoc(immed))
+        # this case is a MOV_ai
+        #
+        expected_instructions = (
+                # push rax
+                '\x50'
+                # mov rax, 0x0123456789ABCDEF
+                '\x48\xB8\xEF\xCD\xAB\x89\x67\x45\x23\x01'
+                # mov r11, 0xFEDCBA9876543210
+                '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE'
+                # mov [r11+8*rdx], rax
+                '\x49\x89\x04\xD3'
+                # pop rax
+                '\x58'
+        )
+        assert cb.getvalue() == expected_instructions
+
+    def test_MOV_immed64_into_64bit_address_3(self):
+        immed = 0x0123456789ABCDEF
+        base_addr = 0xFEDCBA9876543210
+        cb = LocationCodeBuilder64()
+        cb.MOV(AddressLoc(eax, ImmedLoc(0), 0, base_addr),
+               ImmedLoc(immed))
+        # this case is a MOV_mi
+        #
+        expected_instructions = (
+                # push rdx
+                '\x52'
+                # mov rdx, 0x0123456789ABCDEF
+                '\x48\xBA\xEF\xCD\xAB\x89\x67\x45\x23\x01'
+                # mov r11, 0xFEDCBA9876543210
+                '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE'
+                # mov [rax+r11], rdx
+                '\x4A\x89\x14\x18'
+                # pop rdx
+                '\x5A'
+        )
+        assert cb.getvalue() == expected_instructions
+
+    def test_MOV_immed64_into_64bit_address_4(self):
+        immed = 0x0123456789ABCDEF
+        base_addr = 0xFEDCBA9876543210
+        cb = LocationCodeBuilder64()
+        cb.MOV(AddressLoc(edx, eax, 2, base_addr), ImmedLoc(immed))
+        # this case is a MOV_ai
+        #
+        expected_instructions = (
+                # push rcx
+                '\x51'
+                # mov rcx, 0x0123456789ABCDEF
+                '\x48\xB9\xEF\xCD\xAB\x89\x67\x45\x23\x01'
+                # mov r11, 0xFEDCBA9876543210
+                '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE'
+                # lea r11, [rdx+r11]
+                '\x4E\x8D\x1C\x1A'
+                # mov [r11+4*rax], rcx
+                '\x49\x89\x0C\x83'
+                # pop rcx
+                '\x59'
+        )
+        assert cb.getvalue() == expected_instructions
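The expected byte strings in these tests are hand-assembled.  As a quick editorial cross-check (a sketch, not part of the commit; the helper name is illustrative only), the recurring "mov r11, imm64" scratch-register load can be rebuilt from the standard x86-64 encoding: a REX prefix with W=1 and B=1 (0x49), the opcode 0xB8 plus r11's low register bits (0xBB), then the eight immediate bytes in little-endian order:

    import struct

    def mov_r11_imm64(imm):
        # 0x49 = REX.W+B (64-bit operand size, register r11 = r8+3)
        # 0xBB = "mov r64, imm64" opcode 0xB8 with r11's low 3 bits (3) added
        return '\x49\xBB' + struct.pack('<Q', imm)

    assert mov_r11_imm64(0xFEDCBA9876543210) == \
           '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE'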

pypy/jit/backend/x86/test/test_zrpy_gc.py

 from pypy.jit.backend.llsupport.gc import GcLLDescr_framework
 from pypy.tool.udir import udir
 from pypy.jit.backend.x86.arch import IS_X86_64
+from pypy.config.translationoption import DEFL_GC
 import py.test
 
 class X(object):
 
 # ______________________________________________________________________
 
-class TestCompileHybrid(object):
+class TestCompileFramework(object):
+    # Test suite using the default GC (currently minimark).
     def setup_class(cls):
         funcs = []
         name_to_func = {}
         OLD_DEBUG = GcLLDescr_framework.DEBUG
         try:
             GcLLDescr_framework.DEBUG = True
-            cls.cbuilder = compile(get_entry(allfuncs), "hybrid",
+            cls.cbuilder = compile(get_entry(allfuncs), DEFL_GC,
                                    gcrootfinder="asmgcc", jit=True)
         finally:
             GcLLDescr_framework.DEBUG = OLD_DEBUG
 
     def run(self, name, n=2000):
-        pypylog = udir.join('TestCompileHybrid.log')
+        pypylog = udir.join('TestCompileFramework.log')
         res = self.cbuilder.cmdexec("%s %d" %(name, n),
                                     env={'PYPYLOG': ':%s' % pypylog})
         assert int(res) == 20
     def run_orig(self, name, n, x):
         self.main_allfuncs(name, n, x)
 
-    def define_compile_hybrid_1(cls):
+    def define_compile_framework_1(cls):
         # a moving GC.  Supports malloc_varsize_nonmovable.  Simple test, works
         # without write_barriers and root stack enumeration.
         def f(n, x, *args):
             return (n, x) + args
         return None, f, None
 
-    def test_compile_hybrid_1(self):
-        self.run('compile_hybrid_1')
+    def test_compile_framework_1(self):
+        self.run('compile_framework_1')
 
-    def define_compile_hybrid_2(cls):
+    def define_compile_framework_2(cls):
         # More complex test, requires root stack enumeration but
         # not write_barriers.
         def f(n, x, *args):
             return (n, x) + args
         return None, f, None
 
-    def test_compile_hybrid_2(self):
-        self.run('compile_hybrid_2')
+    def test_compile_framework_2(self):
+        self.run('compile_framework_2')
 
-    def define_compile_hybrid_3(cls):
+    def define_compile_framework_3(cls):
         # Third version of the test.  Really requires write_barriers.
         def f(n, x, *args):
             x.next = None
 
 
 
-    def test_compile_hybrid_3(self):
+    def test_compile_framework_3(self):
         x_test = X()
         x_test.foo = 5
-        self.run_orig('compile_hybrid_3', 6, x_test)     # check that it does not raise CheckError
-        self.run('compile_hybrid_3')
+        self.run_orig('compile_framework_3', 6, x_test)     # check that it does not raise CheckError
+        self.run('compile_framework_3')
 
-    def define_compile_hybrid_3_extra(cls):
+    def define_compile_framework_3_extra(cls):
         # Extra version of the test, with tons of live vars around the residual
         # call that all contain a GC pointer.
         @dont_look_inside
             return n, None, x0, x1, x2, x3, x4, x5, x6, x7, None, None
         return before, f, None
 
-    def test_compile_hybrid_3_extra(self):
-        self.run_orig('compile_hybrid_3_extra', 6, None)     # check that it does not raise CheckError
-        self.run('compile_hybrid_3_extra')
+    def test_compile_framework_3_extra(self):
+        self.run_orig('compile_framework_3_extra', 6, None)     # check that it does not raise CheckError
+        self.run('compile_framework_3_extra')
 
-    def define_compile_hybrid_4(cls):
+    def define_compile_framework_4(cls):
         # Fourth version of the test, with __del__.
         from pypy.rlib.debug import debug_print
         class Counter:
             return (n, x) + args
         return before, f, None
 
-    def test_compile_hybrid_4(self):
-        self.run('compile_hybrid_4')
+    def test_compile_framework_4(self):
+        self.run('compile_framework_4')
 
-    def define_compile_hybrid_5(cls):
+    def define_compile_framework_5(cls):
         # Test string manipulation.
         def f(n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s):
             n -= x.foo
             check(len(s) == 1*5 + 2*45 + 3*450 + 4*500)
         return None, f, after
 
-    def test_compile_hybrid_5(self):
-        self.run('compile_hybrid_5')
+    def test_compile_framework_5(self):
+        self.run('compile_framework_5')
 
-    def define_compile_hybrid_7(cls):
+    def define_compile_framework_7(cls):
         # Array of pointers (test the write barrier for setarrayitem_gc)
         def before(n, x):
             return n, x, None, None, None, None, None, None, None, None, [X(123)], None
             check(l[15].x == 142)
         return before, f, after
 
-    def test_compile_hybrid_7(self):
-        self.run('compile_hybrid_7')
+    def test_compile_framework_7(self):
+        self.run('compile_framework_7')
 
-    def define_compile_hybrid_external_exception_handling(cls):
+    def define_compile_framework_external_exception_handling(cls):
         def before(n, x):
             x = X(0)
             return n, x, None, None, None, None, None, None, None, None, None, None        
 
         return before, f, None
 
-    def test_compile_hybrid_external_exception_handling(self):
-        self.run('compile_hybrid_external_exception_handling')
+    def test_compile_framework_external_exception_handling(self):
+        self.run('compile_framework_external_exception_handling')
             
-    def define_compile_hybrid_bug1(self):
+    def define_compile_framework_bug1(self):
         @purefunction
         def nonmoving():
             x = X(1)
 
         return None, f, None
 
-    def test_compile_hybrid_bug1(self):
-        self.run('compile_hybrid_bug1', 200)
+    def test_compile_framework_bug1(self):
+        self.run('compile_framework_bug1', 200)
 
-    def define_compile_hybrid_vref(self):
+    def define_compile_framework_vref(self):
         from pypy.rlib.jit import virtual_ref, virtual_ref_finish
         class A:
             pass
             return n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s
         return None, f, None
 
-    def test_compile_hybrid_vref(self):
-        self.run('compile_hybrid_vref', 200)
+    def test_compile_framework_vref(self):
+        self.run('compile_framework_vref', 200)
 
-    def define_compile_hybrid_float(self):
+    def define_compile_framework_float(self):
         # test for a bug: the fastpath_malloc does not save and restore
         # xmm registers around the actual call to the slow path
         class A:
             return n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s
         return None, f, None
 
-    def test_compile_hybrid_float(self):
-        self.run('compile_hybrid_float')
+    def test_compile_framework_float(self):
+        self.run('compile_framework_float')

pypy/jit/backend/x86/test/test_ztranslation.py

 from pypy.jit.codewriter.policy import StopAtXPolicy
 from pypy.translator.translator import TranslationContext
 from pypy.jit.backend.x86.arch import IS_X86_32, IS_X86_64
+from pypy.config.translationoption import DEFL_GC
 
 class TestTranslationX86(CCompiledMixin):
     CPUClass = getcpuclass()
 
     def _get_TranslationContext(self):
         t = TranslationContext()
-        t.config.translation.gc = 'hybrid'
+        t.config.translation.gc = DEFL_GC   # 'hybrid' or 'minimark'
         t.config.translation.gcrootfinder = 'asmgcc'
         t.config.translation.list_comprehension_operations = True
         t.config.translation.gcremovetypeptr = True

pypy/jit/codewriter/assembler.py

                 return
             addr = ll