Commits

Maciej Fijalkowski committed 6b695e6

(pedronis, fijal)
IN-PROGRESS: start implementing CALL_ASSEMBLER on the x86 backend; passes 3
tests so far.

Comments (0)

Files changed (2)

pypy/jit/backend/x86/assembler.py

         self.malloc_array_func_addr = 0
         self.malloc_str_func_addr = 0
         self.malloc_unicode_func_addr = 0
+        self.assembler_helper_adr = 0
         self.fail_boxes_int = values_array(lltype.Signed, failargs_limit)
         self.fail_boxes_ptr = values_array(llmemory.GCREF, failargs_limit)
         self.fail_boxes_float = values_array(lltype.Float, failargs_limit)
                 ll_new_unicode = gc_ll_descr.get_funcptr_for_newunicode()
                 self.malloc_unicode_func_addr = rffi.cast(lltype.Signed,
                                                           ll_new_unicode)
+            self.assembler_helper_adr = self.cpu.cast_ptr_to_int(
+                self.cpu.assembler_helper_ptr)
+                
+        
             # done
             # we generate the loop body in 'mc'
             # 'mc2' is for guard recovery code
             tmp = ecx
         else:
             tmp = eax
-            
+        
         self._emit_call(x, arglocs, 2, tmp=tmp)
 
         if isinstance(resloc, MODRM64):
         self.mc.CMP(mem(ebp, FORCE_INDEX_OFS), imm(0))
         return self.implement_guard(addr, self.mc.JL)
 
+    def genop_guard_call_assembler(self, op, guard_op, addr,
+                                   arglocs, result_loc):
+        # Emit a CALL_ASSEMBLER: call the target loop's already-compiled
+        # bootstrap code (op.descr._x86_bootstrap_code) with the argument
+        # locations, then call the assembler helper on the value left in
+        # eax.
+        # NOTE(review): guard_op and addr are unused here; presumably the
+        # merged GUARD_NOT_FORCED is emitted by the caller -- TODO confirm.
+        self._emit_call(rel32(op.descr._x86_bootstrap_code), arglocs, 2,
+                        tmp=eax)
+        self._emit_call(rel32(self.assembler_helper_adr), [eax, imm(0)], 0,
+                        tmp=eax)
+        if isinstance(result_loc, MODRM64):
+            # 64-bit (float) result location: FSTP stores the x87 top of
+            # stack into the stack slot and pops it.
+            self.mc.FSTP(result_loc)
+        else:
+            # Integer/pointer results are returned in eax; None means void.
+            assert result_loc is eax or result_loc is None
+
     def genop_discard_cond_call_gc_wb(self, op, arglocs):
         # use 'mc._mc' directly instead of 'mc', to avoid
         # bad surprizes if the code buffer is mostly full
 
     def not_implemented_op_guard(op, guard_op,
                                  failaddr, arglocs, resloc):
+        # Fallback for operations merged with a guard that have no backend
+        # implementation.  Report the name of the *operation* (op), not the
+        # guard: the guard itself is generic, the unimplemented thing is op.
-        msg = "not implemented operation (guard): %s" % guard_op.getopname()
+        msg = "not implemented operation (guard): %s" % op.getopname()
         print msg
         raise NotImplementedError(msg)
 

pypy/jit/backend/x86/regalloc.py

         self.assembler.regalloc_perform_discard(op, arglocs)
 
     def can_merge_with_next_guard(self, op, i, operations):
-        if op.opnum == rop.CALL_MAY_FORCE:
+        if op.opnum == rop.CALL_MAY_FORCE or op.opnum == rop.CALL_ASSEMBLER:
             assert operations[i + 1].opnum == rop.GUARD_NOT_FORCED
             return True
         if not op.is_comparison():
         assert guard_op is not None
         self._consider_call(op, guard_op)
 
+    def consider_call_assembler(self, op, guard_op):
+        # Register allocation for CALL_ASSEMBLER.  The operation's descr
+        # must be the LoopToken of the loop being called directly.
+        descr = op.descr
+        assert isinstance(descr, LoopToken)
+        # Prepend the result size (in bytes) as an immediate before the
+        # locations of the actual call arguments; guard_op is the merged
+        # GUARD_NOT_FORCED that follows the call.
+        size = descr._calldescr.get_result_size(self.translate_support_code)
+        self._call(op, [imm(size)] +
+                   [self.loc(arg) for arg in op.args],
+                   guard_not_forced_op=guard_op)
+        
     def consider_cond_call_gc_wb(self, op):
         assert op.result is None
         arglocs = [self.loc(arg) for arg in op.args]
         name = name[len('consider_'):]
         num = getattr(rop, name.upper())
         if (ResOperation(num, [], None).is_comparison()
-            or num == rop.CALL_MAY_FORCE):
+            or num == rop.CALL_MAY_FORCE or num == rop.CALL_ASSEMBLER):
             oplist_with_guard[num] = value
             oplist[num] = add_none_argument(value)
         else: