pypy / pypy / jit / backend / llsupport / llmodel.py

from pypy.rpython.lltypesystem import lltype, llmemory, rffi, rclass, rstr
from pypy.rpython.lltypesystem.lloperation import llop
from pypy.rpython.llinterp import LLInterpreter
from pypy.rpython.annlowlevel import llhelper
from pypy.rlib.objectmodel import we_are_translated, specialize
from pypy.jit.metainterp import history
from pypy.jit.codewriter import heaptracker, longlong
from pypy.jit.backend.model import AbstractCPU
from pypy.jit.backend.llsupport import symbolic
from pypy.jit.backend.llsupport.symbolic import WORD, unroll_basic_sizes
from pypy.jit.backend.llsupport.descr import (
    get_size_descr, get_field_descr, get_array_descr,
    get_call_descr, get_interiorfield_descr,
    FieldDescr, ArrayDescr, CallDescr, InteriorFieldDescr)
from pypy.jit.backend.llsupport.asmmemmgr import AsmMemoryManager


class AbstractLLCPU(AbstractCPU):
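    """Common base class for the lltype-based JIT backends.

    It provides the descr helpers, the GC interface, the exception
    saving/restoring machinery and the bh_*() blackhole operations;
    concrete backends add the machine-code assembler on top of it.
    """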
    from pypy.jit.metainterp.typesystem import llhelper as ts

    def __init__(self, rtyper, stats, opts, translate_support_code=False,
                 gcdescr=None):
        assert type(opts) is not bool
        self.opts = opts

        from pypy.jit.backend.llsupport.gc import get_ll_description
        AbstractCPU.__init__(self)
        self.rtyper = rtyper
        self.stats = stats
        self.translate_support_code = translate_support_code
        if translate_support_code:
            translator = rtyper.annotator.translator
        else:
            translator = None
        self.gc_ll_descr = get_ll_description(gcdescr, translator, rtyper)
        if translator and translator.config.translation.gcremovetypeptr:
            self.vtable_offset = None
        else:
            self.vtable_offset, _ = symbolic.get_field_token(rclass.OBJECT,
                                                             'typeptr',
                                                        translate_support_code)
        self._setup_prebuilt_error('ovf', OverflowError)
        self._setup_prebuilt_error('zer', ZeroDivisionError)
        if translate_support_code:
            self._setup_exception_handling_translated()
        else:
            self._setup_exception_handling_untranslated()
        self.saved_exc_value = lltype.nullptr(llmemory.GCREF.TO)
        self.asmmemmgr = AsmMemoryManager()
        self.setup()
        if translate_support_code:
            self._setup_on_leave_jitted_translated()
        else:
            self._setup_on_leave_jitted_untranslated()

    def setup(self):
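        # hook for concrete backends, overridden e.g. to create their assembler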
        pass

    def _setup_prebuilt_error(self, prefix, Class):
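        # Prebuild a single instance of 'Class' (OverflowError or
        # ZeroDivisionError) and remember it, together with the address of
        # its vtable, so that get_overflow_error()/get_zero_division_error()
        # can return it without allocating.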
        if self.rtyper is not None:   # normal case
            bk = self.rtyper.annotator.bookkeeper
            clsdef = bk.getuniqueclassdef(Class)
            ll_inst = self.rtyper.exceptiondata.get_standard_ll_exc_instance(
                self.rtyper, clsdef)
        else:
            # for tests, a random emulated ll_inst will do
            ll_inst = lltype.malloc(rclass.OBJECT)
            ll_inst.typeptr = lltype.malloc(rclass.OBJECT_VTABLE,
                                            immortal=True)
        setattr(self, '_%s_error_vtable' % prefix,
                llmemory.cast_ptr_to_adr(ll_inst.typeptr))
        setattr(self, '_%s_error_inst' % prefix, ll_inst)


    def _setup_exception_handling_untranslated(self):
        # For running un-translated only: all exceptions occurring in the
        # llinterpreter are stored in '_exception_emulator', which the
        # generated machine code then reads back at the addresses given by
        # pos_exception() and pos_exc_value().
        _exception_emulator = lltype.malloc(rffi.CArray(lltype.Signed), 2,
                                            zero=True, flavor='raw',
                                            immortal=True)
        self._exception_emulator = _exception_emulator

        def _store_exception(lle):
            self._last_exception = lle       # keepalive
            tp_i = rffi.cast(lltype.Signed, lle.args[0])
            v_i = rffi.cast(lltype.Signed, lle.args[1])
            _exception_emulator[0] = tp_i
            _exception_emulator[1] = v_i

        self.debug_ll_interpreter = LLInterpreter(self.rtyper)
        self.debug_ll_interpreter._store_exception = _store_exception

        def pos_exception():
            return rffi.cast(lltype.Signed, _exception_emulator)

        def pos_exc_value():
            return (rffi.cast(lltype.Signed, _exception_emulator) +
                    rffi.sizeof(lltype.Signed))

        def save_exception():
            # copy from _exception_emulator to the real attributes on self
            v_i = _exception_emulator[1]
            _exception_emulator[0] = 0
            _exception_emulator[1] = 0
            self.saved_exc_value = rffi.cast(llmemory.GCREF, v_i)

        def save_exception_memoryerr():
            save_exception()
            if not self.saved_exc_value:
                self.saved_exc_value = "memoryerror!"    # for tests

        self.pos_exception = pos_exception
        self.pos_exc_value = pos_exc_value
        self.save_exception = save_exception
        self.save_exception_memoryerr = save_exception_memoryerr
        self.insert_stack_check = lambda: (0, 0, 0)


    def _setup_exception_handling_translated(self):
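        # After translation, the real RPython exception state is reachable
        # directly via the get_exception_addr/get_exc_value_addr llops, so
        # no emulation layer is needed.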

        def pos_exception():
            addr = llop.get_exception_addr(llmemory.Address)
            return heaptracker.adr2int(addr)

        def pos_exc_value():
            addr = llop.get_exc_value_addr(llmemory.Address)
            return heaptracker.adr2int(addr)

        def save_exception():
            addr = llop.get_exception_addr(llmemory.Address)
            addr.address[0] = llmemory.NULL
            addr = llop.get_exc_value_addr(llmemory.Address)
            exc_value = rffi.cast(llmemory.GCREF, addr.address[0])
            addr.address[0] = llmemory.NULL
            # from now on, the state is again consistent -- no more RPython
            # exception is set.  The following code produces a write barrier
            # in the assignment to self.saved_exc_value, as needed.
            self.saved_exc_value = exc_value

        def save_exception_memoryerr():
            from pypy.rpython.annlowlevel import cast_instance_to_base_ptr
            save_exception()
            if not self.saved_exc_value:
                exc = MemoryError()
                exc = cast_instance_to_base_ptr(exc)
                exc = lltype.cast_opaque_ptr(llmemory.GCREF, exc)
                self.saved_exc_value = exc

        from pypy.rlib import rstack
        STACK_CHECK_SLOWPATH = lltype.Ptr(lltype.FuncType([lltype.Signed],
                                                          lltype.Void))
        def insert_stack_check():
            endaddr = rstack._stack_get_end_adr()
            lengthaddr = rstack._stack_get_length_adr()
            f = llhelper(STACK_CHECK_SLOWPATH, rstack.stack_check_slowpath)
            slowpathaddr = rffi.cast(lltype.Signed, f)
            return endaddr, lengthaddr, slowpathaddr

        self.pos_exception = pos_exception
        self.pos_exc_value = pos_exc_value
        self.save_exception = save_exception
        self.save_exception_memoryerr = save_exception_memoryerr
        self.insert_stack_check = insert_stack_check

    def _setup_on_leave_jitted_untranslated(self):
        # assume we don't need a backend leave in this case
        self.on_leave_jitted_save_exc = self.save_exception
        self.on_leave_jitted_memoryerr = self.save_exception_memoryerr
        self.on_leave_jitted_noexc = lambda : None

    def _setup_on_leave_jitted_translated(self):
        on_leave_jitted_hook = self.get_on_leave_jitted_hook()
        save_exception = self.save_exception
        save_exception_memoryerr = self.save_exception_memoryerr

        def on_leave_jitted_noexc():
            on_leave_jitted_hook()

        def on_leave_jitted_save_exc():
            save_exception()
            on_leave_jitted_hook()

        def on_leave_jitted_memoryerr():
            save_exception_memoryerr()
            on_leave_jitted_hook()

        self.on_leave_jitted_noexc = on_leave_jitted_noexc
        self.on_leave_jitted_save_exc = on_leave_jitted_save_exc
        self.on_leave_jitted_memoryerr = on_leave_jitted_memoryerr

    def get_on_leave_jitted_hook(self):
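        # the default hook does nothing; subclasses may override it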
        return lambda : None

    _ON_JIT_LEAVE_FUNC = lltype.Ptr(lltype.FuncType([], lltype.Void))

    def get_on_leave_jitted_int(self, save_exception,
                                default_to_memoryerror=False):
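        # return, as a Signed, the address of the appropriate
        # on_leave_jitted_* helper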
        if default_to_memoryerror:
            f = llhelper(self._ON_JIT_LEAVE_FUNC, self.on_leave_jitted_memoryerr)
        elif save_exception:
            f = llhelper(self._ON_JIT_LEAVE_FUNC, self.on_leave_jitted_save_exc)
        else:
            f = llhelper(self._ON_JIT_LEAVE_FUNC, self.on_leave_jitted_noexc)
        return rffi.cast(lltype.Signed, f)

    def grab_exc_value(self):
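        # return the exception value stored by save_exception(), clearing it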
        exc = self.saved_exc_value
        self.saved_exc_value = lltype.nullptr(llmemory.GCREF.TO)
        return exc

    def free_loop_and_bridges(self, compiled_loop_token):
        AbstractCPU.free_loop_and_bridges(self, compiled_loop_token)
        blocks = compiled_loop_token.asmmemmgr_blocks
        if blocks is not None:
            compiled_loop_token.asmmemmgr_blocks = None
            for rawstart, rawstop in blocks:
                self.gc_ll_descr.freeing_block(rawstart, rawstop)
                self.asmmemmgr.free(rawstart, rawstop)

    # ------------------- helpers and descriptions --------------------

    @staticmethod
    def _cast_int_to_gcref(x):
        # dangerous!  only use if you are sure no collection could occur
        # between reading the integer and casting it to a pointer
        return rffi.cast(llmemory.GCREF, x)

    @staticmethod
    def cast_gcref_to_int(x):
        return rffi.cast(lltype.Signed, x)

    @staticmethod
    def cast_int_to_adr(x):
        return rffi.cast(llmemory.Address, x)

    @staticmethod
    def cast_adr_to_int(x):
        return rffi.cast(lltype.Signed, x)

    def sizeof(self, S):
        return get_size_descr(self.gc_ll_descr, S)

    def fielddescrof(self, STRUCT, fieldname):
        return get_field_descr(self.gc_ll_descr, STRUCT, fieldname)

    def unpack_fielddescr(self, fielddescr):
        assert isinstance(fielddescr, FieldDescr)
        return fielddescr.offset
    unpack_fielddescr._always_inline_ = True

    def unpack_fielddescr_size(self, fielddescr):
        assert isinstance(fielddescr, FieldDescr)
        ofs = fielddescr.offset
        size = fielddescr.field_size
        sign = fielddescr.is_field_signed()
        return ofs, size, sign
    unpack_fielddescr_size._always_inline_ = True

    def arraydescrof(self, A):
        return get_array_descr(self.gc_ll_descr, A)

    def interiorfielddescrof(self, A, fieldname):
        return get_interiorfield_descr(self.gc_ll_descr, A, fieldname)

    def unpack_arraydescr(self, arraydescr):
        assert isinstance(arraydescr, ArrayDescr)
        return arraydescr.basesize
    unpack_arraydescr._always_inline_ = True

    def unpack_arraydescr_size(self, arraydescr):
        assert isinstance(arraydescr, ArrayDescr)
        ofs = arraydescr.basesize
        size = arraydescr.itemsize
        sign = arraydescr.is_item_signed()
        return ofs, size, sign
    unpack_arraydescr_size._always_inline_ = True

    def calldescrof(self, FUNC, ARGS, RESULT, extrainfo):
        return get_call_descr(self.gc_ll_descr, ARGS, RESULT, extrainfo)

    def calldescrof_dynamic(self, cif_description, extrainfo):
        from pypy.jit.backend.llsupport import ffisupport
        return ffisupport.get_call_descr_dynamic(self, cif_description,
                                                 extrainfo)

    def _calldescr_dynamic_for_tests(self, atypes, rtype,
                                     abiname='FFI_DEFAULT_ABI'):
        from pypy.jit.backend.llsupport import ffisupport
        return ffisupport.calldescr_dynamic_for_tests(self, atypes, rtype,
                                                      abiname)

    def get_overflow_error(self):
        ovf_vtable = self.cast_adr_to_int(self._ovf_error_vtable)
        ovf_inst = lltype.cast_opaque_ptr(llmemory.GCREF,
                                          self._ovf_error_inst)
        return ovf_vtable, ovf_inst

    def get_zero_division_error(self):
        zer_vtable = self.cast_adr_to_int(self._zer_error_vtable)
        zer_inst = lltype.cast_opaque_ptr(llmemory.GCREF,
                                          self._zer_error_inst)
        return zer_vtable, zer_inst

    # ____________________________________________________________

    def bh_arraylen_gc(self, arraydescr, array):
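        # the length field is at 'lendescr.offset' bytes from the start of
        # the array; read it as a Signed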
        assert isinstance(arraydescr, ArrayDescr)
        ofs = arraydescr.lendescr.offset
        return rffi.cast(rffi.CArrayPtr(lltype.Signed), array)[ofs/WORD]

    @specialize.argtype(2)
    def bh_getarrayitem_gc_i(self, arraydescr, gcref, itemindex):
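        # dispatch on the item size via the unrolled loop over the
        # (signed type, unsigned type, size) triples in unroll_basic_sizes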
        ofs, size, sign = self.unpack_arraydescr_size(arraydescr)
        # --- start of GC unsafe code (no GC operation!) ---
        items = rffi.ptradd(rffi.cast(rffi.CCHARP, gcref), ofs)
        for STYPE, UTYPE, itemsize in unroll_basic_sizes:
            if size == itemsize:
                if sign:
                    items = rffi.cast(rffi.CArrayPtr(STYPE), items)
                    val = items[itemindex]
                    val = rffi.cast(lltype.Signed, val)
                else:
                    items = rffi.cast(rffi.CArrayPtr(UTYPE), items)
                    val = items[itemindex]
                    val = rffi.cast(lltype.Signed, val)
                # --- end of GC unsafe code ---
                return val
        else:
            raise NotImplementedError("size = %d" % size)

    def bh_getarrayitem_gc_r(self, arraydescr, gcref, itemindex):
        ofs = self.unpack_arraydescr(arraydescr)
        # --- start of GC unsafe code (no GC operation!) ---
        items = rffi.ptradd(rffi.cast(rffi.CCHARP, gcref), ofs)
        items = rffi.cast(rffi.CArrayPtr(lltype.Signed), items)
        pval = self._cast_int_to_gcref(items[itemindex])
        # --- end of GC unsafe code ---
        return pval

    @specialize.argtype(2)
    def bh_getarrayitem_gc_f(self, arraydescr, gcref, itemindex):
        ofs = self.unpack_arraydescr(arraydescr)
        # --- start of GC unsafe code (no GC operation!) ---
        items = rffi.ptradd(rffi.cast(rffi.CCHARP, gcref), ofs)
        items = rffi.cast(rffi.CArrayPtr(longlong.FLOATSTORAGE), items)
        fval = items[itemindex]
        # --- end of GC unsafe code ---
        return fval

    @specialize.argtype(2)
    def bh_setarrayitem_gc_i(self, arraydescr, gcref, itemindex, newvalue):
        ofs, size, sign = self.unpack_arraydescr_size(arraydescr)
        # --- start of GC unsafe code (no GC operation!) ---
        items = rffi.ptradd(rffi.cast(rffi.CCHARP, gcref), ofs)
        for TYPE, _, itemsize in unroll_basic_sizes:
            if size == itemsize:
                items = rffi.cast(rffi.CArrayPtr(TYPE), items)
                items[itemindex] = rffi.cast(TYPE, newvalue)
                # --- end of GC unsafe code ---
                return
        else:
            raise NotImplementedError("size = %d" % size)

    def bh_setarrayitem_gc_r(self, arraydescr, gcref, itemindex, newvalue):
        ofs = self.unpack_arraydescr(arraydescr)
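        # storing a GC reference: invoke the write barrier before the
        # raw store below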
        self.gc_ll_descr.do_write_barrier(gcref, newvalue)
        # --- start of GC unsafe code (no GC operation!) ---
        items = rffi.ptradd(rffi.cast(rffi.CCHARP, gcref), ofs)
        items = rffi.cast(rffi.CArrayPtr(lltype.Signed), items)
        items[itemindex] = self.cast_gcref_to_int(newvalue)
        # --- end of GC unsafe code ---

    @specialize.argtype(2)
    def bh_setarrayitem_gc_f(self, arraydescr, gcref, itemindex, newvalue):
        ofs = self.unpack_arraydescr(arraydescr)
        # --- start of GC unsafe code (no GC operation!) ---
        items = rffi.ptradd(rffi.cast(rffi.CCHARP, gcref), ofs)
        items = rffi.cast(rffi.CArrayPtr(longlong.FLOATSTORAGE), items)
        items[itemindex] = newvalue
        # --- end of GC unsafe code ---

    bh_setarrayitem_raw_i = bh_setarrayitem_gc_i
    bh_setarrayitem_raw_f = bh_setarrayitem_gc_f

    bh_getarrayitem_raw_i = bh_getarrayitem_gc_i
    bh_getarrayitem_raw_f = bh_getarrayitem_gc_f

    def bh_getinteriorfield_gc_i(self, gcref, itemindex, descr):
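        # the field lives at: array items base + itemindex * item size
        # + offset of the field within one item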
        assert isinstance(descr, InteriorFieldDescr)
        arraydescr = descr.arraydescr
        ofs, size, _ = self.unpack_arraydescr_size(arraydescr)
        ofs += descr.fielddescr.offset
        fieldsize = descr.fielddescr.field_size
        sign = descr.fielddescr.is_field_signed()
        fullofs = itemindex * size + ofs
        # --- start of GC unsafe code (no GC operation!) ---
        items = rffi.ptradd(rffi.cast(rffi.CCHARP, gcref), fullofs)
        for STYPE, UTYPE, itemsize in unroll_basic_sizes:
            if fieldsize == itemsize:
                if sign:
                    item = rffi.cast(rffi.CArrayPtr(STYPE), items)
                    val = item[0]
                    val = rffi.cast(lltype.Signed, val)
                else:
                    item = rffi.cast(rffi.CArrayPtr(UTYPE), items)
                    val = item[0]
                    val = rffi.cast(lltype.Signed, val)
                # --- end of GC unsafe code ---
                return val
        else:
            raise NotImplementedError("size = %d" % fieldsize)

    def bh_getinteriorfield_gc_r(self, gcref, itemindex, descr):
        assert isinstance(descr, InteriorFieldDescr)
        arraydescr = descr.arraydescr
        ofs, size, _ = self.unpack_arraydescr_size(arraydescr)
        ofs += descr.fielddescr.offset
        # --- start of GC unsafe code (no GC operation!) ---
        items = rffi.ptradd(rffi.cast(rffi.CCHARP, gcref), ofs +
                            size * itemindex)
        items = rffi.cast(rffi.CArrayPtr(lltype.Signed), items)
        pval = self._cast_int_to_gcref(items[0])
        # --- end of GC unsafe code ---
        return pval

    def bh_getinteriorfield_gc_f(self, gcref, itemindex, descr):
        assert isinstance(descr, InteriorFieldDescr)
        arraydescr = descr.arraydescr
        ofs, size, _ = self.unpack_arraydescr_size(arraydescr)
        ofs += descr.fielddescr.offset
        # --- start of GC unsafe code (no GC operation!) ---
        items = rffi.ptradd(rffi.cast(rffi.CCHARP, gcref), ofs +
                            size * itemindex)
        items = rffi.cast(rffi.CArrayPtr(longlong.FLOATSTORAGE), items)
        fval = items[0]
        # --- end of GC unsafe code ---
        return fval

    def bh_setinteriorfield_gc_i(self, gcref, itemindex, descr, value):
        assert isinstance(descr, InteriorFieldDescr)
        arraydescr = descr.arraydescr
        ofs, size, _ = self.unpack_arraydescr_size(arraydescr)
        ofs += descr.fielddescr.offset
        fieldsize = descr.fielddescr.field_size
        ofs = itemindex * size + ofs
        # --- start of GC unsafe code (no GC operation!) ---
        items = rffi.ptradd(rffi.cast(rffi.CCHARP, gcref), ofs)
        for TYPE, _, itemsize in unroll_basic_sizes:
            if fieldsize == itemsize:
                items = rffi.cast(rffi.CArrayPtr(TYPE), items)
                items[0] = rffi.cast(TYPE, value)
                # --- end of GC unsafe code ---
                return
        else:
            raise NotImplementedError("size = %d" % fieldsize)

    def bh_setinteriorfield_gc_r(self, gcref, itemindex, descr, newvalue):
        assert isinstance(descr, InteriorFieldDescr)
        arraydescr = descr.arraydescr
        ofs, size, _ = self.unpack_arraydescr_size(arraydescr)
        ofs += descr.fielddescr.offset
        self.gc_ll_descr.do_write_barrier(gcref, newvalue)
        # --- start of GC unsafe code (no GC operation!) ---
        items = rffi.ptradd(rffi.cast(rffi.CCHARP, gcref),
                            ofs + size * itemindex)
        items = rffi.cast(rffi.CArrayPtr(lltype.Signed), items)
        items[0] = self.cast_gcref_to_int(newvalue)
        # --- end of GC unsafe code ---

    def bh_setinteriorfield_gc_f(self, gcref, itemindex, descr, newvalue):
        assert isinstance(descr, InteriorFieldDescr)
        arraydescr = descr.arraydescr
        ofs, size, _ = self.unpack_arraydescr_size(arraydescr)
        ofs += descr.fielddescr.offset
        # --- start of GC unsafe code (no GC operation!) ---
        items = rffi.ptradd(rffi.cast(rffi.CCHARP, gcref),
                            ofs + size * itemindex)
        items = rffi.cast(rffi.CArrayPtr(longlong.FLOATSTORAGE), items)
        items[0] = newvalue
        # --- end of GC unsafe code ---

    def bh_strlen(self, string):
        s = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), string)
        return len(s.chars)

    def bh_unicodelen(self, string):
        u = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), string)
        return len(u.chars)

    def bh_strgetitem(self, string, index):
        s = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), string)
        return ord(s.chars[index])

    def bh_unicodegetitem(self, string, index):
        u = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), string)
        return ord(u.chars[index])

    @specialize.argtype(1)
    def _base_do_getfield_i(self, struct, fielddescr):
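        # shared implementation of bh_getfield_gc_i and bh_getfield_raw_i;
        # 'struct' can be a GCREF or a raw address, hence @specialize.argtype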
        ofs, size, sign = self.unpack_fielddescr_size(fielddescr)
        # --- start of GC unsafe code (no GC operation!) ---
        fieldptr = rffi.ptradd(rffi.cast(rffi.CCHARP, struct), ofs)
        for STYPE, UTYPE, itemsize in unroll_basic_sizes:
            if size == itemsize:
                # Note that in the common case where size==sizeof(Signed),
                # both cases of what follows are doing the same thing.
                # But gcc is clever enough to figure this out :-)
                if sign:
                    val = rffi.cast(rffi.CArrayPtr(STYPE), fieldptr)[0]
                    val = rffi.cast(lltype.Signed, val)
                else:
                    val = rffi.cast(rffi.CArrayPtr(UTYPE), fieldptr)[0]
                    val = rffi.cast(lltype.Signed, val)
                # --- end of GC unsafe code ---
                return val
        else:
            raise NotImplementedError("size = %d" % size)

    @specialize.argtype(1)
    def _base_do_getfield_r(self, struct, fielddescr):
        ofs = self.unpack_fielddescr(fielddescr)
        # --- start of GC unsafe code (no GC operation!) ---
        fieldptr = rffi.ptradd(rffi.cast(rffi.CCHARP, struct), ofs)
        pval = rffi.cast(rffi.CArrayPtr(lltype.Signed), fieldptr)[0]
        pval = self._cast_int_to_gcref(pval)
        # --- end of GC unsafe code ---
        return pval

    @specialize.argtype(1)
    def _base_do_getfield_f(self, struct, fielddescr):
        ofs = self.unpack_fielddescr(fielddescr)
        # --- start of GC unsafe code (no GC operation!) ---
        fieldptr = rffi.ptradd(rffi.cast(rffi.CCHARP, struct), ofs)
        fval = rffi.cast(rffi.CArrayPtr(longlong.FLOATSTORAGE), fieldptr)[0]
        # --- end of GC unsafe code ---
        return fval

    bh_getfield_gc_i = _base_do_getfield_i
    bh_getfield_gc_r = _base_do_getfield_r
    bh_getfield_gc_f = _base_do_getfield_f
    bh_getfield_raw_i = _base_do_getfield_i
    bh_getfield_raw_r = _base_do_getfield_r
    bh_getfield_raw_f = _base_do_getfield_f

    @specialize.argtype(1)
    def _base_do_setfield_i(self, struct, fielddescr, newvalue):
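        # shared implementation of bh_setfield_gc_i and bh_setfield_raw_i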
        ofs, size, sign = self.unpack_fielddescr_size(fielddescr)
        # --- start of GC unsafe code (no GC operation!) ---
        fieldptr = rffi.ptradd(rffi.cast(rffi.CCHARP, struct), ofs)
        for TYPE, _, itemsize in unroll_basic_sizes:
            if size == itemsize:
                fieldptr = rffi.cast(rffi.CArrayPtr(TYPE), fieldptr)
                fieldptr[0] = rffi.cast(TYPE, newvalue)
                # --- end of GC unsafe code ---
                return
        else:
            raise NotImplementedError("size = %d" % size)

    @specialize.argtype(1)
    def _base_do_setfield_r(self, struct, fielddescr, newvalue):
        ofs = self.unpack_fielddescr(fielddescr)
        assert lltype.typeOf(struct) is not lltype.Signed, (
            "can't handle write barriers for setfield_raw")
        self.gc_ll_descr.do_write_barrier(struct, newvalue)
        # --- start of GC unsafe code (no GC operation!) ---
        fieldptr = rffi.ptradd(rffi.cast(rffi.CCHARP, struct), ofs)
        fieldptr = rffi.cast(rffi.CArrayPtr(lltype.Signed), fieldptr)
        fieldptr[0] = self.cast_gcref_to_int(newvalue)
        # --- end of GC unsafe code ---

    @specialize.argtype(1)
    def _base_do_setfield_f(self, struct, fielddescr, newvalue):
        ofs = self.unpack_fielddescr(fielddescr)
        # --- start of GC unsafe code (no GC operation!) ---
        fieldptr = rffi.ptradd(rffi.cast(rffi.CCHARP, struct), ofs)
        fieldptr = rffi.cast(rffi.CArrayPtr(longlong.FLOATSTORAGE), fieldptr)
        fieldptr[0] = newvalue
        # --- end of GC unsafe code ---

    bh_setfield_gc_i = _base_do_setfield_i
    bh_setfield_gc_r = _base_do_setfield_r
    bh_setfield_gc_f = _base_do_setfield_f
    bh_setfield_raw_i = _base_do_setfield_i
    bh_setfield_raw_r = _base_do_setfield_r
    bh_setfield_raw_f = _base_do_setfield_f

    def bh_raw_store_i(self, addr, offset, descr, newvalue):
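        # 'addr' is a raw address as a Signed; store 'newvalue' at
        # addr + offset with the item size taken from the array descr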
        ofs, size, sign = self.unpack_arraydescr_size(descr)
        items = addr + offset
        for TYPE, _, itemsize in unroll_basic_sizes:
            if size == itemsize:
                items = rffi.cast(rffi.CArrayPtr(TYPE), items)
                items[0] = rffi.cast(TYPE, newvalue)
                break

    def bh_raw_store_f(self, addr, offset, descr, newvalue):
        items = rffi.cast(rffi.CArrayPtr(longlong.FLOATSTORAGE), addr + offset)
        items[0] = newvalue

    def bh_raw_load_i(self, addr, offset, descr):
        ofs, size, sign = self.unpack_arraydescr_size(descr)
        items = addr + offset
        for TYPE, _, itemsize in unroll_basic_sizes:
            if size == itemsize:
                items = rffi.cast(rffi.CArrayPtr(TYPE), items)
                return rffi.cast(lltype.Signed, items[0])
        assert False # unreachable code

    def bh_raw_load_f(self, addr, offset, descr):
        items = rffi.cast(rffi.CArrayPtr(longlong.FLOATSTORAGE), addr + offset)
        return items[0]

    def bh_new(self, sizedescr):
        return self.gc_ll_descr.gc_malloc(sizedescr)

    def bh_new_with_vtable(self, sizedescr, vtable):
        res = self.gc_ll_descr.gc_malloc(sizedescr)
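        # unless the GC removes the typeptr field (vtable_offset is None),
        # write the vtable word at the known offset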
        if self.vtable_offset is not None:
            as_array = rffi.cast(rffi.CArrayPtr(lltype.Signed), res)
            as_array[self.vtable_offset/WORD] = vtable
        return res

    def bh_classof(self, struct):
        struct = lltype.cast_opaque_ptr(rclass.OBJECTPTR, struct)
        result_adr = llmemory.cast_ptr_to_adr(struct.typeptr)
        return heaptracker.adr2int(result_adr)

    def bh_new_array(self, arraydescr, length):
        return self.gc_ll_descr.gc_malloc_array(arraydescr, length)

    def bh_newstr(self, length):
        return self.gc_ll_descr.gc_malloc_str(length)

    def bh_newunicode(self, length):
        return self.gc_ll_descr.gc_malloc_unicode(length)

    def bh_strsetitem(self, string, index, newvalue):
        s = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), string)
        s.chars[index] = chr(newvalue)

    def bh_unicodesetitem(self, string, index, newvalue):
        u = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), string)
        u.chars[index] = unichr(newvalue)

    def bh_copystrcontent(self, src, dst, srcstart, dststart, length):
        src = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), src)
        dst = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), dst)
        rstr.copy_string_contents(src, dst, srcstart, dststart, length)

    def bh_copyunicodecontent(self, src, dst, srcstart, dststart, length):
        src = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), src)
        dst = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), dst)
        rstr.copy_unicode_contents(src, dst, srcstart, dststart, length)

    def bh_call_i(self, func, calldescr, args_i, args_r, args_f):
        assert isinstance(calldescr, CallDescr)
        if not we_are_translated():
            calldescr.verify_types(args_i, args_r, args_f, history.INT + 'S')
        return calldescr.call_stub_i(func, args_i, args_r, args_f)

    def bh_call_r(self, func, calldescr, args_i, args_r, args_f):
        assert isinstance(calldescr, CallDescr)
        if not we_are_translated():
            calldescr.verify_types(args_i, args_r, args_f, history.REF)
        return calldescr.call_stub_r(func, args_i, args_r, args_f)

    def bh_call_f(self, func, calldescr, args_i, args_r, args_f):
        assert isinstance(calldescr, CallDescr)
        if not we_are_translated():
            calldescr.verify_types(args_i, args_r, args_f, history.FLOAT + 'L')
        return calldescr.call_stub_f(func, args_i, args_r, args_f)

    def bh_call_v(self, func, calldescr, args_i, args_r, args_f):
        assert isinstance(calldescr, CallDescr)
        if not we_are_translated():
            calldescr.verify_types(args_i, args_r, args_f, history.VOID)
        # the 'i' return value is ignored (and nonsense anyway)
        calldescr.call_stub_i(func, args_i, args_r, args_f)