Commits

Maciej Fijalkowski committed 76bf35c Merge

merge default

Files changed (76)

lib_pypy/_ctypes/array.py

-
+import _ffi
 import _rawffi
 
 from _ctypes.basics import _CData, cdata_from_address, _CDataMeta, sizeof
 from _ctypes.basics import keepalive_key, store_reference, ensure_objects
-from _ctypes.basics import CArgObject
+from _ctypes.basics import CArgObject, as_ffi_pointer
 
 class ArrayMeta(_CDataMeta):
     def __new__(self, name, cls, typedict):
     def _to_ffi_param(self):
         return self._get_buffer_value()
 
+    def _as_ffi_pointer_(self, ffitype):
+        return as_ffi_pointer(self, ffitype)
+
 ARRAY_CACHE = {}
 
 def create_array_type(base, length):
             _type_ = base
         )
         cls = ArrayMeta(name, (Array,), tpdict)
+        cls._ffiargtype = _ffi.types.Pointer(base.get_ffi_argtype())
         ARRAY_CACHE[key] = cls
         return cls

lib_pypy/_ctypes/basics.py

     }
 
 
+# called from primitive.py, pointer.py, array.py
+def as_ffi_pointer(value, ffitype):
+    my_ffitype = type(value).get_ffi_argtype()
+    # for now, we always allow types.pointer, else a lot of tests
+    # break. We need to rethink how pointers are represented, though
+    if my_ffitype is not ffitype and ffitype is not _ffi.types.void_p:
+        raise ArgumentError("expected %s instance, got %s" % (type(value),
+                                                              ffitype))
+    return value._get_buffer_value()
+
+
 # used by "byref"
 from _ctypes.pointer import pointer
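
The hunk above moves as_ffi_pointer() out of pointer.py so that arrays, pointers and primitives can share the same acceptance rule. A minimal pure-Python sketch of just that rule, with plain placeholder objects standing in for the _ffi type descriptors (not the real _ffi module):

    class ArgumentError(Exception):
        pass

    void_p = object()        # stands in for _ffi.types.void_p
    char_p = object()        # some other ffi pointer type

    def accepts(my_ffitype, expected_ffitype):
        # mirror of the check in as_ffi_pointer(): accept an exact match,
        # and always accept the case where void_p is expected
        if my_ffitype is not expected_ffitype and expected_ffitype is not void_p:
            raise ArgumentError("expected %s, got %s" % (expected_ffitype,
                                                         my_ffitype))
        return True

    assert accepts(char_p, char_p)      # exact match
    assert accepts(char_p, void_p)      # anything is accepted as void_p
    try:
        accepts(void_p, char_p)         # mismatch where void_p is not expected
    except ArgumentError:
        pass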

lib_pypy/_ctypes/pointer.py

 import _ffi
 from _ctypes.basics import _CData, _CDataMeta, cdata_from_address, ArgumentError
 from _ctypes.basics import keepalive_key, store_reference, ensure_objects
-from _ctypes.basics import sizeof, byref
+from _ctypes.basics import sizeof, byref, as_ffi_pointer
 from _ctypes.array import Array, array_get_slice_params, array_slice_getitem,\
      array_slice_setitem
 
     def _as_ffi_pointer_(self, ffitype):
         return as_ffi_pointer(self, ffitype)
 
-def as_ffi_pointer(value, ffitype):
-    my_ffitype = type(value).get_ffi_argtype()
-    # for now, we always allow types.pointer, else a lot of tests
-    # break. We need to rethink how pointers are represented, though
-    if my_ffitype is not ffitype and ffitype is not _ffi.types.void_p:
-        raise ArgumentError("expected %s instance, got %s" % (type(value),
-                                                              ffitype))
-    return value._get_buffer_value()
 
 def _cast_addr(obj, _, tp):
     if not (isinstance(tp, _CDataMeta) and tp._is_pointer_like()):

pypy/config/translationoption.py

     BoolOption("sandbox", "Produce a fully-sandboxed executable",
                default=False, cmdline="--sandbox",
                requires=[("translation.thread", False)],
-               suggests=[("translation.gc", "generation")]),
+               suggests=[("translation.gc", "generation"),
+                         ("translation.gcrootfinder", "shadowstack")]),
     BoolOption("rweakref", "The backend supports RPython-level weakrefs",
                default=True),
 

pypy/doc/cpython_differences.rst

   implementation detail that shows up because of internal C-level slots
   that PyPy does not have.
 
+* the ``__dict__`` attribute of new-style classes returns a normal dict, as
+  opposed to a dict proxy like in CPython. Mutating the dict will change the
+  type and vice versa. For builtin types, a dictionary will be returned that
+  cannot be changed (but still looks and behaves like a normal dictionary).
+
 
 .. include:: _ref.txt
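
As an illustration of the difference documented in the hunk above (a sketch only; for builtin types the read-only variant described in the note applies instead):

    import platform

    class C(object):
        pass

    d = C.__dict__
    if platform.python_implementation() == 'PyPy':
        # a plain dict: mutating it is reflected on the class, per the note above
        d['x'] = 42
        assert C.x == 42
    else:
        # CPython hands out a read-only dictproxy instead
        try:
            d['x'] = 42
        except TypeError:
            pass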

pypy/interpreter/pyframe.py

         self.pycode = code
         eval.Frame.__init__(self, space, w_globals)
         self.locals_stack_w = [None] * (code.co_nlocals + code.co_stacksize)
-        self.nlocals = code.co_nlocals
         self.valuestackdepth = code.co_nlocals
         self.lastblock = None
         make_sure_not_resized(self.locals_stack_w)
-        check_nonneg(self.nlocals)
+        check_nonneg(self.valuestackdepth)
         #
         if space.config.objspace.honor__builtins__:
             self.builtin = space.builtin.pick_builtin(w_globals)
     def execute_frame(self, w_inputvalue=None, operr=None):
         """Execute this frame.  Main entry point to the interpreter.
         The optional arguments are there to handle a generator's frame:
-        w_inputvalue is for generator.send()) and operr is for
-        generator.throw()).
+        w_inputvalue is for generator.send() and operr is for
+        generator.throw().
         """
         # the following 'assert' is an annotation hint: it hides from
         # the annotator all methods that are defined in PyFrame but
 
     def popvalue(self):
         depth = self.valuestackdepth - 1
-        assert depth >= self.nlocals, "pop from empty value stack"
+        assert depth >= self.pycode.co_nlocals, "pop from empty value stack"
         w_object = self.locals_stack_w[depth]
         self.locals_stack_w[depth] = None
         self.valuestackdepth = depth
     def peekvalues(self, n):
         values_w = [None] * n
         base = self.valuestackdepth - n
-        assert base >= self.nlocals
+        assert base >= self.pycode.co_nlocals
         while True:
             n -= 1
             if n < 0:
     def dropvalues(self, n):
         n = hint(n, promote=True)
         finaldepth = self.valuestackdepth - n
-        assert finaldepth >= self.nlocals, "stack underflow in dropvalues()"
+        assert finaldepth >= self.pycode.co_nlocals, (
+            "stack underflow in dropvalues()")
         while True:
             n -= 1
             if n < 0:
         # Contrast this with CPython where it's PEEK(-1).
         index_from_top = hint(index_from_top, promote=True)
         index = self.valuestackdepth + ~index_from_top
-        assert index >= self.nlocals, "peek past the bottom of the stack"
+        assert index >= self.pycode.co_nlocals, (
+            "peek past the bottom of the stack")
         return self.locals_stack_w[index]
 
     def settopvalue(self, w_object, index_from_top=0):
         index_from_top = hint(index_from_top, promote=True)
         index = self.valuestackdepth + ~index_from_top
-        assert index >= self.nlocals, "settop past the bottom of the stack"
+        assert index >= self.pycode.co_nlocals, (
+            "settop past the bottom of the stack")
         self.locals_stack_w[index] = w_object
 
     @jit.unroll_safe
         else:
             f_lineno = self.f_lineno
 
-        values_w = self.locals_stack_w[self.nlocals:self.valuestackdepth]
+        nlocals = self.pycode.co_nlocals
+        values_w = self.locals_stack_w[nlocals:self.valuestackdepth]
         w_valuestack = maker.slp_into_tuple_with_nulls(space, values_w)
         
         w_blockstack = nt([block._get_state_(space) for block in self.get_blocklist()])
         w_fastlocals = maker.slp_into_tuple_with_nulls(
-            space, self.locals_stack_w[:self.nlocals])
+            space, self.locals_stack_w[:nlocals])
         if self.last_exception is None:
             w_exc_value = space.w_None
             w_tb = space.w_None
         """Initialize the fast locals from a list of values,
         where the order is according to self.pycode.signature()."""
         scope_len = len(scope_w)
-        if scope_len > self.nlocals:
+        if scope_len > self.pycode.co_nlocals:
             raise ValueError, "new fastscope is longer than the allocated area"
         # don't assign directly to 'locals_stack_w[:scope_len]' to be
         # virtualizable-friendly
         pass
 
     def getfastscopelength(self):
-        return self.nlocals
+        return self.pycode.co_nlocals
 
     def getclosure(self):
         return None
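
The pyframe.py change above drops the redundant nlocals attribute in favour of reading pycode.co_nlocals directly: locals_stack_w keeps the fast locals in its first co_nlocals slots and the value stack above them, and valuestackdepth must never drop below co_nlocals. A simplified stand-alone sketch of that layout (a hypothetical MiniFrame, not the real PyFrame):

    class MiniFrame(object):
        """Toy model of the combined locals + value stack used by PyFrame."""
        def __init__(self, co_nlocals, co_stacksize):
            self.co_nlocals = co_nlocals
            self.locals_stack_w = [None] * (co_nlocals + co_stacksize)
            self.valuestackdepth = co_nlocals   # the stack starts above the locals

        def pushvalue(self, w_object):
            self.locals_stack_w[self.valuestackdepth] = w_object
            self.valuestackdepth += 1

        def popvalue(self):
            depth = self.valuestackdepth - 1
            assert depth >= self.co_nlocals, "pop from empty value stack"
            w_object = self.locals_stack_w[depth]
            self.locals_stack_w[depth] = None
            self.valuestackdepth = depth
            return w_object

    frame = MiniFrame(co_nlocals=2, co_stacksize=3)
    frame.pushvalue("w_x")
    assert frame.popvalue() == "w_x"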

pypy/jit/backend/test/runner_test.py

         print 'step 4 ok'
         print '-'*79
 
+    def test_guard_not_invalidated_and_label(self):
+        # test that the guard_not_invalidated reserves enough room before
+        # the label.  If it doesn't, then in this example after we invalidate
+        # the guard, jumping to the label will hit the invalidation code too
+        cpu = self.cpu
+        i0 = BoxInt()
+        faildescr = BasicFailDescr(1)
+        labeldescr = TargetToken()
+        ops = [
+            ResOperation(rop.GUARD_NOT_INVALIDATED, [], None, descr=faildescr),
+            ResOperation(rop.LABEL, [i0], None, descr=labeldescr),
+            ResOperation(rop.FINISH, [i0], None, descr=BasicFailDescr(3)),
+        ]
+        ops[0].setfailargs([])
+        looptoken = JitCellToken()
+        self.cpu.compile_loop([i0], ops, looptoken)
+        # mark as failing
+        self.cpu.invalidate_loop(looptoken)
+        # attach a bridge
+        i2 = BoxInt()
+        ops = [
+            ResOperation(rop.JUMP, [ConstInt(333)], None, descr=labeldescr),
+        ]
+        self.cpu.compile_bridge(faildescr, [], ops, looptoken)
+        # run: must not be caught in an infinite loop
+        fail = self.cpu.execute_token(looptoken, 16)
+        assert fail.identifier == 3
+        assert self.cpu.get_latest_value_int(0) == 333
+
     # pure do_ / descr features
 
     def test_do_operations(self):

pypy/jit/backend/x86/regalloc.py

         self.jump_target_descr = None
         self.close_stack_struct = 0
         self.final_jump_op = None
-        self.min_bytes_before_label = 0
 
     def _prepare(self, inputargs, operations, allgcrefs):
         self.fm = X86FrameManager()
         operations = self._prepare(inputargs, operations, allgcrefs)
         self._update_bindings(arglocs, inputargs)
         self.param_depth = prev_depths[1]
+        self.min_bytes_before_label = 0
         return operations
 
+    def ensure_next_label_is_at_least_at_position(self, at_least_position):
+        self.min_bytes_before_label = max(self.min_bytes_before_label,
+                                          at_least_position)
+
     def reserve_param(self, n):
         self.param_depth = max(self.param_depth, n)
 
         self.assembler.mc.mark_op(None) # end of the loop
 
     def flush_loop(self):
-        # rare case: if the loop is too short, pad with NOPs
+        # rare case: if the loop is too short, or if we are just after
+        # a GUARD_NOT_INVALIDATED, pad with NOPs.  Important!  This must
+        # be called to ensure that there are enough bytes produced,
+        # because GUARD_NOT_INVALIDATED or redirect_call_assembler()
+        # will maybe overwrite them.
         mc = self.assembler.mc
         while mc.get_relative_pos() < self.min_bytes_before_label:
             mc.NOP()
     def consider_guard_no_exception(self, op):
         self.perform_guard(op, [], None)
 
-    consider_guard_not_invalidated = consider_guard_no_exception
+    def consider_guard_not_invalidated(self, op):
+        mc = self.assembler.mc
+        n = mc.get_relative_pos()
+        self.perform_guard(op, [], None)
+        assert n == mc.get_relative_pos()
+        # ensure that the next label is at least 5 bytes farther than
+        # the current position.  Otherwise, when invalidating the guard,
+        # we would overwrite randomly the next label's position.
+        self.ensure_next_label_is_at_least_at_position(n + 5)
 
     def consider_guard_exception(self, op):
         loc = self.rm.make_sure_var_in_reg(op.getarg(0))
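
The regalloc.py change makes consider_guard_not_invalidated() reserve 5 bytes before the next label (so that invalidating the guard cannot overwrite the label's position, per the comment above), and flush_loop() pads with NOPs whenever less code than min_bytes_before_label has been emitted. A toy sketch of that invariant, with a hypothetical FakeMachineCode in place of the real x86 machine-code builder:

    class FakeMachineCode(object):
        def __init__(self):
            self.bytes = []
        def get_relative_pos(self):
            return len(self.bytes)
        def NOP(self):
            self.bytes.append('nop')

    class FakeRegalloc(object):
        def __init__(self, mc):
            self.mc = mc
            self.min_bytes_before_label = 0

        def ensure_next_label_is_at_least_at_position(self, at_least_position):
            self.min_bytes_before_label = max(self.min_bytes_before_label,
                                              at_least_position)

        def consider_guard_not_invalidated(self):
            # the guard emits no code by itself; just reserve room for the
            # bytes that may later be written over this position
            n = self.mc.get_relative_pos()
            self.ensure_next_label_is_at_least_at_position(n + 5)

        def flush_loop(self):
            # pad with NOPs so the next label cannot overlap the reserved bytes
            while self.mc.get_relative_pos() < self.min_bytes_before_label:
                self.mc.NOP()

    mc = FakeMachineCode()
    ra = FakeRegalloc(mc)
    ra.consider_guard_not_invalidated()
    ra.flush_loop()
    assert mc.get_relative_pos() >= 5   # enough room before the next label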

pypy/jit/metainterp/optimizeopt/optimizer.py

         assert isinstance(descr, compile.ResumeGuardDescr)
         modifier = resume.ResumeDataVirtualAdder(descr, self.resumedata_memo)
         try:
-            newboxes = modifier.finish(self.values, self.pendingfields)
+            newboxes = modifier.finish(self, self.pendingfields)
             if len(newboxes) > self.metainterp_sd.options.failargs_limit:
                 raise resume.TagOverflow
         except resume.TagOverflow:

pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py

         """
         self.optimize_loop(ops, expected)
         
+    def test_constant_failargs(self):
+        ops = """
+        [p1, i2, i3]
+        setfield_gc(p1, ConstPtr(myptr), descr=nextdescr)
+        p16 = getfield_gc(p1, descr=nextdescr)
+        guard_true(i2) [p16, i3]
+        jump(p1, i3, i2)
+        """
+        preamble = """
+        [p1, i2, i3]
+        setfield_gc(p1, ConstPtr(myptr), descr=nextdescr)
+        guard_true(i2) [i3]
+        jump(p1, i3)
+        """
+        expected = """
+        [p1, i3]
+        guard_true(i3) []
+        jump(p1, 1)
+        """
+        self.optimize_loop(ops, expected, preamble)
+
+    def test_issue1048(self):
+        ops = """
+        [p1, i2, i3]
+        p16 = getfield_gc(p1, descr=nextdescr)
+        guard_true(i2) [p16]
+        setfield_gc(p1, ConstPtr(myptr), descr=nextdescr)
+        jump(p1, i3, i2)
+        """
+        expected = """
+        [p1, i3]
+        guard_true(i3) []
+        jump(p1, 1)
+        """
+        self.optimize_loop(ops, expected)
+
+    def test_issue1048_ok(self):
+        ops = """
+        [p1, i2, i3]
+        p16 = getfield_gc(p1, descr=nextdescr)
+        call(p16, descr=nonwritedescr)
+        guard_true(i2) [p16]
+        setfield_gc(p1, ConstPtr(myptr), descr=nextdescr)
+        jump(p1, i3, i2)
+        """
+        expected = """
+        [p1, i3]
+        call(ConstPtr(myptr), descr=nonwritedescr)
+        guard_true(i3) []
+        jump(p1, 1)
+        """
+        self.optimize_loop(ops, expected)
+
 class TestLLtype(OptimizeOptTest, LLtypeMixin):
     pass
 

pypy/jit/metainterp/resume.py

 
     # env numbering
 
-    def number(self, values, snapshot):
+    def number(self, optimizer, snapshot):
         if snapshot is None:
             return lltype.nullptr(NUMBERING), {}, 0
         if snapshot in self.numberings:
              numb, liveboxes, v = self.numberings[snapshot]
              return numb, liveboxes.copy(), v
 
-        numb1, liveboxes, v = self.number(values, snapshot.prev)
+        numb1, liveboxes, v = self.number(optimizer, snapshot.prev)
         n = len(liveboxes)-v
         boxes = snapshot.boxes
         length = len(boxes)
         numb = lltype.malloc(NUMBERING, length)
         for i in range(length):
             box = boxes[i]
-            value = values.get(box, None)
-            if value is not None:
-                box = value.get_key_box()
+            value = optimizer.getvalue(box)
+            box = value.get_key_box()
 
             if isinstance(box, Const):
                 tagged = self.getconst(box)
         _, tagbits = untag(tagged)
         return tagbits == TAGVIRTUAL
 
-    def finish(self, values, pending_setfields=[]):
+    def finish(self, optimizer, pending_setfields=[]):
         # compute the numbering
         storage = self.storage
         # make sure that nobody attached resume data to this guard yet
         assert not storage.rd_numb
         snapshot = storage.rd_snapshot
         assert snapshot is not None # is that true?
-        numb, liveboxes_from_env, v = self.memo.number(values, snapshot)
+        numb, liveboxes_from_env, v = self.memo.number(optimizer, snapshot)
         self.liveboxes_from_env = liveboxes_from_env
         self.liveboxes = {}
         storage.rd_numb = numb
                 liveboxes[i] = box
             else:
                 assert tagbits == TAGVIRTUAL
-                value = values[box]
+                value = optimizer.getvalue(box)
                 value.get_args_for_fail(self)
 
         for _, box, fieldbox, _ in pending_setfields:
             self.register_box(box)
             self.register_box(fieldbox)
-            value = values[fieldbox]
+            value = optimizer.getvalue(fieldbox)
             value.get_args_for_fail(self)
 
-        self._number_virtuals(liveboxes, values, v)
+        self._number_virtuals(liveboxes, optimizer, v)
         self._add_pending_fields(pending_setfields)
 
         storage.rd_consts = self.memo.consts
         dump_storage(storage, liveboxes)
         return liveboxes[:]
 
-    def _number_virtuals(self, liveboxes, values, num_env_virtuals):
+    def _number_virtuals(self, liveboxes, optimizer, num_env_virtuals):
         # !! 'liveboxes' is a list that is extend()ed in-place !!
         memo = self.memo
         new_liveboxes = [None] * memo.num_cached_boxes()
             memo.nvholes += length - len(vfieldboxes)
             for virtualbox, fieldboxes in vfieldboxes.iteritems():
                 num, _ = untag(self.liveboxes[virtualbox])
-                value = values[virtualbox]
+                value = optimizer.getvalue(virtualbox)
                 fieldnums = [self._gettagged(box)
                              for box in fieldboxes]
                 vinfo = value.make_virtual_info(self, fieldnums)

pypy/jit/metainterp/test/test_resume.py

     rd_virtuals = None
     rd_pendingfields = None
 
+
+class FakeOptimizer(object):
+    def __init__(self, values):
+        self.values = values
+        
+    def getvalue(self, box):
+        try:
+            value = self.values[box]
+        except KeyError:
+            value = self.values[box] = OptValue(box)
+        return value
+        
+
 def test_tag():
     assert tag(3, 1) == rffi.r_short(3<<2|1)
     assert tag(-3, 2) == rffi.r_short(-3<<2|2)
     capture_resumedata(fs, None, [], storage)
     memo = ResumeDataLoopMemo(FakeMetaInterpStaticData())
     modifier = ResumeDataVirtualAdder(storage, memo)
-    liveboxes = modifier.finish({})
+    liveboxes = modifier.finish(FakeOptimizer({}))
     metainterp = MyMetaInterp()
 
     b1t, b2t, b3t = [BoxInt(), BoxPtr(), BoxInt()]
     capture_resumedata(fs, [b4], [], storage)
     memo = ResumeDataLoopMemo(FakeMetaInterpStaticData())
     modifier = ResumeDataVirtualAdder(storage, memo)
-    liveboxes = modifier.finish({})
+    liveboxes = modifier.finish(FakeOptimizer({}))
     metainterp = MyMetaInterp()
 
     b1t, b2t, b3t, b4t = [BoxInt(), BoxPtr(), BoxInt(), BoxPtr()]
     
     memo = ResumeDataLoopMemo(FakeMetaInterpStaticData())
     modifier = ResumeDataVirtualAdder(storage, memo)
-    liveboxes = modifier.finish({})
+    liveboxes = modifier.finish(FakeOptimizer({}))
 
     modifier = ResumeDataVirtualAdder(storage2, memo)
-    liveboxes2 = modifier.finish({})
+    liveboxes2 = modifier.finish(FakeOptimizer({}))
 
     metainterp = MyMetaInterp()
 
     memo = ResumeDataLoopMemo(FakeMetaInterpStaticData())
     values = {b2: virtual_value(b2, b5, c4)}
     modifier = ResumeDataVirtualAdder(storage, memo)
-    liveboxes = modifier.finish(values)
+    liveboxes = modifier.finish(FakeOptimizer(values))
     assert len(storage.rd_virtuals) == 1
     assert storage.rd_virtuals[0].fieldnums == [tag(-1, TAGBOX),
                                                 tag(0, TAGCONST)]
     values = {b2: virtual_value(b2, b4, v6), b6: v6}
     memo.clear_box_virtual_numbers()
     modifier = ResumeDataVirtualAdder(storage2, memo)
-    liveboxes2 = modifier.finish(values)
+    liveboxes2 = modifier.finish(FakeOptimizer(values))
     assert len(storage2.rd_virtuals) == 2    
     assert storage2.rd_virtuals[0].fieldnums == [tag(len(liveboxes2)-1, TAGBOX),
                                                  tag(-1, TAGVIRTUAL)]
     memo = ResumeDataLoopMemo(FakeMetaInterpStaticData())
     values = {b2: virtual_value(b2, b5, c4)}
     modifier = ResumeDataVirtualAdder(storage, memo)
-    liveboxes = modifier.finish(values)
+    liveboxes = modifier.finish(FakeOptimizer(values))
     assert len(storage.rd_virtuals) == 1
     assert storage.rd_virtuals[0].fieldnums == [tag(-1, TAGBOX),
                                                 tag(0, TAGCONST)]
     capture_resumedata(fs, None, [], storage2)
     values[b4] = virtual_value(b4, b6, c4)
     modifier = ResumeDataVirtualAdder(storage2, memo)
-    liveboxes = modifier.finish(values)
+    liveboxes = modifier.finish(FakeOptimizer(values))
     assert len(storage2.rd_virtuals) == 2
     assert storage2.rd_virtuals[1].fieldnums == storage.rd_virtuals[0].fieldnums
     assert storage2.rd_virtuals[1] is storage.rd_virtuals[0]
     v1.setfield(LLtypeMixin.nextdescr, v2)
     values = {b1: v1, b2: v2}
     modifier = ResumeDataVirtualAdder(storage, memo)
-    liveboxes = modifier.finish(values)
+    liveboxes = modifier.finish(FakeOptimizer(values))
     assert liveboxes == [b3]
     assert len(storage.rd_virtuals) == 2
     assert storage.rd_virtuals[0].fieldnums == [tag(-1, TAGBOX),
 
     memo = ResumeDataLoopMemo(FakeMetaInterpStaticData())
 
-    numb, liveboxes, v = memo.number({}, snap1)
+    numb, liveboxes, v = memo.number(FakeOptimizer({}), snap1)
     assert v == 0
 
     assert liveboxes == {b1: tag(0, TAGBOX), b2: tag(1, TAGBOX),
                                     tag(0, TAGBOX), tag(2, TAGINT)]
     assert not numb.prev.prev
 
-    numb2, liveboxes2, v = memo.number({}, snap2)
+    numb2, liveboxes2, v = memo.number(FakeOptimizer({}), snap2)
     assert v == 0
     
     assert liveboxes2 == {b1: tag(0, TAGBOX), b2: tag(1, TAGBOX),
             return self.virt
 
     # renamed
-    numb3, liveboxes3, v = memo.number({b3: FakeValue(False, c4)}, snap3)
+    numb3, liveboxes3, v = memo.number(FakeOptimizer({b3: FakeValue(False, c4)}),
+                                       snap3)
     assert v == 0
     
     assert liveboxes3 == {b1: tag(0, TAGBOX), b2: tag(1, TAGBOX)}
     env4 = [c3, b4, b1, c3]
     snap4 = Snapshot(snap, env4)    
 
-    numb4, liveboxes4, v = memo.number({b4: FakeValue(True, b4)}, snap4)
+    numb4, liveboxes4, v = memo.number(FakeOptimizer({b4: FakeValue(True, b4)}),
+                                       snap4)
     assert v == 1
     
     assert liveboxes4 == {b1: tag(0, TAGBOX), b2: tag(1, TAGBOX),
     env5 = [b1, b4, b5]
     snap5 = Snapshot(snap4, env5)    
 
-    numb5, liveboxes5, v = memo.number({b4: FakeValue(True, b4),
-                                        b5: FakeValue(True, b5)}, snap5)
+    numb5, liveboxes5, v = memo.number(FakeOptimizer({b4: FakeValue(True, b4),
+                                                      b5: FakeValue(True, b5)}),
+                                       snap5)
     assert v == 2
     
     assert liveboxes5 == {b1: tag(0, TAGBOX), b2: tag(1, TAGBOX),
     storage = make_storage(b1s, b2s, b3s)
     memo = ResumeDataLoopMemo(FakeMetaInterpStaticData())    
     modifier = ResumeDataVirtualAdder(storage, memo)
-    liveboxes = modifier.finish({})
+    liveboxes = modifier.finish(FakeOptimizer({}))
     assert storage.rd_snapshot is None
     cpu = MyCPU([])
     reader = ResumeDataDirectReader(MyMetaInterp(cpu), storage)
     storage = make_storage(b1s, b2s, b3s)
     memo = ResumeDataLoopMemo(FakeMetaInterpStaticData())
     modifier = ResumeDataVirtualAdder(storage, memo)
-    modifier.finish({})
+    modifier.finish(FakeOptimizer({}))
     assert len(memo.consts) == 2
     assert storage.rd_consts is memo.consts
 
     b1s, b2s, b3s = [ConstInt(sys.maxint), ConstInt(2**17), ConstInt(-65)]
     storage2 = make_storage(b1s, b2s, b3s)
     modifier2 = ResumeDataVirtualAdder(storage2, memo)
-    modifier2.finish({})
+    modifier2.finish(FakeOptimizer({}))
     assert len(memo.consts) == 3    
     assert storage2.rd_consts is memo.consts
 
 
     val = FakeValue()
     values = {b1s: val, b2s: val}  
-    liveboxes = modifier.finish(values)
+    liveboxes = modifier.finish(FakeOptimizer(values))
     assert storage.rd_snapshot is None
     b1t, b3t = [BoxInt(11), BoxInt(33)]
     newboxes = _resume_remap(liveboxes, [b1_2, b3s], b1t, b3t)
     storage = make_storage(b1s, b2s, b3s)
     memo = ResumeDataLoopMemo(FakeMetaInterpStaticData())        
     modifier = ResumeDataVirtualAdder(storage, memo)
-    liveboxes = modifier.finish({})
+    liveboxes = modifier.finish(FakeOptimizer({}))
     b2t, b3t = [BoxPtr(demo55o), BoxInt(33)]
     newboxes = _resume_remap(liveboxes, [b2s, b3s], b2t, b3t)
     metainterp = MyMetaInterp()
     values = {b2s: v2, b4s: v4}
 
     liveboxes = []
-    modifier._number_virtuals(liveboxes, values, 0)
+    modifier._number_virtuals(liveboxes, FakeOptimizer(values), 0)
     storage.rd_consts = memo.consts[:]
     storage.rd_numb = None
     # resume
     modifier.register_virtual_fields(b2s, [b4s, c1s])
     liveboxes = []
     values = {b2s: v2}
-    modifier._number_virtuals(liveboxes, values, 0)
+    modifier._number_virtuals(liveboxes, FakeOptimizer(values), 0)
     dump_storage(storage, liveboxes)
     storage.rd_consts = memo.consts[:]
     storage.rd_numb = None
     v2.setfield(LLtypeMixin.bdescr, OptValue(b4s))
     modifier.register_virtual_fields(b2s, [c1s, b4s])
     liveboxes = []
-    modifier._number_virtuals(liveboxes, {b2s: v2}, 0)
+    modifier._number_virtuals(liveboxes, FakeOptimizer({b2s: v2}), 0)
     dump_storage(storage, liveboxes)
     storage.rd_consts = memo.consts[:]
     storage.rd_numb = None
 
     values = {b4s: v4, b2s: v2}
     liveboxes = []
-    modifier._number_virtuals(liveboxes, values, 0)
+    modifier._number_virtuals(liveboxes, FakeOptimizer(values), 0)
     assert liveboxes == [b2s, b4s] or liveboxes == [b4s, b2s]
     modifier._add_pending_fields([(LLtypeMixin.nextdescr, b2s, b4s, -1)])
     storage.rd_consts = memo.consts[:]

pypy/module/_demo/test/test_sieve.py

+from pypy.conftest import gettestobjspace
+
+
+class AppTestSieve:
+    def setup_class(cls):
+        cls.space = gettestobjspace(usemodules=('_demo',))
+
+    def test_sieve(self):
+        import _demo
+        lst = _demo.sieve(100)
+        assert lst == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41,
+                       43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97]

pypy/module/cpyext/api.py

         "Dict": "space.w_dict",
         "Tuple": "space.w_tuple",
         "List": "space.w_list",
+        "Set": "space.w_set",
         "Int": "space.w_int",
         "Bool": "space.w_bool",
         "Float": "space.w_float",
         ('buf', rffi.VOIDP),
         ('obj', PyObject),
         ('len', Py_ssize_t),
-        # ('itemsize', Py_ssize_t),
+        ('itemsize', Py_ssize_t),
 
-        # ('readonly', lltype.Signed),
-        # ('ndim', lltype.Signed),
-        # ('format', rffi.CCHARP),
-        # ('shape', Py_ssize_tP),
-        # ('strides', Py_ssize_tP),
-        # ('suboffets', Py_ssize_tP),
-        # ('smalltable', rffi.CFixedArray(Py_ssize_t, 2)),
-        # ('internal', rffi.VOIDP)
+        ('readonly', lltype.Signed),
+        ('ndim', lltype.Signed),
+        ('format', rffi.CCHARP),
+        ('shape', Py_ssize_tP),
+        ('strides', Py_ssize_tP),
+        ('suboffsets', Py_ssize_tP),
+        #('smalltable', rffi.CFixedArray(Py_ssize_t, 2)),
+        ('internal', rffi.VOIDP)
         ))
 
 @specialize.memo()

pypy/module/cpyext/dictobject.py

 from pypy.module.cpyext.pyobject import RefcountState
 from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall
 from pypy.interpreter.error import OperationError
+from pypy.rlib.objectmodel import specialize
 
 @cpython_api([], PyObject)
 def PyDict_New(space):
         w_item = space.call_method(w_iter, "next")
         w_key, w_value = space.fixedview(w_item, 2)
         state = space.fromcache(RefcountState)
-        pkey[0]   = state.make_borrowed(w_dict, w_key)
-        pvalue[0] = state.make_borrowed(w_dict, w_value)
+        if pkey:
+            pkey[0]   = state.make_borrowed(w_dict, w_key)
+        if pvalue:
+            pvalue[0] = state.make_borrowed(w_dict, w_value)
         ppos[0] += 1
     except OperationError, e:
         if not e.match(space, space.w_StopIteration):
             raise
         return 0
     return 1
+
+@specialize.memo()
+def make_frozendict(space):
+    return space.appexec([], '''():
+    import collections
+    class FrozenDict(collections.Mapping):
+        def __init__(self, *args, **kwargs):
+            self._d = dict(*args, **kwargs)
+        def __iter__(self):
+            return iter(self._d)
+        def __len__(self):
+            return len(self._d)
+        def __getitem__(self, key):
+            return self._d[key]
+    return FrozenDict''')
+
+@cpython_api([PyObject], PyObject)
+def PyDictProxy_New(space, w_dict):
+    w_frozendict = make_frozendict(space)
+    return space.call_function(w_frozendict, w_dict)
+
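
At app level, the object returned by PyDictProxy_New() is simply an instance of the FrozenDict class built by the appexec snippet above: a read-only view over the wrapped dict. The same class outside of space.appexec, as a quick usage sketch:

    import collections

    class FrozenDict(collections.Mapping):
        # same body as in make_frozendict() above
        def __init__(self, *args, **kwargs):
            self._d = dict(*args, **kwargs)
        def __iter__(self):
            return iter(self._d)
        def __len__(self):
            return len(self._d)
        def __getitem__(self, key):
            return self._d[key]

    proxy = FrozenDict({'sys': object()})
    assert 'sys' in proxy and len(proxy) == 1
    try:
        proxy['sys'] = None     # no __setitem__, so item assignment fails
    except TypeError:
        pass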

pypy/module/cpyext/include/methodobject.h

     PyObject_HEAD
     PyMethodDef *m_ml; /* Description of the C function to call */
     PyObject    *m_self; /* Passed as 'self' arg to the C func, can be NULL */
+    PyObject    *m_module; /* The __module__ attribute, can be anything */
 } PyCFunctionObject;
 
 /* Flag passed to newmethodobject */

pypy/module/cpyext/include/object.h

 
     /* This is Py_ssize_t so it can be
        pointed to by strides in simple case.*/
-    /* Py_ssize_t itemsize; */
-    /* int readonly; */
-    /* int ndim; */
-    /* char *format; */
-    /* Py_ssize_t *shape; */
-    /* Py_ssize_t *strides; */
-    /* Py_ssize_t *suboffsets; */
+    Py_ssize_t itemsize;
+    int readonly;
+    int ndim;
+    char *format;
+    Py_ssize_t *shape;
+    Py_ssize_t *strides;
+    Py_ssize_t *suboffsets;
 
     /* static store for shape and strides of
        mono-dimensional buffers. */
     /* Py_ssize_t smalltable[2]; */
-    /* void *internal; */
+    void *internal;
 } Py_buffer;
 
 

pypy/module/cpyext/include/pystate.h

 
 typedef struct _ts {
     PyInterpreterState *interp;
+    PyObject *dict;  /* Stores per-thread state */
 } PyThreadState;
 
 #define Py_BEGIN_ALLOW_THREADS { \
     enum {PyGILState_LOCKED, PyGILState_UNLOCKED}
         PyGILState_STATE;
 
+#define PyThreadState_GET() PyThreadState_Get()
+
 #endif /* !Py_PYSTATE_H */

pypy/module/cpyext/include/pythread.h

 #ifndef Py_PYTHREAD_H
 #define Py_PYTHREAD_H
 
+#define WITH_THREAD
+
 typedef void *PyThread_type_lock;
 #define WAIT_LOCK	1
 #define NOWAIT_LOCK	0

pypy/module/cpyext/include/structmember.h

 } PyMemberDef;
 
 
-/* Types */
+/* Types. These constants are also in structmemberdefs.py. */
 #define T_SHORT		0
 #define T_INT		1
 #define T_LONG		2
 #define T_LONGLONG	17
 #define T_ULONGLONG	 18
 
-/* Flags */
+/* Flags. These constants are also in structmemberdefs.py. */
 #define READONLY      1
 #define RO            READONLY                /* Shorthand */
+#define READ_RESTRICTED 2
+#define PY_WRITE_RESTRICTED 4
+#define RESTRICTED    (READ_RESTRICTED | PY_WRITE_RESTRICTED)
 
 
 #ifdef __cplusplus

pypy/module/cpyext/methodobject.py

     PyObjectFields + (
      ('m_ml', lltype.Ptr(PyMethodDef)),
      ('m_self', PyObject),
+     ('m_module', PyObject),
      ))
 PyCFunctionObject = lltype.Ptr(PyCFunctionObjectStruct)
 
     assert isinstance(w_obj, W_PyCFunctionObject)
     py_func.c_m_ml = w_obj.ml
     py_func.c_m_self = make_ref(space, w_obj.w_self)
+    py_func.c_m_module = make_ref(space, w_obj.w_module)
 
 @cpython_api([PyObject], lltype.Void, external=False)
 def cfunction_dealloc(space, py_obj):
     py_func = rffi.cast(PyCFunctionObject, py_obj)
     Py_DecRef(space, py_func.c_m_self)
+    Py_DecRef(space, py_func.c_m_module)
     from pypy.module.cpyext.object import PyObject_dealloc
     PyObject_dealloc(space, py_obj)
 

pypy/module/cpyext/object.py

     This is the equivalent of the Python expression hash(o)."""
     return space.int_w(space.hash(w_obj))
 
+@cpython_api([PyObject], PyObject)
+def PyObject_Dir(space, w_o):
+    """This is equivalent to the Python expression dir(o), returning a (possibly
+    empty) list of strings appropriate for the object argument, or NULL if there
+    was an error.  If the argument is NULL, this is like the Python dir(),
+    returning the names of the current locals; in this case, if no execution frame
+    is active then NULL is returned but PyErr_Occurred() will return false."""
+    return space.call_function(space.builtin.get('dir'), w_o)
+
 @cpython_api([PyObject, rffi.CCHARPP, Py_ssize_tP], rffi.INT_real, error=-1)
 def PyObject_AsCharBuffer(space, obj, bufferp, sizep):
     """Returns a pointer to a read-only memory location usable as
     return 0
 
 
+PyBUF_WRITABLE = 0x0001  # Copied from object.h
+
 @cpython_api([lltype.Ptr(Py_buffer), PyObject, rffi.VOIDP, Py_ssize_t,
               lltype.Signed, lltype.Signed], rffi.INT, error=CANNOT_FAIL)
 def PyBuffer_FillInfo(space, view, obj, buf, length, readonly, flags):
     view.c_len = length
     view.c_obj = obj
     Py_IncRef(space, obj)
+    view.c_itemsize = 1
+    if flags & PyBUF_WRITABLE:
+        rffi.setintfield(view, 'c_readonly', 0)
+    else:
+        rffi.setintfield(view, 'c_readonly', 1)
+    rffi.setintfield(view, 'c_ndim', 0)
+    view.c_format = lltype.nullptr(rffi.CCHARP.TO)
+    view.c_shape = lltype.nullptr(Py_ssize_tP.TO)
+    view.c_strides = lltype.nullptr(Py_ssize_tP.TO)
+    view.c_suboffsets = lltype.nullptr(Py_ssize_tP.TO)
+    view.c_internal = lltype.nullptr(rffi.VOIDP.TO)
+
     return 0
 
 

pypy/module/cpyext/pyfile.py

 from pypy.rpython.lltypesystem import rffi, lltype
 from pypy.module.cpyext.api import (
-    cpython_api, CONST_STRING, FILEP, build_type_checkers)
+    cpython_api, CANNOT_FAIL, CONST_STRING, FILEP, build_type_checkers)
 from pypy.module.cpyext.pyobject import PyObject, borrow_from
+from pypy.module.cpyext.object import Py_PRINT_RAW
 from pypy.interpreter.error import OperationError
 from pypy.module._file.interp_file import W_File
 
 def PyFile_WriteString(space, s, w_p):
     """Write string s to file object p.  Return 0 on success or -1 on
     failure; the appropriate exception will be set."""
-    w_s = space.wrap(rffi.charp2str(s))
-    space.call_method(w_p, "write", w_s)
+    w_str = space.wrap(rffi.charp2str(s))
+    space.call_method(w_p, "write", w_str)
+    return 0
+
+@cpython_api([PyObject, PyObject, rffi.INT_real], rffi.INT_real, error=-1)
+def PyFile_WriteObject(space, w_obj, w_p, flags):
+    """
+    Write object obj to file object p.  The only supported flag for flags is
+    Py_PRINT_RAW; if given, the str() of the object is written
+    instead of the repr().  Return 0 on success or -1 on failure; the
+    appropriate exception will be set."""
+    if rffi.cast(lltype.Signed, flags) & Py_PRINT_RAW:
+        w_str = space.str(w_obj)
+    else:
+        w_str = space.repr(w_obj)
+    space.call_method(w_p, "write", w_str)
     return 0
 
 @cpython_api([PyObject], PyObject)
 def PyFile_Name(space, w_p):
     """Return the name of the file specified by p as a string object."""
-    return borrow_from(w_p, space.getattr(w_p, space.wrap("name")))
+    return borrow_from(w_p, space.getattr(w_p, space.wrap("name")))
+
+@cpython_api([PyObject, rffi.INT_real], rffi.INT_real, error=CANNOT_FAIL)
+def PyFile_SoftSpace(space, w_p, newflag):
+    """
+    This function exists for internal use by the interpreter.  Set the
+    softspace attribute of p to newflag and return the previous value.
+    p does not have to be a file object for this function to work
+    properly; any object is supported (though it's only interesting if
+    the softspace attribute can be set).  This function clears any
+    errors, and will return 0 as the previous value if the attribute
+    either does not exist or if there were errors in retrieving it.
+    There is no way to detect errors from this function, but doing so
+    should not be needed."""
+    try:
+        if rffi.cast(lltype.Signed, newflag):
+            w_newflag = space.w_True
+        else:
+            w_newflag = space.w_False
+        oldflag = space.int_w(space.getattr(w_p, space.wrap("softspace")))
+        space.setattr(w_p, space.wrap("softspace"), w_newflag)
+        return oldflag
+    except OperationError, e:
+        return 0
+

pypy/module/cpyext/pystate.py

 from pypy.module.cpyext.api import (
     cpython_api, generic_cpy_call, CANNOT_FAIL, CConfig, cpython_struct)
+from pypy.module.cpyext.pyobject import PyObject, Py_DecRef, make_ref
 from pypy.rpython.lltypesystem import rffi, lltype
 
 PyInterpreterStateStruct = lltype.ForwardReference()
 PyInterpreterState = lltype.Ptr(PyInterpreterStateStruct)
 cpython_struct(
-    "PyInterpreterState", [('next', PyInterpreterState)], PyInterpreterStateStruct)
-PyThreadState = lltype.Ptr(cpython_struct("PyThreadState", [('interp', PyInterpreterState)]))
+    "PyInterpreterState",
+    [('next', PyInterpreterState)],
+    PyInterpreterStateStruct)
+PyThreadState = lltype.Ptr(cpython_struct(
+    "PyThreadState", 
+    [('interp', PyInterpreterState),
+     ('dict', PyObject),
+     ]))
 
 @cpython_api([], PyThreadState, error=CANNOT_FAIL)
 def PyEval_SaveThread(space):
     return 1
 
 # XXX: might be generally useful
-def encapsulator(T, flavor='raw'):
+def encapsulator(T, flavor='raw', dealloc=None):
     class MemoryCapsule(object):
-        def __init__(self, alloc=True):
-            if alloc:
+        def __init__(self, space):
+            self.space = space
+            if space is not None:
                 self.memory = lltype.malloc(T, flavor=flavor)
             else:
                 self.memory = lltype.nullptr(T)
         def __del__(self):
             if self.memory:
+                if dealloc and self.space:
+                    dealloc(self.memory, self.space)
                 lltype.free(self.memory, flavor=flavor)
     return MemoryCapsule
 
-ThreadStateCapsule = encapsulator(PyThreadState.TO)
+def ThreadState_dealloc(ts, space):
+    assert space is not None
+    Py_DecRef(space, ts.c_dict)
+ThreadStateCapsule = encapsulator(PyThreadState.TO,
+                                  dealloc=ThreadState_dealloc)
 
 from pypy.interpreter.executioncontext import ExecutionContext
-ExecutionContext.cpyext_threadstate = ThreadStateCapsule(alloc=False)
+ExecutionContext.cpyext_threadstate = ThreadStateCapsule(None)
 
 class InterpreterState(object):
     def __init__(self, space):
         self.interpreter_state = lltype.malloc(
             PyInterpreterState.TO, flavor='raw', zero=True, immortal=True)
 
-    def new_thread_state(self):
-        capsule = ThreadStateCapsule()
+    def new_thread_state(self, space):
+        capsule = ThreadStateCapsule(space)
         ts = capsule.memory
         ts.c_interp = self.interpreter_state
+        ts.c_dict = make_ref(space, space.newdict())
         return capsule
 
     def get_thread_state(self, space):
         ec = space.getexecutioncontext()
-        return self._get_thread_state(ec).memory
+        return self._get_thread_state(space, ec).memory
 
-    def _get_thread_state(self, ec):
+    def _get_thread_state(self, space, ec):
         if ec.cpyext_threadstate.memory == lltype.nullptr(PyThreadState.TO):
-            ec.cpyext_threadstate = self.new_thread_state()
+            ec.cpyext_threadstate = self.new_thread_state(space)
 
         return ec.cpyext_threadstate
 
     state = space.fromcache(InterpreterState)
     return state.get_thread_state(space)
 
+@cpython_api([], PyObject, error=CANNOT_FAIL)
+def PyThreadState_GetDict(space):
+    state = space.fromcache(InterpreterState)
+    return state.get_thread_state(space).c_dict
+
 @cpython_api([PyThreadState], PyThreadState, error=CANNOT_FAIL)
 def PyThreadState_Swap(space, tstate):
     """Swap the current thread state with the thread state given by the argument

pypy/module/cpyext/pythonrun.py

     value."""
     return space.fromcache(State).get_programname()
 
+@cpython_api([], rffi.CCHARP)
+def Py_GetVersion(space):
+    """Return the version of this Python interpreter.  This is a
+    string that looks something like
+
+    "1.5 (\#67, Dec 31 1997, 22:34:28) [GCC 2.7.2.2]"
+
+    The first word (up to the first space character) is the current
+    Python version; the first three characters are the major and minor
+    version separated by a period.  The returned string points into
+    static storage; the caller should not modify its value.  The value
+    is available to Python code as sys.version."""
+    return space.fromcache(State).get_version()
+
 @cpython_api([lltype.Ptr(lltype.FuncType([], lltype.Void))], rffi.INT_real, error=-1)
 def Py_AtExit(space, func_ptr):
     """Register a cleanup function to be called by Py_Finalize().  The cleanup

pypy/module/cpyext/setobject.py

     return 0
 
 
+@cpython_api([PyObject], PyObject)
+def PySet_Pop(space, w_set):
+    """Return a new reference to an arbitrary object in the set, and removes the
+    object from the set.  Return NULL on failure.  Raise KeyError if the
+    set is empty. Raise a SystemError if set is not an instance of
+    set or its subtype."""
+    return space.call_method(w_set, "pop")
+
+@cpython_api([PyObject], rffi.INT_real, error=-1)
+def PySet_Clear(space, w_set):
+    """Empty an existing set of all elements."""
+    space.call_method(w_set, 'clear')
+    return 0
+
 @cpython_api([PyObject], Py_ssize_t, error=CANNOT_FAIL)
 def PySet_GET_SIZE(space, w_s):
     """Macro form of PySet_Size() without error checking."""

pypy/module/cpyext/slotdefs.py

         space.fromcache(State).check_and_raise_exception(always=True)
     return space.wrap(res)
 
+def wrap_delitem(space, w_self, w_args, func):
+    func_target = rffi.cast(objobjargproc, func)
+    check_num_args(space, w_args, 1)
+    w_key, = space.fixedview(w_args)
+    res = generic_cpy_call(space, func_target, w_self, w_key, None)
+    if rffi.cast(lltype.Signed, res) == -1:
+        space.fromcache(State).check_and_raise_exception(always=True)
+    return space.w_None
+
 def wrap_ssizessizeargfunc(space, w_self, w_args, func):
     func_target = rffi.cast(ssizessizeargfunc, func)
     check_num_args(space, w_args, 2)
 def slot_nb_int(space, w_self):
     return space.int(w_self)
 
+@cpython_api([PyObject], PyObject, external=False)
+def slot_tp_iter(space, w_self):
+    return space.iter(w_self)
+
+@cpython_api([PyObject], PyObject, external=False)
+def slot_tp_iternext(space, w_self):
+    return space.next(w_self)
+
 from pypy.rlib.nonconst import NonConstant
 
 SLOTS = {}
     TPSLOT("__buffer__", "tp_as_buffer.c_bf_getreadbuffer", None, "wrap_getreadbuffer", ""),
 )
 
+# partial sort to solve some slot conflicts:
+# Number slots before Mapping slots before Sequence slots.
+# These are the only conflicts between __name__ methods
+def slotdef_sort_key(slotdef):
+    if slotdef.slot_name.startswith('tp_as_number'):
+        return 1
+    if slotdef.slot_name.startswith('tp_as_mapping'):
+        return 2
+    if slotdef.slot_name.startswith('tp_as_sequence'):
+        return 3
+    return 0
+slotdefs = sorted(slotdefs, key=slotdef_sort_key)
+
 slotdefs_for_tp_slots = unrolling_iterable(
     [(x.method_name, x.slot_name, x.slot_names, x.slot_func)
      for x in slotdefs])
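
Because sorted() is stable, entries that get the same key from slotdef_sort_key() keep their original relative order; the key only enforces "Number before Mapping before Sequence" among the conflicting slots. A tiny self-contained illustration with made-up slot names (not the real slotdefs table):

    names = ['tp_as_sequence.c_sq_item', 'tp_hash',
             'tp_as_mapping.c_mp_subscript', 'tp_as_number.c_nb_add']

    def sort_key(name):
        if name.startswith('tp_as_number'):
            return 1
        if name.startswith('tp_as_mapping'):
            return 2
        if name.startswith('tp_as_sequence'):
            return 3
        return 0

    assert sorted(names, key=sort_key) == [
        'tp_hash',
        'tp_as_number.c_nb_add',
        'tp_as_mapping.c_mp_subscript',
        'tp_as_sequence.c_sq_item',
    ]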

pypy/module/cpyext/state.py

         self.space = space
         self.reset()
         self.programname = lltype.nullptr(rffi.CCHARP.TO)
+        self.version = lltype.nullptr(rffi.CCHARP.TO)
 
     def reset(self):
         from pypy.module.cpyext.modsupport import PyMethodDef
             lltype.render_immortal(self.programname)
         return self.programname
 
+    def get_version(self):
+        if not self.version:
+            space = self.space
+            w_version = space.sys.get('version')
+            version = space.str_w(w_version)
+            self.version = rffi.str2charp(version)
+            lltype.render_immortal(self.version)
+        return self.version
+
     def find_extension(self, name, path):
         from pypy.module.cpyext.modsupport import PyImport_AddModule
         from pypy.interpreter.module import Module

pypy/module/cpyext/stringobject.py

     s = rffi.charp2str(string)
     return space.new_interned_str(s)
 
+@cpython_api([PyObjectP], lltype.Void)
+def PyString_InternInPlace(space, string):
+    """Intern the argument *string in place.  The argument must be the
+    address of a pointer variable pointing to a Python string object.
+    If there is an existing interned string that is the same as
+    *string, it sets *string to it (decrementing the reference count
+    of the old string object and incrementing the reference count of
+    the interned string object), otherwise it leaves *string alone and
+    interns it (incrementing its reference count).  (Clarification:
+    even though there is a lot of talk about reference counts, think
+    of this function as reference-count-neutral; you own the object
+    after the call if and only if you owned it before the call.)
+
+    This function is not available in 3.x and does not have a PyBytes
+    alias."""
+    w_str = from_ref(space, string[0])
+    w_str = space.new_interned_w_str(w_str)
+    Py_DecRef(space, string[0])
+    string[0] = make_ref(space, w_str)
+
 @cpython_api([PyObject, rffi.CCHARP, rffi.CCHARP], PyObject)
 def PyString_AsEncodedObject(space, w_str, encoding, errors):
     """Encode a string object using the codec registered for encoding and return

pypy/module/cpyext/structmemberdefs.py

+# These constants are also in include/structmember.h
+
 T_SHORT = 0
 T_INT = 1
 T_LONG = 2
 T_ULONGLONG = 18
 
 READONLY = RO = 1
+READ_RESTRICTED = 2
+WRITE_RESTRICTED = 4
+RESTRICTED = READ_RESTRICTED | WRITE_RESTRICTED

pypy/module/cpyext/stubs.py

 from pypy.module.cpyext.api import (
-    cpython_api, PyObject, PyObjectP, CANNOT_FAIL, Py_buffer
+    cpython_api, PyObject, PyObjectP, CANNOT_FAIL
     )
 from pypy.module.cpyext.complexobject import Py_complex_ptr as Py_complex
 from pypy.rpython.lltypesystem import rffi, lltype
 PyMethodDef = rffi.VOIDP
 PyGetSetDef = rffi.VOIDP
 PyMemberDef = rffi.VOIDP
+Py_buffer = rffi.VOIDP
 va_list = rffi.VOIDP
 PyDateTime_Date = rffi.VOIDP
 PyDateTime_DateTime = rffi.VOIDP
 def _PyObject_Del(space, op):
     raise NotImplementedError
 
-@cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL)
-def PyObject_CheckBuffer(space, obj):
-    raise NotImplementedError
-
 @cpython_api([rffi.CCHARP], Py_ssize_t, error=CANNOT_FAIL)
 def PyBuffer_SizeFromFormat(space, format):
     """Return the implied ~Py_buffer.itemsize from the struct-stype
     """
     raise NotImplementedError
 
-@cpython_api([PyObject, rffi.INT_real], rffi.INT_real, error=CANNOT_FAIL)
-def PyFile_SoftSpace(space, p, newflag):
-    """
-    This function exists for internal use by the interpreter.  Set the
-    softspace attribute of p to newflag and return the previous value.
-    p does not have to be a file object for this function to work properly; any
-    object is supported (thought its only interesting if the softspace
-    attribute can be set).  This function clears any errors, and will return 0
-    as the previous value if the attribute either does not exist or if there were
-    errors in retrieving it.  There is no way to detect errors from this function,
-    but doing so should not be needed."""
-    raise NotImplementedError
-
-@cpython_api([PyObject, PyObject, rffi.INT_real], rffi.INT_real, error=-1)
-def PyFile_WriteObject(space, obj, p, flags):
-    """
-    Write object obj to file object p.  The only supported flag for flags is
-    Py_PRINT_RAW; if given, the str() of the object is written
-    instead of the repr().  Return 0 on success or -1 on failure; the
-    appropriate exception will be set."""
-    raise NotImplementedError
-
 @cpython_api([], PyObject)
 def PyFloat_GetInfo(space):
     """Return a structseq instance which contains information about the
     raise NotImplementedError
 
 @cpython_api([], rffi.CCHARP)
-def Py_GetVersion(space):
-    """Return the version of this Python interpreter.  This is a string that looks
-    something like
-
-    "1.5 (\#67, Dec 31 1997, 22:34:28) [GCC 2.7.2.2]"
-
-    The first word (up to the first space character) is the current Python version;
-    the first three characters are the major and minor version separated by a
-    period.  The returned string points into static storage; the caller should not
-    modify its value.  The value is available to Python code as sys.version."""
-    raise NotImplementedError
-
-@cpython_api([], rffi.CCHARP)
 def Py_GetPlatform(space):
     """Return the platform identifier for the current platform.  On Unix, this
     is formed from the"official" name of the operating system, converted to lower
     that haven't been explicitly destroyed at that point."""
     raise NotImplementedError
 
-@cpython_api([rffi.VOIDP], lltype.Void)
-def Py_AddPendingCall(space, func):
-    """Post a notification to the Python main thread.  If successful, func will
-    be called with the argument arg at the earliest convenience.  func will be
-    called having the global interpreter lock held and can thus use the full
-    Python API and can take any action such as setting object attributes to
-    signal IO completion.  It must return 0 on success, or -1 signalling an
-    exception.  The notification function won't be interrupted to perform another
-    asynchronous notification recursively, but it can still be interrupted to
-    switch threads if the global interpreter lock is released, for example, if it
-    calls back into Python code.
-
-    This function returns 0 on success in which case the notification has been
-    scheduled.  Otherwise, for example if the notification buffer is full, it
-    returns -1 without setting any exception.
-
-    This function can be called on any thread, be it a Python thread or some
-    other system thread.  If it is a Python thread, it doesn't matter if it holds
-    the global interpreter lock or not.
-    """
-    raise NotImplementedError
-
 @cpython_api([Py_tracefunc, PyObject], lltype.Void)
 def PyEval_SetProfile(space, func, obj):
     """Set the profiler function to func.  The obj parameter is passed to the
     """
     raise NotImplementedError
 
-@cpython_api([PyObject], PyObject)
-def PyObject_Dir(space, o):
-    """This is equivalent to the Python expression dir(o), returning a (possibly
-    empty) list of strings appropriate for the object argument, or NULL if there
-    was an error.  If the argument is NULL, this is like the Python dir(),
-    returning the names of the current locals; in this case, if no execution frame
-    is active then NULL is returned but PyErr_Occurred() will return false."""
-    raise NotImplementedError
-
 @cpython_api([], PyFrameObject)
 def PyEval_GetFrame(space):
     """Return the current thread state's frame, which is NULL if no frame is
     building-up new frozensets with PySet_Add()."""
     raise NotImplementedError
 
-@cpython_api([PyObject], PyObject)
-def PySet_Pop(space, set):
-    """Return a new reference to an arbitrary object in the set, and removes the
-    object from the set.  Return NULL on failure.  Raise KeyError if the
-    set is empty. Raise a SystemError if set is an not an instance of
-    set or its subtype."""
-    raise NotImplementedError
-
-@cpython_api([PyObject], rffi.INT_real, error=-1)
-def PySet_Clear(space, set):
-    """Empty an existing set of all elements."""
-    raise NotImplementedError
-
-@cpython_api([PyObjectP], lltype.Void)
-def PyString_InternInPlace(space, string):
-    """Intern the argument *string in place.  The argument must be the address of a
-    pointer variable pointing to a Python string object.  If there is an existing
-    interned string that is the same as *string, it sets *string to it
-    (decrementing the reference count of the old string object and incrementing the
-    reference count of the interned string object), otherwise it leaves *string
-    alone and interns it (incrementing its reference count).  (Clarification: even
-    though there is a lot of talk about reference counts, think of this function as
-    reference-count-neutral; you own the object after the call if and only if you
-    owned it before the call.)
-
-    This function is not available in 3.x and does not have a PyBytes alias."""
-    raise NotImplementedError
-
 @cpython_api([rffi.CCHARP, Py_ssize_t, rffi.CCHARP, rffi.CCHARP], PyObject)
 def PyString_Decode(space, s, size, encoding, errors):
     """Create an object by decoding size bytes of the encoded buffer s using the
     properly supporting 64-bit systems."""
     raise NotImplementedError
 
-@cpython_api([PyObject, PyObject, PyObject, Py_ssize_t], PyObject)
-def PyUnicode_Replace(space, str, substr, replstr, maxcount):
-    """Replace at most maxcount occurrences of substr in str with replstr and
-    return the resulting Unicode object. maxcount == -1 means replace all
-    occurrences.
-
-    This function used an int type for maxcount. This might
-    require changes in your code for properly supporting 64-bit systems."""
-    raise NotImplementedError
-
 @cpython_api([PyObject, PyObject, rffi.INT_real], PyObject)
 def PyUnicode_RichCompare(space, left, right, op):
     """Rich compare two unicode strings and return one of the following:

pypy/module/cpyext/stubsactive.py

 def Py_MakePendingCalls(space):
     return 0
 
+pending_call = lltype.Ptr(lltype.FuncType([rffi.VOIDP], rffi.INT_real))
+@cpython_api([pending_call, rffi.VOIDP], rffi.INT_real, error=-1)
+def Py_AddPendingCall(space, func, arg):
+    """Post a notification to the Python main thread.  If successful,
+    func will be called with the argument arg at the earliest
+    convenience.  func will be called having the global interpreter
+    lock held and can thus use the full Python API and can take any
+    action such as setting object attributes to signal IO completion.
+    It must return 0 on success, or -1 signalling an exception.  The
+    notification function won't be interrupted to perform another
+    asynchronous notification recursively, but it can still be
+    interrupted to switch threads if the global interpreter lock is
+    released, for example, if it calls back into Python code.
+
+    This function returns 0 on success in which case the notification
+    has been scheduled.  Otherwise, for example if the notification
+    buffer is full, it returns -1 without setting any exception.
+
+    This function can be called on any thread, be it a Python thread
+    or some other system thread.  If it is a Python thread, it doesn't
+    matter if it holds the global interpreter lock or not.
+    """
+    return -1
+
+thread_func = lltype.Ptr(lltype.FuncType([rffi.VOIDP], lltype.Void))
+@cpython_api([thread_func, rffi.VOIDP], rffi.INT_real, error=-1)
+def PyThread_start_new_thread(space, func, arg):
+    return -1

pypy/module/cpyext/test/test_arraymodule.py

         assert arr[:2].tolist() == [1,2]
         assert arr[1:3].tolist() == [2,3]
 
+    def test_slice_object(self):
+        module = self.import_module(name='array')
+        arr = module.array('i', [1,2,3,4])
+        assert arr[slice(1,3)].tolist() == [2,3]
+        arr[slice(1,3)] = module.array('i', [21, 22, 23])
+        assert arr.tolist() == [1, 21, 22, 23, 4]
+        del arr[slice(1, 3)]
+        assert arr.tolist() == [1, 23, 4]
+
     def test_buffer(self):
         module = self.import_module(name='array')
         arr = module.array('i', [1,2,3,4])

pypy/module/cpyext/test/test_cpyext.py

         print p
         assert 'py' in p
 
+    def test_get_version(self):
+        mod = self.import_extension('foo', [
+            ('get_version', 'METH_NOARGS',
+             '''
+             char* name1 = Py_GetVersion();
+             char* name2 = Py_GetVersion();
+             if (name1 != name2)
+                 Py_RETURN_FALSE;
+             return PyString_FromString(name1);
+             '''
+             ),
+            ])
+        p = mod.get_version()
+        print p
+        assert 'PyPy' in p
+
     def test_no_double_imports(self):
         import sys, os
         try:

pypy/module/cpyext/test/test_dictobject.py

 from pypy.module.cpyext.test.test_api import BaseApiTest
 from pypy.module.cpyext.api import Py_ssize_tP, PyObjectP
 from pypy.module.cpyext.pyobject import make_ref, from_ref
+from pypy.interpreter.error import OperationError
 
 class TestDictObject(BaseApiTest):
     def test_dict(self, space, api):
 
         assert space.eq_w(space.len(w_copy), space.len(w_dict))
         assert space.eq_w(w_copy, w_dict)
+
+    def test_iterkeys(self, space, api):
+        w_dict = space.sys.getdict(space)
+        py_dict = make_ref(space, w_dict)
+
+        ppos = lltype.malloc(Py_ssize_tP.TO, 1, flavor='raw')
+        pkey = lltype.malloc(PyObjectP.TO, 1, flavor='raw')
+        pvalue = lltype.malloc(PyObjectP.TO, 1, flavor='raw')
+
+        keys_w = []
+        values_w = []
+        try:
+            ppos[0] = 0
+            while api.PyDict_Next(w_dict, ppos, pkey, None):
+                w_key = from_ref(space, pkey[0])
+                keys_w.append(w_key)
+            ppos[0] = 0
+            while api.PyDict_Next(w_dict, ppos, None, pvalue):
+                w_value = from_ref(space, pvalue[0])
+                values_w.append(w_value)
+        finally:
+            lltype.free(ppos, flavor='raw')
+            lltype.free(pkey, flavor='raw')
+            lltype.free(pvalue, flavor='raw')
+
+        api.Py_DecRef(py_dict) # release borrowed references
+
+        assert space.eq_w(space.newlist(keys_w),
+                          space.call_method(w_dict, "keys"))
+        assert space.eq_w(space.newlist(values_w),
+                          space.call_method(w_dict, "values"))
+
+    def test_dictproxy(self, space, api):
+        w_dict = space.sys.get('modules')
+        w_proxy = api.PyDictProxy_New(w_dict)
+        assert space.is_true(space.contains(w_proxy, space.wrap('sys')))
+        raises(OperationError, space.setitem,
+               w_proxy, space.wrap('sys'), space.w_None)
+        raises(OperationError, space.delitem,
+               w_proxy, space.wrap('sys'))
+        raises(OperationError, space.call_method, w_proxy, 'clear')

pypy/module/cpyext/test/test_methodobject.py

 
 class AppTestMethodObject(AppTestCpythonExtensionBase):
     def test_call_METH(self):
-        mod = self.import_extension('foo', [
+        mod = self.import_extension('MyModule', [
             ('getarg_O', 'METH_O',
              '''
              Py_INCREF(args);
              }
              '''
              ),
+            ('getModule', 'METH_O',
+             '''
+             if(PyCFunction_Check(args)) {
+                 PyCFunctionObject* func = (PyCFunctionObject*)args;
+                 Py_INCREF(func->m_module);
+                 return func->m_module;
+             }
+             else {
+                 Py_RETURN_FALSE;
+             }
+             '''
+             ),
             ('isSameFunction', 'METH_O',
              '''
              PyCFunction ptr = PyCFunction_GetFunction(args);
              if (!ptr) return NULL;
-             if (ptr == foo_getarg_O)
+             if (ptr == MyModule_getarg_O)
                  Py_RETURN_TRUE;
              else
                  Py_RETURN_FALSE;
         assert mod.getarg_OLD(1, 2) == (1, 2)
 
         assert mod.isCFunction(mod.getarg_O) == "getarg_O"
+        assert mod.getModule(mod.getarg_O) == 'MyModule'
         assert mod.isSameFunction(mod.getarg_O)
         raises(TypeError, mod.isSameFunction, 1)
 

pypy/module/cpyext/test/test_object.py

         assert api.PyObject_Unicode(space.wrap("\xe9")) is None
         api.PyErr_Clear()
 
+    def test_dir(self, space, api):
+        w_dir = api.PyObject_Dir(space.sys)
+        assert space.isinstance_w(w_dir, space.w_list)
+        assert space.is_true(space.contains(w_dir, space.wrap('modules')))
+
 class AppTestObject(AppTestCpythonExtensionBase):
     def setup_class(cls):
         AppTestCpythonExtensionBase.setup_class.im_func(cls)

pypy/module/cpyext/test/test_pyfile.py

 from pypy.module.cpyext.api import fopen, fclose, fwrite
 from pypy.module.cpyext.test.test_api import BaseApiTest
+from pypy.module.cpyext.object import Py_PRINT_RAW
 from pypy.rpython.lltypesystem import rffi, lltype
 from pypy.tool.udir import udir
 import pytest
         out = out.replace('\r\n', '\n')
         assert out == "test\n"
 
+    def test_file_writeobject(self, space, api, capfd):
+        w_obj = space.wrap("test\n")
+        w_stdout = space.sys.get("stdout")
+        api.PyFile_WriteObject(w_obj, w_stdout, Py_PRINT_RAW)
+        api.PyFile_WriteObject(w_obj, w_stdout, 0)
+        space.call_method(w_stdout, "flush")
+        out, err = capfd.readouterr()
+        out = out.replace('\r\n', '\n')
+        assert out == "test\n'test\\n'"
+
+    def test_file_softspace(self, space, api, capfd):
+        w_stdout = space.sys.get("stdout")
+        assert api.PyFile_SoftSpace(w_stdout, 1) == 0
+        assert api.PyFile_SoftSpace(w_stdout, 0) == 1
+
+        api.PyFile_SoftSpace(w_stdout, 1)
+        w_ns = space.newdict()
+        space.exec_("print 1,", w_ns, w_ns)
+        space.exec_("print 2,", w_ns, w_ns)
+        api.PyFile_SoftSpace(w_stdout, 0)
+        space.exec_("print 3", w_ns, w_ns)
+        space.call_method(w_stdout, "flush")
+        out, err = capfd.readouterr()
+        out = out.replace('\r\n', '\n')
+        assert out == " 1 23\n"

pypy/module/cpyext/test/test_pystate.py

 from pypy.module.cpyext.test.test_api import BaseApiTest
 from pypy.rpython.lltypesystem.lltype import nullptr
 from pypy.module.cpyext.pystate import PyInterpreterState, PyThreadState
+from pypy.module.cpyext.pyobject import from_ref
 
 class AppTestThreads(AppTestCpythonExtensionBase):
     def test_allow_threads(self):
 
         api.PyEval_AcquireThread(tstate)
         api.PyEval_ReleaseThread(tstate)
+
+    def test_threadstate_dict(self, space, api):
+        ts = api.PyThreadState_Get()
+        ref = ts.c_dict
+        assert ref == api.PyThreadState_GetDict()
+        w_obj = from_ref(space, ref)
+        assert space.isinstance_w(w_obj, space.w_dict)

pypy/module/cpyext/test/test_setobject.py

         w_set = api.PySet_New(space.wrap([1,2,3,4]))
         assert api.PySet_Contains(w_set, space.wrap(1))
         assert not api.PySet_Contains(w_set, space.wrap(0))
+
+    def test_set_pop_clear(self, space, api):
+        w_set = api.PySet_New(space.wrap([1,2,3,4]))
+        w_obj = api.PySet_Pop(w_set)
+        assert space.int_w(w_obj) in (1,2,3,4)
+        assert space.len_w(w_set) == 3
+        api.PySet_Clear(w_set)
+        assert space.len_w(w_set) == 0
+

pypy/module/cpyext/test/test_stringobject.py

         res = module.test_string_format(1, "xyz")
         assert res == "bla 1 ble xyz\n"
 
+    def test_intern_inplace(self):
+        module = self.import_extension('foo', [
+            ("test_intern_inplace", "METH_O",
+             '''
+                 PyObject *s = args;
+                 Py_INCREF(s);
+                 PyString_InternInPlace(&s);
+                 return s;
+             '''
+             )
+            ])
+        # This does not test much, but at least the refcounts are checked.
+        assert module.test_intern_inplace('s') == 's'
+
 class TestString(BaseApiTest):
     def test_string_resize(self, space, api):
         py_str = new_empty_str(space, 10)

pypy/module/cpyext/test/test_typeobject.py

             ''')
         obj = module.new_obj()
         raises(ZeroDivisionError, obj.__setitem__, 5, None)
+
+    def test_tp_iter(self):
+        module = self.import_extension('foo', [
+           ("tp_iter", "METH_O",
+            '''
+                 if (!args->ob_type->tp_iter)
+                 {
+                     PyErr_SetNone(PyExc_ValueError);
+                     return NULL;
+                 }
+                 return args->ob_type->tp_iter(args);
+             '''
+             ),
+           ("tp_iternext", "METH_O",
+            '''
+                 if (!args->ob_type->tp_iternext)
+                 {
+                     PyErr_SetNone(PyExc_ValueError);
+                     return NULL;
+                 }
+                 return args->ob_type->tp_iternext(args);
+             '''
+             )
+            ])
+        l = [1]
+        it = module.tp_iter(l)
+        assert type(it) is type(iter([]))
+        assert module.tp_iternext(it) == 1
+        raises(StopIteration, module.tp_iternext, it)

pypy/module/cpyext/test/test_unicodeobject.py

         w_seq = space.wrap([u'a', u'b'])
         w_joined = api.PyUnicode_Join(w_sep, w_seq)
         assert space.unwrap(w_joined) == u'a<sep>b'
+
+    def test_fromordinal(self, space, api):
+        w_char = api.PyUnicode_FromOrdinal(65)
+        assert space.unwrap(w_char) == u'A'
+        w_char = api.PyUnicode_FromOrdinal(0)
+        assert space.unwrap(w_char) == u'\0'
+        w_char = api.PyUnicode_FromOrdinal(0xFFFF)
+        assert space.unwrap(w_char) == u'\uFFFF'
+
+    def test_replace(self, space, api):
+        w_str = space.wrap(u"abababab")
+        w_substr = space.wrap(u"a")
+        w_replstr = space.wrap(u"z")
+        assert u"zbzbabab" == space.unwrap(
+            api.PyUnicode_Replace(w_str, w_substr, w_replstr, 2))
+        assert u"zbzbzbzb" == space.unwrap(
+            api.PyUnicode_Replace(w_str, w_substr, w_replstr, -1))

pypy/module/cpyext/unicodeobject.py

     w_str = space.wrap(rffi.charpsize2str(s, size))
     return space.call_method(w_str, 'decode', space.wrap("utf-8"))
 
+@cpython_api([rffi.INT_real], PyObject)
+def PyUnicode_FromOrdinal(space, ordinal):
+    """Create a Unicode Object from the given Unicode code point ordinal.
+
+    The ordinal must be in range(0x10000) on narrow Python builds
+    (UCS2), and range(0x110000) on wide builds (UCS4). A ValueError is
+    raised in case it is not."""
+    w_ordinal = space.wrap(rffi.cast(lltype.Signed, ordinal))
+    return space.call_function(space.builtin.get('unichr'), w_ordinal)
+
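
PyUnicode_FromOrdinal above simply delegates to the app-level unichr() builtin. A minimal Python 2 sketch of the behaviour described in the docstring, included here only as an illustration:

    # Minimal app-level sketch of what PyUnicode_FromOrdinal delegates to.
    import sys

    assert unichr(65) == u'A'
    assert unichr(0xFFFF) == u'\uFFFF'

    # The accepted range depends on the build: range(0x10000) on narrow
    # (UCS2) builds, range(0x110000) on wide (UCS4) builds.
    limit = 0x10000 if sys.maxunicode == 0xFFFF else 0x110000
    try:
        unichr(limit)
    except ValueError:
        pass  # out-of-range ordinals raise ValueError, as documented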
 @cpython_api([PyObjectP, Py_ssize_t], rffi.INT_real, error=-1)
 def PyUnicode_Resize(space, ref, newsize):
     # XXX always create a new string so far
 
 @cpython_api([PyObject, PyObject], PyObject)
 def PyUnicode_Join(space, w_sep, w_seq):
-    """Join a sequence of strings using the given separator and return the resulting
-    Unicode string."""
+    """Join a sequence of strings using the given separator and return
+    the resulting Unicode string."""
     return space.call_method(w_sep, 'join', w_seq)
+
+@cpython_api([PyObject, PyObject, PyObject, Py_ssize_t], PyObject)
+def PyUnicode_Replace(space, w_str, w_substr, w_replstr, maxcount):
+    """Replace at most maxcount occurrences of substr in str with replstr and
+    return the resulting Unicode object. maxcount == -1 means replace all
+    occurrences."""
+    return space.call_method(w_str, "replace", w_substr, w_replstr,
+                             space.wrap(maxcount))
+
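
Both PyUnicode_Join and PyUnicode_Replace above are thin wrappers around the corresponding unicode methods. A minimal app-level illustration of the semantics, in particular the maxcount argument:

    # App-level equivalent of what the two helpers delegate to.
    assert u"<sep>".join([u'a', u'b']) == u'a<sep>b'
    assert u"abababab".replace(u"a", u"z", 2) == u"zbzbabab"
    # maxcount == -1 replaces every occurrence
    assert u"abababab".replace(u"a", u"z", -1) == u"zbzbzbzb"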

pypy/module/micronumpy/__init__.py

         ("arccos", "arccos"),
         ("arcsin", "arcsin"),
         ("arctan", "arctan"),
+        ("arccosh", "arccosh"),