Commits

Philip Jenvey committed 3f146e4 Merge

merge default

  • Parent commits 05832d0, 620c44f
  • Branches py3k

Files changed (44)

lib-python/2.7/sysconfig.py

         'data'   : '{base}',
         },
     'pypy': {
-        'stdlib': '{base}/lib-python',
-        'platstdlib': '{base}/lib-python',
-        'purelib': '{base}/lib-python',
-        'platlib': '{base}/lib-python',
+        'stdlib': '{base}/lib-python/{py_version_short}',
+        'platstdlib': '{base}/lib-python/{py_version_short}',
+        'purelib': '{base}/lib-python/{py_version_short}',
+        'platlib': '{base}/lib-python/{py_version_short}',
         'include': '{base}/include',
         'platinclude': '{base}/include',
         'scripts': '{base}/bin',

lib-python/2.7/test/test_capi.py

     threading = None
 import _testcapi
 
+skips = []
+if test_support.check_impl_detail(pypy=True):
+    skips += [
+            'test_broken_memoryview',
+            'test_capsule',
+            'test_lazy_hash_inheritance',
+            'test_widechar',
+            'TestThreadState',
+            'TestPendingCalls',
+            ]
+
 @unittest.skipUnless(threading, 'Threading required for this test.')
 class TestPendingCalls(unittest.TestCase):
 
 def test_main():
 
     for name in dir(_testcapi):
-        if name.startswith('test_'):
+        if name.startswith('test_') and name not in skips:
             test = getattr(_testcapi, name)
             if test_support.verbose:
                 print "internal", name
             raise test_support.TestFailed, \
                   "Couldn't find main thread correctly in the list"
 
-    if threading:
+    if threading and 'TestThreadState' not in skips:
         import thread
         import time
         TestThreadState()
         t.start()
         t.join()
 
-    test_support.run_unittest(TestPendingCalls)
+    if 'TestPendingCalls' not in skips:
+        test_support.run_unittest(TestPendingCalls)
 
 if __name__ == "__main__":
     test_main()

lib-python/2.7/test/test_multiprocessing.py

 
         # Because we are using xmlrpclib for serialization instead of
         # pickle this will cause a serialization error.
-        self.assertRaises(Exception, queue.put, time.sleep)
+        self.assertRaises(Exception, queue.put, object)
 
         # Make queue finalizer run before the server is stopped
         del queue
         if not gc.isenabled():
             gc.enable()
             self.addCleanup(gc.disable)
-        thresholds = gc.get_threshold()
-        self.addCleanup(gc.set_threshold, *thresholds)
-        gc.set_threshold(10)
+        #thresholds = gc.get_threshold()
+        #self.addCleanup(gc.set_threshold, *thresholds)
+        #gc.set_threshold(10)
 
         # perform numerous block allocations, with cyclic references to make
         # sure objects are collected asynchronously by the gc
     def test_synchronize(self):
         self.test_sharedctypes(lock=True)
 
+    @unittest.skipUnless(test_support.check_impl_detail(pypy=False), "pypy ctypes differences")
     def test_copy(self):
         foo = _Foo(2, 5.0)
         bar = copy(foo)

lib-python/2.7/test/test_zipfile.py

             self.assertEqual(data1, '1'*FIXEDTEST_SIZE)
             self.assertEqual(data2, '2'*FIXEDTEST_SIZE)
 
+    def test_many_opens(self):
+        # Verify that read() and open() promptly close the file descriptor,
+        # and don't rely on the garbage collector to free resources.
+        with zipfile.ZipFile(TESTFN2, mode="r") as zipf:
+            for x in range(100):
+                zipf.read('ones')
+                with zipf.open('ones') as zopen1:
+                    pass
+        for x in range(10):
+            self.assertLess(open('/dev/null').fileno(), 100)
+
     def tearDown(self):
         unlink(TESTFN2)
 

lib-python/2.7/zipfile.py

     # Search for universal newlines or line chunks.
     PATTERN = re.compile(r'^(?P<chunk>[^\r\n]+)|(?P<newline>\n|\r\n?)')
 
-    def __init__(self, fileobj, mode, zipinfo, decrypter=None):
+    def __init__(self, fileobj, mode, zipinfo, decrypter=None,
+                 close_fileobj=False):
         self._fileobj = fileobj
         self._decrypter = decrypter
+        self._close_fileobj = close_fileobj
 
         self._compress_type = zipinfo.compress_type
         self._compress_size = zipinfo.compress_size
         self._offset += len(data)
         return data
 
+    def close(self):
+        try:
+            if self._close_fileobj:
+                self._fileobj.close()
+        finally:
+            super(ZipExtFile, self).close()
 
 
 class ZipFile:
 
     def read(self, name, pwd=None):
         """Return file bytes (as a string) for name."""
-        return self.open(name, "r", pwd).read()
+        with self.open(name, "r", pwd) as fp:
+            return fp.read()
 
     def open(self, name, mode="r", pwd=None):
         """Return file-like object for 'name'."""
             zinfo = name
         else:
             # Get info object for name
-            zinfo = self.getinfo(name)
-
+            try:
+                zinfo = self.getinfo(name)
+            except KeyError:
+                if not self._filePassed:
+                    zef_file.close()
+                raise
         zef_file.seek(zinfo.header_offset, 0)
 
         # Skip the file header:
             zef_file.read(fheader[_FH_EXTRA_FIELD_LENGTH])
 
         if fname != zinfo.orig_filename:
+            if not self._filePassed:
+                zef_file.close()
             raise BadZipfile, \
                       'File name in directory "%s" and header "%s" differ.' % (
                           zinfo.orig_filename, fname)
             if not pwd:
                 pwd = self.pwd
             if not pwd:
+                if not self._filePassed:
+                    zef_file.close()
                 raise RuntimeError, "File %s is encrypted, " \
                       "password required for extraction" % name
 
                 # compare against the CRC otherwise
                 check_byte = (zinfo.CRC >> 24) & 0xff
             if ord(h[11]) != check_byte:
+                if not self._filePassed:
+                    zef_file.close()
                 raise RuntimeError("Bad password for file", name)
 
-        return  ZipExtFile(zef_file, mode, zinfo, zd)
+        return ZipExtFile(zef_file, mode, zinfo, zd,
+                          close_fileobj=not self._filePassed)
 
     def extract(self, member, path=None, pwd=None):
         """Extract a member from the archive to the current working directory,
             return targetpath
 
         source = self.open(member, pwd=pwd)
-        target = file(targetpath, "wb")
+        target = open(targetpath, "wb")
         shutil.copyfileobj(source, target)
         source.close()
         target.close()
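
As a usage sketch of what this hunk is after (the archive and member names are hypothetical): read() now closes the descriptor it opened itself, and a handle returned by open() releases the extra descriptor once the caller closes it:

    import zipfile

    with zipfile.ZipFile('archive.zip') as zf:     # hypothetical archive
        data = zf.read('member.txt')               # descriptor opened and closed internally
        member = zf.open('member.txt')
        try:
            first_line = member.readline()
        finally:
            member.close()   # closes the extra descriptor (archive opened by name)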

lib-python/conftest.py

     RegrTest('test_bz2.py', usemodules='bz2'),
     RegrTest('test_calendar.py'),
     RegrTest('test_call.py', core=True),
-    RegrTest('test_capi.py', skip="not applicable"),
+    RegrTest('test_capi.py'),
     RegrTest('test_cfgparser.py'),
     RegrTest('test_cgi.py'),
     RegrTest('test_charmapcodec.py', core=True),
     RegrTest('test_modulefinder.py'),
     RegrTest('test_msilib.py'),
     RegrTest('test_multibytecodec.py', usemodules='_multibytecodec'),
-    RegrTest('test_multiprocessing.py', skip=True),
+    RegrTest('test_multiprocessing.py'),
     RegrTest('test_mutants.py', core="possibly"),
     RegrTest('test_netrc.py'),
     RegrTest('test_nis.py'),

lib_pypy/_ctypes_test.py

 import os, sys
 import tempfile
-import gc
 
 def compile_shared():
     """Compile '_ctypes_test.c' into an extension module, and import it
     if sys.platform == 'win32':
         ccflags = ['-D_CRT_SECURE_NO_WARNINGS']
     else:
-        ccflags = ['-fPIC']
+        ccflags = ['-fPIC', '-Wimplicit-function-declaration']
     res = compiler.compile([os.path.join(thisdir, '_ctypes_test.c')],
                            include_dirs=[include_dir],
                            extra_preargs=ccflags)
             library = os.path.join(thisdir, '..', 'include', 'python27')
         if not os.path.exists(library + '.lib'):
             # For a local translation
-            library = os.path.join(thisdir, '..', 'pypy', 'translator',
-                    'goal', 'libpypy-c')
+            library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c')
         libraries = [library, 'oleaut32']
-        extra_ldargs = ['/MANIFEST'] # needed for VC10
+        extra_ldargs = ['/MANIFEST',  # needed for VC10
+                        '/EXPORT:init_ctypes_test']
     else:
         libraries = []
         extra_ldargs = []

lib_pypy/_testcapi.py

             library = os.path.join(thisdir, '..', 'include', 'python27')
         if not os.path.exists(library + '.lib'):
             # For a local translation
-            library = os.path.join(thisdir, '..', 'pypy', 'translator',
-                    'goal', 'libpypy-c')
+            library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c')
         libraries = [library, 'oleaut32']
         extra_ldargs = ['/MANIFEST',  # needed for VC10
                         '/EXPORT:init_testcapi']

pypy/bin/checkmodule.py

 """
 import sys, os
 
+sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
+
 from pypy.objspace.fake.checkmodule import checkmodule
 
 def main(argv):

pypy/doc/cpython_differences.rst

   type and vice versa. For builtin types, a dictionary will be returned that
   cannot be changed (but still looks and behaves like a normal dictionary).
 
-* the ``__len__`` or ``__length_hint__`` special methods are sometimes
-  called by CPython to get a length estimate to preallocate internal arrays.
-  So far, PyPy never calls ``__len__`` for this purpose, and never calls
-  ``__length_hint__`` at all.
-
 
 .. include:: _ref.txt

pypy/doc/discussion/improve-rpython.rst

     from pypy.interpreter.typedef import interp_attrproperty, interp_attrproperty_w
     from pypy.interpreter.gateway import interp2app
     from pypy.interpreter.error import OperationError
-    from pypy.rpython.lltypesystem import rffi, lltype
+    from rpython.rtyper.lltypesystem import rffi, lltype
 
 - A more direct declarative way to write Typedef::
 
 Do I have to rewrite my programs in RPython?
 --------------------------------------------
 
-No.  And you shouldn't try.  PyPy always runs your code in its own interpreter, which is a
-full and compliant Python 2.7 interpreter.  RPython is only the
-language in which parts of PyPy itself are written and extension
-modules for it.  Not only is it not necessary for you to rewrite your
-code in RPython, it probably won't give you any speed improvements if you 
-try.
+No.  And you shouldn't try.  First and foremost, RPython is a language
+that is designed to write interpreters in.  It is a restricted subset of
+Python.  If your program is not an interpreter but tries to do "real
+things", like use *any* part of the standard Python library or *any*
+3rd-party library, then it is not RPython to start with.  You should
+only look at RPython if you try to `write your own interpreter`__.
+
+.. __: `how do I compile my own interpreters`_
+
+If your goal is to speed up Python code, then look at the regular PyPy,
+which is a full and compliant Python 2.7 interpreter (which happens to
+be written in RPython).  Not only is it not necessary for you to rewrite
+your code in RPython, it might not give you any speed improvements even
+if you manage to.
+
+Yes, it is possible with enough effort to compile small self-contained
+pieces of RPython code doing a few performance-sensitive things.  But
+this case is not interesting for us.  If you needed to rewrite the code
+in RPython, you could as well have rewritten it in C for example.  The
+latter is a much more supported, much more documented language `:-)`
 
 ---------------------------------------------------
 Which backends are there for the RPython toolchain?

pypy/doc/rffi.rst

 lltype.Signed or lltype.Array) and memory management must be done
 by hand. To declare a function, we write::
 
-  from pypy.rpython.lltypesystem import rffi
+  from rpython.rtyper.lltypesystem import rffi
 
   external_function = rffi.llexternal(name, args, result)
 
 libraries and sources by passing in the optional ``compilation_info``
 parameter::
 
-  from pypy.rpython.lltypesystem import rffi
+  from rpython.rtyper.lltypesystem import rffi
   from rpython.translator.tool.cbuild import ExternalCompilationInfo
 
   info = ExternalCompilationInfo(includes=[], libraries=[])
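
A minimal, self-contained sketch of the pattern described above, assuming one wants to bind the C library function labs (the names here are purely illustrative):

    from rpython.rtyper.lltypesystem import rffi
    from rpython.translator.tool.cbuild import ExternalCompilationInfo

    info = ExternalCompilationInfo(includes=['stdlib.h'])

    # declare `long labs(long)` as an external function callable from RPython
    c_labs = rffi.llexternal('labs', [rffi.LONG], rffi.LONG,
                             compilation_info=info)

    def my_abs(x):
        return c_labs(x)    # usable from RPython code fed to the translator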

pypy/interpreter/baseobjspace.py

                     )
                 raise
             w_fd = self.call_function(w_fileno)
-            if not self.isinstance_w(w_fd, self.w_int):
+            if (not self.isinstance_w(w_fd, self.w_int) and
+                not self.isinstance_w(w_fd, self.w_long)):
                 raise OperationError(self.w_TypeError,
-                    self.wrap("fileno() must return an integer")
+                    self.wrap("fileno() returned a non-integer")
                 )
         fd = self.int_w(w_fd)
         if fd < 0:

pypy/interpreter/miscutils.py

     def setvalue(self, value):
         self._value = value
 
-    def getmainthreadvalue(self):
-        return self._value
+    def ismainthread(self):
+        return True
 
     def getallvalues(self):
         return {0: self._value}
-

pypy/module/_bisect/interp_bisect.py

 from pypy.interpreter.error import OperationError
 from pypy.interpreter.gateway import unwrap_spec
+from rpython.rlib.rarithmetic import intmask, r_uint
 
 
 @unwrap_spec(lo=int, hi=int)
     if hi == -1:
         hi = space.len_w(w_a)
     while lo < hi:
-        mid = (lo + hi) >> 1
+        mid = intmask((r_uint(lo) + r_uint(hi)) >> 1)
         w_litem = space.getitem(w_a, space.wrap(mid))
         if space.is_true(space.lt(w_litem, w_x)):
             lo = mid + 1
     if hi == -1:
         hi = space.len_w(w_a)
     while lo < hi:
-        mid = (lo + hi) >> 1
+        mid = intmask((r_uint(lo) + r_uint(hi)) >> 1)
         w_litem = space.getitem(w_a, space.wrap(mid))
         if space.is_true(space.lt(w_x, w_litem)):
             hi = mid
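
A side note on the midpoint change above: with fixed-width signed integers, lo + hi can wrap around when both bounds are huge, while adding in unsigned arithmetic and shifting stays exact. A small illustrative sketch in plain Python, using an explicit mask to mimic 64-bit words:

    MASK64 = (1 << 64) - 1                 # simulate 64-bit unsigned arithmetic

    def safe_mid(lo, hi):
        # same idea as intmask((r_uint(lo) + r_uint(hi)) >> 1): take the sum
        # modulo 2**64, then halve; exact for 0 <= lo <= hi < 2**63
        return ((lo + hi) & MASK64) >> 1

    assert safe_mid(2**62, 2**62 + 4) == 2**62 + 2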

pypy/module/_bisect/test/test_bisect.py

         insort_right(a, 6.0)
         assert a == [0, 5, 6, 6, 6, 6.0, 7]
         assert list(map(type, a)) == [int, int, int, int, int, float, int]
+
+    def test_bisect_overflow(self):
+        from _bisect import bisect_left, bisect_right
+        import sys
+
+        size = sys.maxsize
+        data = range(size - 1)
+        assert bisect_left(data, size - 3) == size - 3
+        assert bisect_right(data, size - 3) == size - 2

pypy/module/_io/interp_fileio.py

 
         self.readable, self.writable, append, flags = decode_mode(space, mode)
 
-        if fd >= 0:
-            verify_fd(fd)
-            try:
-                os.fstat(fd)
-            except OSError, e:
-                if e.errno == errno.EBADF:
-                    raise wrap_oserror(space, e)
-                # else: pass
-            self.fd = fd
-            self.closefd = bool(closefd)
-        else:
-            if not closefd:
-                raise OperationError(space.w_ValueError, space.wrap(
-                    "Cannot use closefd=False with file name"))
-            self.closefd = True
+        fd_is_own = False
+        try:
+            if fd >= 0:
+                verify_fd(fd)
+                try:
+                    os.fstat(fd)
+                except OSError, e:
+                    if e.errno == errno.EBADF:
+                        raise wrap_oserror(space, e)
+                    # else: pass
+                self.fd = fd
+                self.closefd = bool(closefd)
+            else:
+                self.closefd = True
+                if not closefd:
+                    raise OperationError(space.w_ValueError, space.wrap(
+                        "Cannot use closefd=False with file name"))
 
-            from pypy.module.posix.interp_posix import (
-                dispatch_filename, rposix)
-            try:
-                self.fd = dispatch_filename(rposix.open)(
-                    space, w_name, flags, 0666)
-            except OSError, e:
-                raise wrap_oserror2(space, e, w_name,
-                                    exception_name='w_IOError')
+                from pypy.module.posix.interp_posix import (
+                    dispatch_filename, rposix)
+                try:
+                    self.fd = dispatch_filename(rposix.open)(
+                        space, w_name, flags, 0666)
+                except OSError, e:
+                    raise wrap_oserror2(space, e, w_name,
+                                        exception_name='w_IOError')
+                finally:
+                    fd_is_own = True
 
             self._dircheck(space, w_name)
-        self.w_name = w_name
+            self.w_name = w_name
 
-        if append:
-            # For consistent behaviour, we explicitly seek to the end of file
-            # (otherwise, it might be done only on the first write()).
-            try:
-                os.lseek(self.fd, 0, os.SEEK_END)
-            except OSError, e:
-                raise wrap_oserror(space, e, exception_name='w_IOError')
+            if append:
+                # For consistent behaviour, we explicitly seek to the end of file
+                # (otherwise, it might be done only on the first write()).
+                try:
+                    os.lseek(self.fd, 0, os.SEEK_END)
+                except OSError, e:
+                    raise wrap_oserror(space, e, exception_name='w_IOError')
+        except:
+            if not fd_is_own:
+                self.fd = -1
+            raise
 
     def _mode(self):
         if self.readable:
         except OSError:
             return
         if stat.S_ISDIR(st.st_mode):
-            self._close(space)
             raise wrap_oserror2(space, OSError(errno.EISDIR, "fstat"),
                                 w_filename, exception_name='w_IOError')
 

pypy/module/_io/test/test_fileio.py

 
     def test_open_directory(self):
         import _io
+        import os
         raises(IOError, _io.FileIO, self.tmpdir, "rb")
+        if os.name != 'nt':
+            fd = os.open(self.tmpdir, os.O_RDONLY)
+            raises(IOError, _io.FileIO, fd, "rb")
+            os.close(fd)
 
     def test_readline(self):
         import _io

pypy/module/cpyext/longobject.py

     Return a C unsigned long representation of the contents of pylong.
     If pylong is greater than ULONG_MAX, an OverflowError is
     raised."""
-    return rffi.cast(rffi.ULONG, space.uint_w(w_long))
+    try:
+        return rffi.cast(rffi.ULONG, space.uint_w(w_long))
+    except OperationError, e:
+        if e.match(space, space.w_ValueError):
+            e.w_type = space.w_OverflowError
+        raise
 
 @cpython_api([PyObject], rffi.ULONG, error=-1)
 def PyLong_AsUnsignedLongMask(space, w_long):
     Return a C unsigned long representation of the contents of pylong.
     If pylong is greater than ULONG_MAX, an OverflowError is
     raised."""
-    return rffi.cast(rffi.ULONGLONG, space.r_ulonglong_w(w_long))
+    try:
+        return rffi.cast(rffi.ULONGLONG, space.r_ulonglong_w(w_long))
+    except OperationError, e:
+        if e.match(space, space.w_ValueError):
+            e.w_type = space.w_OverflowError
+        raise
 
 @cpython_api([PyObject], rffi.ULONGLONG, error=-1)
 def PyLong_AsUnsignedLongLongMask(space, w_long):

pypy/module/cpyext/object.py

 
 @cpython_api([PyObject], PyObject)
 def PyObject_Str(space, w_obj):
+    if w_obj is None:
+        return space.wrap("<NULL>")
     return space.str(w_obj)
 
 @cpython_api([PyObject], PyObject)
     representation on success, NULL on failure.  This is the equivalent of the
     Python expression repr(o).  Called by the repr() built-in function and
     by reverse quotes."""
+    if w_obj is None:
+        return space.wrap("<NULL>")
     return space.repr(w_obj)
 
 @cpython_api([PyObject], PyObject)
     string representation on success, NULL on failure. This is the equivalent of
     the Python expression unicode(o).  Called by the unicode() built-in
     function."""
+    if w_obj is None:
+        return space.wrap(u"<NULL>")
     return space.call_function(space.w_unicode, w_obj)
 
 @cpython_api([PyObject, PyObject, rffi.INT_real], PyObject)

pypy/module/cpyext/test/test_longobject.py

         value = api.PyLong_AsUnsignedLong(w_value)
         assert value == (sys.maxint - 1) * 2
 
+        self.raises(space, api, OverflowError, api.PyLong_AsUnsignedLong, space.wrap(-1))
+
     def test_as_ssize_t(self, space, api):
         w_value = space.newlong(2)
         value = api.PyLong_AsSsize_t(w_value)
         w_l = space.wrap(sys.maxint + 1)
         assert api.PyLong_Check(w_l)
         assert api.PyLong_CheckExact(w_l)
-        
+
         w_i = space.wrap(sys.maxint)
         assert not api.PyLong_Check(w_i)
         assert not api.PyLong_CheckExact(w_i)
-        
+
         L = space.appexec([], """():
             class L(long):
                 pass
         assert api.PyLong_AsUnsignedLongLongMask(
             space.wrap(1<<64)) == 0
 
+        self.raises(space, api, OverflowError, api.PyLong_AsUnsignedLongLong, space.wrap(-1))
+
     def test_as_long_and_overflow(self, space, api):
         overflow = lltype.malloc(rffi.CArrayPtr(rffi.INT_real).TO, 1, flavor='raw')
         assert api.PyLong_AsLongAndOverflow(

pypy/module/cpyext/test/test_object.py

     def test_size(self, space, api):
         assert api.PyObject_Size(space.newlist([space.w_None])) == 1
 
+    def test_str(self, space, api):
+        w_list = space.newlist([space.w_None, space.wrap(42)])
+        assert space.str_w(api.PyObject_Str(None)) == "<NULL>"
+        assert space.str_w(api.PyObject_Str(w_list)) == "[None, 42]"
+        assert space.str_w(api.PyObject_Str(space.wrap("a"))) == "a"
+
     def test_repr(self, space, api):
         w_list = space.newlist([space.w_None, space.wrap(42)])
+        assert space.str_w(api.PyObject_Repr(None)) == "<NULL>"
         assert space.str_w(api.PyObject_Repr(w_list)) == "[None, 42]"
         assert space.str_w(api.PyObject_Repr(space.wrap("a"))) == "'a'"
 
-        w_list = space.newlist([space.w_None, space.wrap(42)])
-        assert space.str_w(api.PyObject_Str(w_list)) == "[None, 42]"
-        assert space.str_w(api.PyObject_Str(space.wrap("a"))) == "a"
-
     def test_RichCompare(self, space, api):
         def compare(w_o1, w_o2, opid):
             res = api.PyObject_RichCompareBool(w_o1, w_o2, opid)

pypy/module/fcntl/test/test_fcntl.py

         import sys
         import struct
 
+        class F:
+            def __init__(self, fn):
+                self.fn = fn
+            def fileno(self):
+                return self.fn
+
         f = open(self.tmp + "b", "w+")
 
         fcntl.fcntl(f, 1, 0)
         fcntl.fcntl(f, 1)
+        fcntl.fcntl(F(long(f.fileno())), 1)
         raises(TypeError, fcntl.fcntl, "foo")
         raises(TypeError, fcntl.fcntl, f, "foo")
-        raises((IOError, ValueError), fcntl.fcntl, -1, 1, 0)
+        raises(TypeError, fcntl.fcntl, F("foo"), 1)
+        raises(ValueError, fcntl.fcntl, -1, 1, 0)
+        raises(ValueError, fcntl.fcntl, F(-1), 1, 0)
+        raises(ValueError, fcntl.fcntl, F(long(-1)), 1, 0)
         assert fcntl.fcntl(f, 1, 0) == 0
         assert fcntl.fcntl(f, 2, "foo") == b"foo"
         assert fcntl.fcntl(f, 2, memoryview(b"foo")) == b"foo"

pypy/module/signal/interp_signal.py

     A signal handler function is called with two arguments:
     the first is the signal number, the second is the interrupted stack frame.
     """
-    ec = space.getexecutioncontext()
-    main_ec = space.threadlocals.getmainthreadvalue()
-
-    old_handler = getsignal(space, signum)
-
-    if ec is not main_ec:
+    if not space.threadlocals.ismainthread():
         raise OperationError(space.w_ValueError,
                              space.wrap("signal() must be called from the "
                                         "main thread"))
+    old_handler = getsignal(space, signum)
+
     action = space.check_signal_action
     if space.eq_w(w_handler, space.wrap(SIG_DFL)):
         pypysig_default(signum)
 
     The fd must be non-blocking.
     """
-    if space.config.objspace.usemodules.thread:
-        main_ec = space.threadlocals.getmainthreadvalue()
-        ec = space.getexecutioncontext()
-        if ec is not main_ec:
-            raise OperationError(
-                space.w_ValueError,
-                space.wrap("set_wakeup_fd only works in main thread"))
+    if not space.threadlocals.ismainthread():
+        raise OperationError(
+            space.w_ValueError,
+            space.wrap("set_wakeup_fd only works in main thread"))
     old_fd = pypysig_set_wakeup_fd(fd)
     return space.wrap(intmask(old_fd))
 

pypy/module/thread/os_lock.py

File contents unchanged.

pypy/module/thread/test/support.py

 import time
 import thread
 import os
+import errno
 
 from pypy.interpreter.gateway import interp2app, unwrap_spec
 from pypy.module.thread import gil
     def kill():
         for x in range(delay * 10):
             time.sleep(0.1)
-            os.kill(pid, 0)
+            try:
+                os.kill(pid, 0)
+            except OSError, e:
+                if e.errno == errno.ESRCH: # no such process
+                    return
+                raise
         os.kill(pid, 9)
         print "process %s killed!" % (pid,)
     thread.start_new_thread(kill, ())

pypy/module/thread/test/test_fork.py

 from pypy.module.thread.test.support import GenericTestThread
 
 class AppTestFork(GenericTestThread):
-    def test_fork(self):
+    def test_fork_with_thread(self):
         # XXX This test depends on a multicore machine, as busy_thread must
         # aquire the GIL the instant that the main thread releases it.
         # It will incorrectly pass if the GIL is not grabbed in time.
         if not hasattr(os, 'fork'):
             skip("No fork on this platform")
 
-        run = True
-        done = []
         def busy_thread():
             while run:
                 time.sleep(0)
             done.append(None)
 
-        try:
-            _thread.start_new(busy_thread, ())
+        for i in range(1):
+            run = True
+            done = []
+            try:
+                thread.start_new(busy_thread, ())
+                print('sleep')
 
-            pid = os.fork()
-
-            if pid == 0:
-                os._exit(0)
-
-            else:
-                time.sleep(1)
-                spid, status = os.waitpid(pid, os.WNOHANG)
-                assert spid == pid
-        finally:
-            run = False
-            self.waitfor(lambda: done)
+                pid = os.fork()
+                if pid == 0:
+                    os._exit(0)
+                else:
+                    self.timeout_killer(pid, 5)
+                    exitcode = os.waitpid(pid, 0)[1]
+                    assert exitcode == 0 # if 9, process was killed by timer!
+            finally:
+                run = False
+                self.waitfor(lambda: done)
+                assert done
 
     def test_forked_can_thread(self):
         "Checks that a forked interpreter can start a thread"
-        import os, _thread, time
+        import _thread
+        import os
 
         if not hasattr(os, 'fork'):
             skip("No fork on this platform")
 
-        # pre-allocate some locks
-        _thread.start_new_thread(lambda: None, ())
+        for i in range(10):
+            # pre-allocate some locks
+            _thread.start_new_thread(lambda: None, ())
+            print('sleep')
 
-        pid = os.fork()
-        if pid == 0:
-            print('in child')
-            _thread.start_new_thread(lambda: None, ())
-            os._exit(0)
-        else:
-            self.timeout_killer(pid, 5)
-            exitcode = os.waitpid(pid, 0)[1]
-            assert exitcode == 0 # if 9, process was killed by timer!
+            pid = os.fork()
+            if pid == 0:
+                _thread.start_new_thread(lambda: None, ())
+                os._exit(0)
+            else:
+                self.timeout_killer(pid, 5)
+                exitcode = os.waitpid(pid, 0)[1]
+                assert exitcode == 0 # if 9, process was killed by timer!

pypy/module/thread/test/test_lock.py

         assert lock.locked() is False
         raises(_thread.error, lock.release)
         assert lock.locked() is False
-        lock.acquire()
+        r = lock.acquire()
+        assert r is True
+        r = lock.acquire(False)
+        assert r is False
         assert lock.locked() is True
         lock.release()
         assert lock.locked() is False

pypy/module/thread/threadlocals.py

         self._mostrecentkey = ident
         self._mostrecentvalue = value
 
-    def getmainthreadvalue(self):
-        ident = self._mainthreadident
-        return self._valuedict.get(ident, None)
-
     def ismainthread(self):
         return thread.get_ident() == self._mainthreadident
 

rpython/flowspace/framestate.py

+from rpython.flowspace.model import Variable, Constant
 from rpython.rlib.unroll import SpecTag
-from rpython.flowspace.model import *
+
 
 class FrameState:
     def __init__(self, mergeable, blocklist, next_instr):
     raise TypeError('union of %r and %r' % (w1.__class__.__name__,
                                             w2.__class__.__name__))
 
+
 # ____________________________________________________________
 #
 # We have to flatten out the state of the frame into a list of
 PICKLE_TAGS = {}
 UNPICKLE_TAGS = {}
 
+
 def recursively_flatten(space, lst):
     from rpython.flowspace.flowcontext import SuspendedUnroller
     i = 0
             except:
                 tag = PICKLE_TAGS[key] = Constant(PickleTag())
                 UNPICKLE_TAGS[tag] = key
-            lst[i:i+1] = [tag] + vars
+            lst[i:i + 1] = [tag] + vars
+
 
 def recursively_unflatten(space, lst):
-    for i in xrange(len(lst)-1, -1, -1):
+    for i in xrange(len(lst) - 1, -1, -1):
         item = lst[i]
         if item in UNPICKLE_TAGS:
             unrollerclass, argcount = UNPICKLE_TAGS[item]
-            arguments = lst[i+1: i+1+argcount]
-            del lst[i+1: i+1+argcount]
+            arguments = lst[i + 1:i + 1 + argcount]
+            del lst[i + 1:i + 1 + argcount]
             unroller = unrollerclass.state_pack_variables(space, *arguments)
             lst[i] = unroller

rpython/jit/metainterp/optimizeopt/unroll.py

-from rpython.jit.codewriter.effectinfo import EffectInfo
+import sys
+
+from rpython.jit.metainterp.compile import ResumeGuardDescr
+from rpython.jit.metainterp.history import TargetToken, JitCellToken, Const
+from rpython.jit.metainterp.inliner import Inliner
+from rpython.jit.metainterp.optimize import InvalidLoop
+from rpython.jit.metainterp.optimizeopt.generalize import KillHugeIntBounds
+from rpython.jit.metainterp.optimizeopt.optimizer import Optimizer, Optimization
 from rpython.jit.metainterp.optimizeopt.virtualstate import VirtualStateAdder, ShortBoxes, BadVirtualState
-from rpython.jit.metainterp.compile import ResumeGuardDescr
-from rpython.jit.metainterp.history import TreeLoop, TargetToken, JitCellToken
-from rpython.jit.metainterp.jitexc import JitException
-from rpython.jit.metainterp.optimize import InvalidLoop
-from rpython.rlib.debug import debug_print, debug_start, debug_stop
-from rpython.jit.metainterp.optimizeopt.optimizer import *
-from rpython.jit.metainterp.optimizeopt.generalize import KillHugeIntBounds
-from rpython.jit.metainterp.inliner import Inliner
 from rpython.jit.metainterp.resoperation import rop, ResOperation
 from rpython.jit.metainterp.resume import Snapshot
-import sys, os
+from rpython.rlib.debug import debug_print, debug_start, debug_stop
+
 
 # FIXME: Introduce some VirtualOptimizer super class instead
 
     opt.inline_short_preamble = inline_short_preamble
     opt.propagate_all_forward()
 
+
 class UnrollableOptimizer(Optimizer):
     def setup(self):
         self.importable_values = {}
     distinction anymore)"""
 
     inline_short_preamble = True
-    
+
     def __init__(self, metainterp_sd, loop, optimizations):
         self.optimizer = UnrollableOptimizer(metainterp_sd, loop, optimizations)
         self.boxes_created_this_iteration = None
     def fix_snapshot(self, jump_args, snapshot):
         if snapshot is None:
             return None
-        snapshot_args = snapshot.boxes 
+        snapshot_args = snapshot.boxes
         new_snapshot_args = []
         for a in snapshot_args:
             a = self.getvalue(a).get_key_box()
             new_snapshot_args.append(a)
         prev = self.fix_snapshot(jump_args, snapshot.prev)
         return Snapshot(prev, new_snapshot_args)
-            
+
     def propagate_all_forward(self):
         loop = self.optimizer.loop
         self.optimizer.clear_newoperations()
             # will clear heap caches
             self.optimizer.send_extra_operation(start_label)
         else:
-            start_label = None            
+            start_label = None
 
         jumpop = loop.operations[-1]
         if jumpop.getopnum() == rop.JUMP or jumpop.getopnum() == rop.LABEL:
         assert isinstance(cell_token, JitCellToken)
         stop_label = ResOperation(rop.LABEL, jumpop.getarglist(), None, TargetToken(cell_token))
 
-        
         if jumpop.getopnum() == rop.JUMP:
             if self.jump_to_already_compiled_trace(jumpop):
                 # Found a compiled trace to jump to
                     return
 
         # Found nothing to jump to, emit a label instead
-        
+
         if self.short:
             # Construct our short preamble
             assert start_label
     def jump_to_start_label(self, start_label, stop_label):
         if not start_label or not stop_label:
             return False
-        
+
         stop_target = stop_label.getdescr()
         start_target = start_label.getdescr()
         assert isinstance(stop_target, TargetToken)
         #virtual_state = modifier.get_virtual_state(args)
         #if self.initial_virtual_state.generalization_of(virtual_state):
         #    return True
-        
 
     def export_state(self, targetop):
         original_jump_args = targetop.getarglist()
 
         modifier = VirtualStateAdder(self.optimizer)
         virtual_state = modifier.get_virtual_state(jump_args)
-            
+
         values = [self.getvalue(arg) for arg in jump_args]
         inputargs = virtual_state.make_inputargs(values, self.optimizer)
         short_inputargs = virtual_state.make_inputargs(values, self.optimizer, keyboxes=True)
 
-
         if self.boxes_created_this_iteration is not None:
             for box in self.inputargs:
                 self.boxes_created_this_iteration[box] = True
             if op and op.result:
                 box = op.result
                 exported_values[box] = self.optimizer.getvalue(box)
-            
+
         target_token.exported_state = ExportedState(short_boxes, inputarg_setup_ops,
                                                     exported_values)
 
             virtual_state = modifier.get_virtual_state(self.inputargs)
             self.initial_virtual_state = virtual_state
             return
-        
+
         self.short = target_token.short_preamble[:]
         self.short_seen = {}
         self.short_boxes = exported_state.short_boxes
                     #if self.optimizer.loop.logops:
                     #    debug_print('  Falling back to add extra: ' +
                     #                self.optimizer.loop.logops.repr_of_resop(op))
-                    
+
         self.optimizer.flush()
         self.optimizer.emitting_dissabled = False
 
         # We dont need to inline the short preamble we are creating as we are conneting
         # the bridge to a different trace with a different short preamble
         self.short_inliner = None
-        
+
         newoperations = self.optimizer.get_newoperations()
         self.boxes_created_this_iteration = {}
         i = 0
                                       'same box passed to multiple of its ' +
                                       'inputargs, but the jump at the ' +
                                       'end of this bridge does not do that.')
-                                      
+
             args[short_inputargs[i]] = jmp_to_short_args[i]
         self.short_inliner = Inliner(short_inputargs, jmp_to_short_args)
         i = 1
                               'loop is not compatible with the virtual ' +
                               'state at the start of the loop which makes ' +
                               'it impossible to close the loop')
-            
+
         #debug_stop('jit-log-virtualstate')
 
         maxguards = self.optimizer.metainterp_sd.warmrunnerdesc.memory_manager.max_retrace_guards
             target_token = jumpop.getdescr()
             assert isinstance(target_token, TargetToken)
             target_token.targeting_jitcell_token.retraced_count = sys.maxint
-            
+
         self.finilize_short_preamble(start_label)
-            
+
     def finilize_short_preamble(self, start_label):
         short = self.short
         assert short[-1].getopnum() == rop.JUMP
         if op is None:
             return None
         if op.result is not None and op.result in self.short_seen:
-            if emit and self.short_inliner:                
+            if emit and self.short_inliner:
                 return self.short_inliner.inline_arg(op.result)
             else:
                 return None
-        
+
         for a in op.getarglist():
             if not isinstance(a, Const) and a not in self.short_seen:
                 self.add_op_to_short(self.short_boxes.producer(a), emit, guards_needed)
         if guards_needed and self.short_boxes.has_producer(op.result):
             value_guards = self.getvalue(op.result).make_guards(op.result)
         else:
-            value_guards = []            
+            value_guards = []
 
         self.short.append(op)
         self.short_seen[op.result] = True
         if newop:
             return newop.result
         return None
-        
+
     def import_box(self, box, inputargs, short_jumpargs, jumpargs):
         if isinstance(box, Const) or box in inputargs:
             return
                             classbox = self.getvalue(newop.result).get_constant_class(self.optimizer.cpu)
                             if not classbox or not classbox.same_constant(target.assumed_classes[shop.result]):
                                 raise InvalidLoop('The class of an opaque pointer at the end ' +
-                                                  'of the bridge does not mach the class ' + 
+                                                  'of the bridge does not mach the class ' +
                                                   'it has at the start of the target loop')
                 except InvalidLoop:
                     #debug_print("Inlining failed unexpectedly",
         debug_stop('jit-log-virtualstate')
         return False
 
+
 class ValueImporter(object):
     def __init__(self, unroll, value, op):
         self.unroll = unroll
 
     def import_value(self, value):
         value.import_from(self.preamble_value, self.unroll.optimizer)
-        self.unroll.add_op_to_short(self.op, False, True)        
+        self.unroll.add_op_to_short(self.op, False, True)
+
 
 class ExportedState(object):
     def __init__(self, short_boxes, inputarg_setup_ops, exported_values):

rpython/jit/metainterp/test/test_ajit.py

         assert res == -2
 
     def test_guard_always_changing_value(self):
-        myjitdriver = JitDriver(greens = [], reds = ['x'])
-        class A:
-            pass
+        myjitdriver = JitDriver(greens = [], reds = ['x', 'a'])
         def f(x):
+            a = 0
             while x > 0:
-                myjitdriver.can_enter_jit(x=x)
-                myjitdriver.jit_merge_point(x=x)
-                a = A()
+                myjitdriver.can_enter_jit(x=x, a=a)
+                myjitdriver.jit_merge_point(x=x, a=a)
+                a += 1
                 promote(a)
                 x -= 1
         self.meta_interp(f, [50])

rpython/jit/metainterp/virtualizable.py

+from rpython.jit.metainterp import history
+from rpython.jit.metainterp.typesystem import deref, fieldType, arrayItem
+from rpython.jit.metainterp.warmstate import wrap, unwrap
+from rpython.rlib.unroll import unrolling_iterable
+from rpython.rtyper import rvirtualizable2
 from rpython.rtyper.lltypesystem import lltype, llmemory
 from rpython.rtyper.ootypesystem import ootype
-from rpython.rtyper.annlowlevel import cast_base_ptr_to_instance
 from rpython.rtyper.rclass import IR_IMMUTABLE_ARRAY, IR_IMMUTABLE
-from rpython.rtyper import rvirtualizable2
-from rpython.rlib.unroll import unrolling_iterable
-from rpython.rlib.nonconst import NonConstant
-from rpython.jit.metainterp.typesystem import deref, fieldType, arrayItem
-from rpython.jit.metainterp import history
-from rpython.jit.metainterp.warmstate import wrap, unwrap
-from rpython.rlib.objectmodel import specialize
+
 
 class VirtualizableInfo(object):
     TOKEN_NONE            = 0      # must be 0 -- see also x86.call_assembler
         getlength = cpu.ts.getlength
         getarrayitem = cpu.ts.getarrayitem
         setarrayitem = cpu.ts.setarrayitem
-        #
+
         def read_boxes(cpu, virtualizable):
             assert lltype.typeOf(virtualizable) == llmemory.GCREF
             virtualizable = cast_gcref_to_vtype(virtualizable)
                 for i in range(getlength(lst)):
                     boxes.append(wrap(cpu, getarrayitem(lst, i)))
             return boxes
-        #
+
         def write_boxes(virtualizable, boxes):
             virtualizable = cast_gcref_to_vtype(virtualizable)
             i = 0
                     setarrayitem(lst, j, x)
                     i = i + 1
             assert len(boxes) == i + 1
-        #
+
         def write_from_resume_data_partial(virtualizable, reader, numb):
             virtualizable = cast_gcref_to_vtype(virtualizable)
             # Load values from the reader (see resume.py) described by
             assert i >= 0
             for ARRAYITEMTYPE, fieldname in unroll_array_fields_rev:
                 lst = getattr(virtualizable, fieldname)
-                for j in range(getlength(lst)-1, -1, -1):
+                for j in range(getlength(lst) - 1, -1, -1):
                     i -= 1
                     assert i >= 0
                     x = reader.load_value_of_type(ARRAYITEMTYPE, numb.nums[i])
                 x = reader.load_value_of_type(FIELDTYPE, numb.nums[i])
                 setattr(virtualizable, fieldname, x)
             return i
-        #
+
         def load_list_of_boxes(virtualizable, reader, numb):
             virtualizable = cast_gcref_to_vtype(virtualizable)
             # Uses 'virtualizable' only to know the length of the arrays;
             boxes = [reader.decode_box_of_type(self.VTYPEPTR, numb.nums[i])]
             for ARRAYITEMTYPE, fieldname in unroll_array_fields_rev:
                 lst = getattr(virtualizable, fieldname)
-                for j in range(getlength(lst)-1, -1, -1):
+                for j in range(getlength(lst) - 1, -1, -1):
                     i -= 1
                     assert i >= 0
-                    box = reader.decode_box_of_type(ARRAYITEMTYPE,numb.nums[i])
+                    box = reader.decode_box_of_type(ARRAYITEMTYPE, numb.nums[i])
                     boxes.append(box)
             for FIELDTYPE, fieldname in unroll_static_fields_rev:
                 i -= 1
                 boxes.append(box)
             boxes.reverse()
             return boxes
-        #
+
         def check_boxes(virtualizable, boxes):
             virtualizable = cast_gcref_to_vtype(virtualizable)
             # for debugging
                     assert getarrayitem(lst, j) == x
                     i = i + 1
             assert len(boxes) == i + 1
-        #
+
         def get_index_in_array(virtualizable, arrayindex, index):
             virtualizable = cast_gcref_to_vtype(virtualizable)
             index += self.num_static_extra_boxes
                 index += getlength(lst)
                 j = j + 1
             assert False, "invalid arrayindex"
-        #
+
         def get_array_length(virtualizable, arrayindex):
             virtualizable = cast_gcref_to_vtype(virtualizable)
             j = 0
                 if arrayindex == j:
                     lst = getattr(virtualizable, fieldname)
                     return getlength(lst)
-                j = j + 1
+                j += 1
             assert False, "invalid arrayindex"
-        #
+
         unroll_static_fields = unrolling_iterable(zip(FIELDTYPES,
                                                       static_fields))
         unroll_array_fields = unrolling_iterable(zip(ARRAYITEMTYPES,
                                                      array_fields))
         unroll_static_fields_rev = unrolling_iterable(
                                           reversed(list(unroll_static_fields)))
-        unroll_array_fields_rev  = unrolling_iterable(
+        unroll_array_fields_rev = unrolling_iterable(
                                           reversed(list(unroll_array_fields)))
         self.read_boxes = read_boxes
         self.write_boxes = write_boxes
 
     def unwrap_virtualizable_box(self, virtualizable_box):
         return virtualizable_box.getref(llmemory.GCREF)
-     
+
     def is_vtypeptr(self, TYPE):
         return rvirtualizable2.match_virtualizable_type(TYPE, self.VTYPEPTR)
 

rpython/rlib/rthread.py

         return bool(res)
 
     def acquire_timed(self, timeout):
-        "timeout is in microseconds."
+        """Timeout is in microseconds.  Returns 0 in case of failure,
+        1 in case it works, 2 if interrupted by a signal."""
         res = c_thread_acquirelock_timed(self._lock, timeout, 1)
         res = rffi.cast(lltype.Signed, res)
-        return bool(res)
+        return res
 
     def release(self):
         # Sanity check: the lock must be locked
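
A hypothetical caller of the new tri-state result could map the codes like this (the constants mirror RPyLockStatus in thread.h; the helper name is made up):

    RPY_LOCK_FAILURE, RPY_LOCK_ACQUIRED, RPY_LOCK_INTR = 0, 1, 2

    def acquire_with_timeout(lock, microseconds):
        res = lock.acquire_timed(microseconds)
        if res == RPY_LOCK_ACQUIRED:
            return True        # lock obtained
        elif res == RPY_LOCK_INTR:
            return None        # interrupted by a signal; caller may retry
        else:
            return False       # timed out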

rpython/rlib/test/test_rpath.py

 
 def test_rabspath_relative(tmpdir):
     tmpdir.chdir()
-    assert rpath.rabspath('foo') == tmpdir.join('foo')
+    assert rpath.rabspath('foo') == os.path.realpath(str(tmpdir.join('foo')))
 
 @py.test.mark.skipif("IS_WINDOWS")
 def test_rabspath_absolute_posix():

rpython/rlib/test/test_rthread.py

             l = allocate_lock()
             l.acquire(True)
             t1 = time.time()
-            ok = l.acquire_timed(1000000)
+            ok = l.acquire_timed(1000001)
             t2 = time.time()
             delay = t2 - t1
-            if ok:
+            if ok == 0:        # RPY_LOCK_FAILURE
+                return -delay
+            elif ok == 2:      # RPY_LOCK_INTR
                 return delay
-            else:
-                return -delay
+            else:              # RPY_LOCK_ACQUIRED
+                return 0.0
         fn = self.getcompiled(f, [])
         res = fn()
         assert res < -1.0
 
+    def test_acquire_timed_alarm(self):
+        import sys
+        if not sys.platform.startswith('linux'):
+            py.test.skip("skipped on non-linux")
+        import time
+        from rpython.rlib import rsignal
+        def f():
+            l = allocate_lock()
+            l.acquire(True)
+            #
+            rsignal.pypysig_setflag(rsignal.SIGALRM)
+            rsignal.c_alarm(1)
+            #
+            t1 = time.time()
+            ok = l.acquire_timed(2500000)
+            t2 = time.time()
+            delay = t2 - t1
+            if ok == 0:        # RPY_LOCK_FAILURE
+                return -delay
+            elif ok == 2:      # RPY_LOCK_INTR
+                return delay
+            else:              # RPY_LOCK_ACQUIRED
+                return 0.0
+        fn = self.getcompiled(f, [])
+        res = fn()
+        assert res >= 0.95
+
 #class TestRunDirectly(AbstractThreadTests):
 #    def getcompiled(self, f, argtypes):
 #        return f

rpython/rtyper/memory/gc/minimark.py

 from rpython.rtyper.lltypesystem.lloperation import llop
 from rpython.rtyper.lltypesystem.llmemory import raw_malloc_usage
 from rpython.rtyper.memory.gc.base import GCBase, MovingGCBase
-from rpython.rtyper.memory.gc import minimarkpage, env
+from rpython.rtyper.memory.gc import env
 from rpython.rtyper.memory.support import mangle_hash
 from rpython.rlib.rarithmetic import ovfcheck, LONG_BIT, intmask, r_uint
 from rpython.rlib.rarithmetic import LONG_BIT_SHIFT
         #
         # The ArenaCollection() handles the nonmovable objects allocation.
         if ArenaCollectionClass is None:
+            from rpython.rtyper.memory.gc import minimarkpage
             ArenaCollectionClass = minimarkpage.ArenaCollection
         self.ac = ArenaCollectionClass(arena_size, page_size,
                                        small_request_threshold)
                 (obj + offset).address[0] = llmemory.NULL
         self.old_objects_with_weakrefs.delete()
         self.old_objects_with_weakrefs = new_with_weakref
-
-
-# ____________________________________________________________
-
-# For testing, a simple implementation of ArenaCollection.
-# This version could be used together with obmalloc.c, but
-# it requires an extra word per object in the 'all_objects'
-# list.
-
-class SimpleArenaCollection(object):
-
-    def __init__(self, arena_size, page_size, small_request_threshold):
-        self.arena_size = arena_size   # ignored
-        self.page_size = page_size
-        self.small_request_threshold = small_request_threshold
-        self.all_objects = []
-        self.total_memory_used = 0
-
-    def malloc(self, size):
-        nsize = raw_malloc_usage(size)
-        ll_assert(nsize > 0, "malloc: size is null or negative")
-        ll_assert(nsize <= self.small_request_threshold,"malloc: size too big")
-        ll_assert((nsize & (WORD-1)) == 0, "malloc: size is not aligned")
-        #
-        result = llarena.arena_malloc(nsize, False)
-        llarena.arena_reserve(result, size)
-        self.all_objects.append((result, nsize))
-        self.total_memory_used += nsize
-        return result
-
-    def mass_free(self, ok_to_free_func):
-        objs = self.all_objects
-        self.all_objects = []
-        self.total_memory_used = 0
-        for rawobj, nsize in objs:
-            if ok_to_free_func(rawobj):
-                llarena.arena_free(rawobj)
-            else:
-                self.all_objects.append((rawobj, nsize))
-                self.total_memory_used += nsize

rpython/rtyper/memory/gc/minimarktest.py

+from rpython.rtyper.lltypesystem import llarena
+from rpython.rtyper.lltypesystem.llmemory import raw_malloc_usage
+from rpython.rlib.debug import ll_assert
+from rpython.rlib.rarithmetic import LONG_BIT
+
+# For testing, a simple implementation of ArenaCollection.
+# This version could be used together with obmalloc.c, but
+# it requires an extra word per object in the 'all_objects'
+# list.
+
+WORD = LONG_BIT // 8
+
+
+class SimpleArenaCollection(object):
+
+    def __init__(self, arena_size, page_size, small_request_threshold):
+        self.arena_size = arena_size   # ignored
+        self.page_size = page_size
+        self.small_request_threshold = small_request_threshold
+        self.all_objects = []
+        self.total_memory_used = 0
+
+    def malloc(self, size):
+        nsize = raw_malloc_usage(size)
+        ll_assert(nsize > 0, "malloc: size is null or negative")
+        ll_assert(nsize <= self.small_request_threshold,"malloc: size too big")
+        ll_assert((nsize & (WORD-1)) == 0, "malloc: size is not aligned")
+        #
+        result = llarena.arena_malloc(nsize, False)
+        llarena.arena_reserve(result, size)
+        self.all_objects.append((result, nsize))
+        self.total_memory_used += nsize
+        return result
+
+    def mass_free(self, ok_to_free_func):
+        objs = self.all_objects
+        self.all_objects = []
+        self.total_memory_used = 0
+        for rawobj, nsize in objs:
+            if ok_to_free_func(rawobj):
+                llarena.arena_free(rawobj)
+            else:
+                self.all_objects.append((rawobj, nsize))
+                self.total_memory_used += nsize

rpython/rtyper/memory/gc/test/test_direct.py

 
 class TestMiniMarkGCSimple(DirectGCTest):
     from rpython.rtyper.memory.gc.minimark import MiniMarkGC as GCClass
-    from rpython.rtyper.memory.gc.minimark import SimpleArenaCollection
+    from rpython.rtyper.memory.gc.minimarktest import SimpleArenaCollection
     # test the GC itself, providing a simple class for ArenaCollection
     GC_PARAMS = {'ArenaCollectionClass': SimpleArenaCollection}
 

rpython/rtyper/memory/gc/test/test_inspector.py

 
 class TestMiniMarkGCSimple(InspectorTest):
     from rpython.rtyper.memory.gc.minimark import MiniMarkGC as GCClass
-    from rpython.rtyper.memory.gc.minimark import SimpleArenaCollection
+    from rpython.rtyper.memory.gc.minimarktest import SimpleArenaCollection
     GC_PARAMS = {'ArenaCollectionClass': SimpleArenaCollection,
                  "card_page_indices": 4}

rpython/translator/c/genc.py

         srcdir / 'exception.c',
         srcdir / 'rtyper.c',           # ifdef HAVE_RTYPER
         srcdir / 'support.c',
-        srcdir / 'profiling.c',
         srcdir / 'debug_print.c',
         srcdir / 'debug_traceback.c',  # ifdef HAVE_RTYPER
         srcdir / 'asm.c',

rpython/translator/c/src/profiling.c

 #include <sched.h>
 #endif
 
-cpu_set_t base_cpu_set;
-int profiling_setup = 0;
+static cpu_set_t base_cpu_set;
+static int profiling_setup = 0;
 
 void pypy_setup_profiling()
 {
 
 #include <windows.h>
 
-DWORD_PTR base_affinity_mask;
-int profiling_setup = 0;
+static DWORD_PTR base_affinity_mask;
+static int profiling_setup = 0;
 
 void pypy_setup_profiling() { 
     if (!profiling_setup) {

rpython/translator/c/src/thread.h

 typedef enum RPyLockStatus {
     RPY_LOCK_FAILURE = 0,
     RPY_LOCK_ACQUIRED = 1,
-    RPY_LOCK_INTR
+    RPY_LOCK_INTR = 2
 } RPyLockStatus;
 
 #ifdef _WIN32