Commits

Alexander Hesse committed a493f5a Merge

hg merge default

  • Parent commits f169915, 635fed2
  • Branches quiet-rpython

Files changed (32)

File lib-python/2.7/inspect.py

                 raise TypeError('%s() takes exactly 0 arguments '
                                 '(%d given)' % (f_name, num_total))
         else:
-            raise TypeError('%s() takes no argument (%d given)' %
+            raise TypeError('%s() takes no arguments (%d given)' %
                             (f_name, num_total))
     for arg in args:
         if isinstance(arg, str) and arg in named:

File lib-python/2.7/site.py

 USER_SITE = None
 USER_BASE = None
 
+
 def makepath(*paths):
     dir = os.path.join(*paths)
     try:

File lib-python/2.7/sysconfig.py

         'data'   : '{base}',
         },
     'pypy': {
-        'stdlib': '{base}/lib-python',
-        'platstdlib': '{base}/lib-python',
-        'purelib': '{base}/lib-python',
-        'platlib': '{base}/lib-python',
+        'stdlib': '{base}/lib-python/{py_version_short}',
+        'platstdlib': '{base}/lib-python/{py_version_short}',
+        'purelib': '{base}/lib-python/{py_version_short}',
+        'platlib': '{base}/lib-python/{py_version_short}',
         'include': '{base}/include',
         'platinclude': '{base}/include',
         'scripts': '{base}/bin',

File lib-python/2.7/test/test_capi.py

     threading = None
 import _testcapi
 
+skips = []
+if test_support.check_impl_detail(pypy=True):
+    skips += [
+            'test_broken_memoryview',
+            'test_capsule',
+            'test_lazy_hash_inheritance',
+            'test_widechar',
+            'TestThreadState',
+            'TestPendingCalls',
+            ]
+
 @unittest.skipUnless(threading, 'Threading required for this test.')
 class TestPendingCalls(unittest.TestCase):
 
 def test_main():
 
     for name in dir(_testcapi):
-        if name.startswith('test_'):
+        if name.startswith('test_') and name not in skips:
             test = getattr(_testcapi, name)
             if test_support.verbose:
                 print "internal", name
             raise test_support.TestFailed, \
                   "Couldn't find main thread correctly in the list"
 
-    if threading:
+    if threading and 'TestThreadState' not in skips:
         import thread
         import time
         TestThreadState()
         t.start()
         t.join()
 
-    test_support.run_unittest(TestPendingCalls)
+    if 'TestPendingCalls' not in skips:
+        test_support.run_unittest(TestPendingCalls)
 
 if __name__ == "__main__":
     test_main()

File lib-python/2.7/test/test_itertools.py

         self.assertEqual(list(izip()), zip())
         self.assertRaises(TypeError, izip, 3)
         self.assertRaises(TypeError, izip, range(3), 3)
-
         self.assertEqual([tuple(list(pair)) for pair in izip('abc', 'def')],
                          zip('abc', 'def'))
         self.assertEqual([pair for pair in izip('abc', 'def')],
                          zip('abc', 'def'))
+
     @test_support.impl_detail("tuple reuse is specific to CPython")
     def test_izip_tuple_reuse(self):
         ids = map(id, izip('abc', 'def'))
                          zip('abc', 'def'))
         self.assertEqual([pair for pair in izip_longest('abc', 'def')],
                          zip('abc', 'def'))
+
     @test_support.impl_detail("tuple reuse is specific to CPython")
     def test_izip_longest_tuple_reuse(self):
         ids = map(id, izip_longest('abc', 'def'))

File lib-python/2.7/test/test_multiprocessing.py

 
         # Because we are using xmlrpclib for serialization instead of
         # pickle this will cause a serialization error.
-        self.assertRaises(Exception, queue.put, time.sleep)
+        self.assertRaises(Exception, queue.put, object)
 
         # Make queue finalizer run before the server is stopped
         del queue
         if not gc.isenabled():
             gc.enable()
             self.addCleanup(gc.disable)
-        thresholds = gc.get_threshold()
-        self.addCleanup(gc.set_threshold, *thresholds)
-        gc.set_threshold(10)
+        #thresholds = gc.get_threshold()
+        #self.addCleanup(gc.set_threshold, *thresholds)
+        #gc.set_threshold(10)
 
         # perform numerous block allocations, with cyclic references to make
         # sure objects are collected asynchronously by the gc
     def test_synchronize(self):
         self.test_sharedctypes(lock=True)
 
+    @unittest.skipUnless(test_support.check_impl_detail(pypy=False), "pypy ctypes differences")
     def test_copy(self):
         foo = _Foo(2, 5.0)
         bar = copy(foo)

File lib-python/2.7/test/test_support.py

     else:
         runner = BasicTestRunner()
 
-
     result = runner.run(suite)
     if not result.wasSuccessful():
         if len(result.errors) == 1 and not result.failures:

File lib-python/conftest.py

     RegrTest('test_bz2.py', usemodules='bz2'),
     RegrTest('test_calendar.py'),
     RegrTest('test_call.py', core=True),
-    RegrTest('test_capi.py', skip="not applicable"),
+    RegrTest('test_capi.py'),
     RegrTest('test_cd.py'),
     RegrTest('test_cfgparser.py'),
     RegrTest('test_cgi.py'),
     RegrTest('test_msilib.py'),
     RegrTest('test_multibytecodec.py', usemodules='_multibytecodec'),
     RegrTest('test_multifile.py'),
-    RegrTest('test_multiprocessing.py', skip=True),
+    RegrTest('test_multiprocessing.py'),
     RegrTest('test_mutants.py', core="possibly"),
     RegrTest('test_mutex.py'),
     RegrTest('test_netrc.py'),

File pypy/bin/checkmodule.py

 """
 import sys, os
 
+sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
+
 from pypy.objspace.fake.checkmodule import checkmodule
 
 def main(argv):

File pypy/doc/cpython_differences.rst

   type and vice versa. For builtin types, a dictionary will be returned that
   cannot be changed (but still looks and behaves like a normal dictionary).
 
-* the ``__len__`` or ``__length_hint__`` special methods are sometimes
-  called by CPython to get a length estimate to preallocate internal arrays.
-  So far, PyPy never calls ``__len__`` for this purpose, and never calls
-  ``__length_hint__`` at all.
-
 
 .. include:: _ref.txt

File pypy/doc/faq.rst

 Do I have to rewrite my programs in RPython?
 --------------------------------------------
 
-No.  And you shouldn't try.  PyPy always runs your code in its own interpreter, which is a
-full and compliant Python 2.7 interpreter.  RPython is only the
-language in which parts of PyPy itself are written and extension
-modules for it.  Not only is it not necessary for you to rewrite your
-code in RPython, it probably won't give you any speed improvements if you 
-try.
+No.  And you shouldn't try.  First and foremost, RPython is a language
+that is designed to write interpreters in.  It is a restricted subset of
+Python.  If your program is not an interpreter but tries to do "real
+things", like use *any* part of the standard Python library or *any*
+3rd-party library, then it is not RPython to start with.  You should
+only look at RPython if you try to `write your own interpreter`__.
+
+.. __: `how do I compile my own interpreters`_
+
+If your goal is to speed up Python code, then look at the regular PyPy,
+which is a full and compliant Python 2.7 interpreter (which happens to
+be written in RPython).  Not only is it not necessary for you to rewrite
+your code in RPython, it might not give you any speed improvements even
+if you manage to.
+
+Yes, it is possible with enough effort to compile small self-contained
+pieces of RPython code doing a few performance-sensitive things.  But
+this case is not interesting for us.  If you needed to rewrite the code
+in RPython, you could as well have rewritten it in C for example.  The
+latter is a much more supported, much more documented language `:-)`
 
 ---------------------------------------------------
 Which backends are there for the RPython toolchain?
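The FAQ passage above turns on RPython being a statically analysable subset of Python. As a rough, hand-written illustration (not part of this commit) of the distinction it draws:

    # Hypothetical example only.  This function is the kind of code RPython
    # accepts: every variable keeps a single, inferable type throughout.
    def count_set_bits(n):
        count = 0
        while n:
            count += n & 1
            n >>= 1
        return count

    # This is ordinary Python but not RPython: it rebinds a variable to a
    # different type and pulls in a standard-library module at runtime.
    def describe(n):
        import json                  # arbitrary stdlib use is not RPython
        value = n
        if n > 100:
            value = "big"            # int -> str rebinding defeats inference
        return json.dumps(value)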

File pypy/interpreter/baseobjspace.py

                     )
                 raise
             w_fd = self.call_function(w_fileno)
-            if not self.isinstance_w(w_fd, self.w_int):
+            if (not self.isinstance_w(w_fd, self.w_int) and
+                not self.isinstance_w(w_fd, self.w_long)):
                 raise OperationError(self.w_TypeError,
-                    self.wrap("fileno() must return an integer")
+                    self.wrap("fileno() returned a non-integer")
                 )
         fd = self.int_w(w_fd)
         if fd < 0:
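The check above now accepts a fileno() that returns either an int or a long, with a clearer error message when it returns neither. A minimal app-level sketch of what this enables (hypothetical wrapper class; the fcntl test further down in this commit exercises the same path):

    import fcntl

    class FdProxy(object):
        # fileno() returning a long used to fail the w_int-only check with
        # "fileno() must return an integer"; it is now treated like an int.
        def __init__(self, fd):
            self._fd = fd
        def fileno(self):
            return long(self._fd)

    f = open("/tmp/fd-proxy-demo", "w")                  # hypothetical file
    print fcntl.fcntl(FdProxy(f.fileno()), fcntl.F_GETFD)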

File pypy/module/_bisect/interp_bisect.py

 from pypy.interpreter.error import OperationError
 from pypy.interpreter.gateway import unwrap_spec
+from rpython.rlib.rarithmetic import intmask, r_uint
 
 
 @unwrap_spec(lo=int, hi=int)
     if hi == -1:
         hi = space.len_w(w_a)
     while lo < hi:
-        mid = (lo + hi) >> 1
+        mid = intmask((r_uint(lo) + r_uint(hi)) >> 1)
         w_litem = space.getitem(w_a, space.wrap(mid))
         if space.is_true(space.lt(w_litem, w_x)):
             lo = mid + 1
     if hi == -1:
         hi = space.len_w(w_a)
     while lo < hi:
-        mid = (lo + hi) >> 1
+        mid = intmask((r_uint(lo) + r_uint(hi)) >> 1)
         w_litem = space.getitem(w_a, space.wrap(mid))
         if space.is_true(space.lt(w_x, w_litem)):
             hi = mid
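Both midpoint computations are widened to unsigned before the shift so that lo + hi cannot wrap around on huge sequences. A small plain-Python sketch (emulating 64-bit machine arithmetic, which is what RPython uses natively) of the failure mode and the fix:

    import sys

    MASK = 2 ** 64 - 1

    def naive_mid(lo, hi):
        # Emulate a fixed-width signed addition: the sum wraps into the
        # machine-word range before the shift, as the old code did.
        s = (lo + hi) & MASK
        if s >= 2 ** 63:
            s -= 2 ** 64              # reinterpret as a signed word
        return s >> 1

    def widened_mid(lo, hi):
        # The patched computation: add as unsigned, shift, mask back down.
        return ((lo + hi) & MASK) >> 1

    lo, hi = sys.maxsize - 3, sys.maxsize - 1    # hypothetical huge indices
    print naive_mid(lo, hi)      # negative: the wrapped sum is meaningless
    print widened_mid(lo, hi)    # sys.maxsize - 2, the correct midpoint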

File pypy/module/_bisect/test/test_bisect.py

         insort_right(a, 6.0)
         assert a == [0, 5, 6, 6, 6, 6.0, 7]
         assert map(type, a) == [int, int, int, int, int, float, int]
+
+    def test_bisect_overflow(self):
+        from _bisect import bisect_left, bisect_right
+        import sys
+
+        size = sys.maxsize
+        data = xrange(size - 1)
+        assert bisect_left(data, size - 3) == size - 3
+        assert bisect_right(data, size - 3) == size - 2

File pypy/module/_io/interp_fileio.py

 
         self.readable, self.writable, append, flags = decode_mode(space, mode)
 
-        if fd >= 0:
-            verify_fd(fd)
-            try:
-                os.fstat(fd)
-            except OSError, e:
-                if e.errno == errno.EBADF:
-                    raise wrap_oserror(space, e)
-                # else: pass
-            self.fd = fd
-            self.closefd = bool(closefd)
-        else:
-            if not closefd:
-                raise OperationError(space.w_ValueError, space.wrap(
-                    "Cannot use closefd=False with file name"))
-            self.closefd = True
+        fd_is_own = False
+        try:
+            if fd >= 0:
+                verify_fd(fd)
+                try:
+                    os.fstat(fd)
+                except OSError, e:
+                    if e.errno == errno.EBADF:
+                        raise wrap_oserror(space, e)
+                    # else: pass
+                self.fd = fd
+                self.closefd = bool(closefd)
+            else:
+                self.closefd = True
+                if not closefd:
+                    raise OperationError(space.w_ValueError, space.wrap(
+                        "Cannot use closefd=False with file name"))
 
-            from pypy.module.posix.interp_posix import (
-                dispatch_filename, rposix)
-            try:
-                self.fd = dispatch_filename(rposix.open)(
-                    space, w_name, flags, 0666)
-            except OSError, e:
-                raise wrap_oserror2(space, e, w_name,
-                                    exception_name='w_IOError')
+                from pypy.module.posix.interp_posix import (
+                    dispatch_filename, rposix)
+                try:
+                    self.fd = dispatch_filename(rposix.open)(
+                        space, w_name, flags, 0666)
+                except OSError, e:
+                    raise wrap_oserror2(space, e, w_name,
+                                        exception_name='w_IOError')
+                finally:
+                    fd_is_own = True
 
             self._dircheck(space, w_name)
-        self.w_name = w_name
+            self.w_name = w_name
 
-        if append:
-            # For consistent behaviour, we explicitly seek to the end of file
-            # (otherwise, it might be done only on the first write()).
-            try:
-                os.lseek(self.fd, 0, os.SEEK_END)
-            except OSError, e:
-                raise wrap_oserror(space, e, exception_name='w_IOError')
+            if append:
+                # For consistent behaviour, we explicitly seek to the end of file
+                # (otherwise, it might be done only on the first write()).
+                try:
+                    os.lseek(self.fd, 0, os.SEEK_END)
+                except OSError, e:
+                    raise wrap_oserror(space, e, exception_name='w_IOError')
+        except:
+            if not fd_is_own:
+                self.fd = -1
+            raise
 
     def _mode(self):
         if self.readable:
         except OSError:
             return
         if stat.S_ISDIR(st.st_mode):
-            self._close(space)
             raise wrap_oserror2(space, OSError(errno.EISDIR, "fstat"),
                                 w_filename, exception_name='w_IOError')
 

File pypy/module/_io/test/test_fileio.py

 
     def test_open_directory(self):
         import _io
+        import os
         raises(IOError, _io.FileIO, self.tmpdir, "rb")
+        fd = os.open(self.tmpdir, os.O_RDONLY)
+        raises(IOError, _io.FileIO, fd, "rb")
+        os.close(fd)
 
     def test_readline(self):
         import _io

File pypy/module/cpyext/api.py

             kwds["link_extra"] = ["msvcrt.lib"]
         elif sys.platform.startswith('linux'):
             compile_extra.append("-Werror=implicit-function-declaration")
+            compile_extra.append('-g')
         export_symbols_eci.append('pypyAPI')
-        compile_extra.append('-g')
     else:
         kwds["includes"] = ['Python.h'] # this is our Python.h
 

File pypy/module/cpyext/longobject.py

     Return a C unsigned long representation of the contents of pylong.
     If pylong is greater than ULONG_MAX, an OverflowError is
     raised."""
-    return rffi.cast(rffi.ULONG, space.uint_w(w_long))
+    try:
+        return rffi.cast(rffi.ULONG, space.uint_w(w_long))
+    except OperationError, e:
+        if e.match(space, space.w_ValueError):
+            e.w_type = space.w_OverflowError
+        raise
 
 @cpython_api([PyObject], rffi.ULONG, error=-1)
 def PyLong_AsUnsignedLongMask(space, w_long):
     Return a C unsigned long representation of the contents of pylong.
     If pylong is greater than ULONG_MAX, an OverflowError is
     raised."""
-    return rffi.cast(rffi.ULONGLONG, space.r_ulonglong_w(w_long))
+    try:
+        return rffi.cast(rffi.ULONGLONG, space.r_ulonglong_w(w_long))
+    except OperationError, e:
+        if e.match(space, space.w_ValueError):
+            e.w_type = space.w_OverflowError
+        raise
 
 @cpython_api([PyObject], rffi.ULONGLONG, error=-1)
 def PyLong_AsUnsignedLongLongMask(space, w_long):

File pypy/module/cpyext/object.py

 
 @cpython_api([PyObject], PyObject)
 def PyObject_Str(space, w_obj):
+    if w_obj is None:
+        return space.wrap("<NULL>")
     return space.str(w_obj)
 
 @cpython_api([PyObject], PyObject)
     representation on success, NULL on failure.  This is the equivalent of the
     Python expression repr(o).  Called by the repr() built-in function and
     by reverse quotes."""
+    if w_obj is None:
+        return space.wrap("<NULL>")
     return space.repr(w_obj)
 
 @cpython_api([PyObject], PyObject)
     string representation on success, NULL on failure. This is the equivalent of
     the Python expression unicode(o).  Called by the unicode() built-in
     function."""
+    if w_obj is None:
+        return space.wrap(u"<NULL>")
     return space.call_function(space.w_unicode, w_obj)
 
 @cpython_api([PyObject, PyObject], rffi.INT_real, error=-1)

File pypy/module/cpyext/test/test_longobject.py

         value = api.PyLong_AsUnsignedLong(w_value)
         assert value == (sys.maxint - 1) * 2
 
+        self.raises(space, api, OverflowError, api.PyLong_AsUnsignedLong, space.wrap(-1))
+
     def test_as_ssize_t(self, space, api):
         w_value = space.newlong(2)
         value = api.PyLong_AsSsize_t(w_value)
         w_l = space.wrap(sys.maxint + 1)
         assert api.PyLong_Check(w_l)
         assert api.PyLong_CheckExact(w_l)
-        
+
         w_i = space.wrap(sys.maxint)
         assert not api.PyLong_Check(w_i)
         assert not api.PyLong_CheckExact(w_i)
-        
+
         L = space.appexec([], """():
             class L(long):
                 pass
         assert api.PyLong_AsUnsignedLongLongMask(
             space.wrap(1<<64)) == 0
 
+        self.raises(space, api, OverflowError, api.PyLong_AsUnsignedLongLong, space.wrap(-1))
+
     def test_as_long_and_overflow(self, space, api):
         overflow = lltype.malloc(rffi.CArrayPtr(rffi.INT_real).TO, 1, flavor='raw')
         assert api.PyLong_AsLongAndOverflow(

File pypy/module/cpyext/test/test_object.py

 
     def test_size(self, space, api):
         assert api.PyObject_Size(space.newlist([space.w_None])) == 1
-        
+
+    def test_str(self, space, api):
+        w_list = space.newlist([space.w_None, space.wrap(42)])
+        assert space.str_w(api.PyObject_Str(None)) == "<NULL>"
+        assert space.str_w(api.PyObject_Str(w_list)) == "[None, 42]"
+        assert space.str_w(api.PyObject_Str(space.wrap("a"))) == "a"
+
     def test_repr(self, space, api):
         w_list = space.newlist([space.w_None, space.wrap(42)])
+        assert space.str_w(api.PyObject_Repr(None)) == "<NULL>"
         assert space.str_w(api.PyObject_Repr(w_list)) == "[None, 42]"
         assert space.str_w(api.PyObject_Repr(space.wrap("a"))) == "'a'"
-        
-        w_list = space.newlist([space.w_None, space.wrap(42)])
-        assert space.str_w(api.PyObject_Str(w_list)) == "[None, 42]"
-        assert space.str_w(api.PyObject_Str(space.wrap("a"))) == "a"
-        
+
     def test_RichCompare(self, space, api):
         def compare(w_o1, w_o2, opid):
             res = api.PyObject_RichCompareBool(w_o1, w_o2, opid)
             w_res = api.PyObject_RichCompare(w_o1, w_o2, opid)
             assert space.is_true(w_res) == res
             return res
-        
+
         def test_compare(o1, o2):
             w_o1 = space.wrap(o1)
             w_o2 = space.wrap(o2)
-            
+
             for opid, expected in [
                     (Py_LT, o1 <  o2), (Py_LE, o1 <= o2),
                     (Py_NE, o1 != o2), (Py_EQ, o1 == o2),
             api.PyErr_Clear()
 
     def test_unicode(self, space, api):
+        assert space.unwrap(api.PyObject_Unicode(None)) == u"<NULL>"
         assert space.unwrap(api.PyObject_Unicode(space.wrap([]))) == u"[]"
         assert space.unwrap(api.PyObject_Unicode(space.wrap("e"))) == u"e"
         assert api.PyObject_Unicode(space.wrap("\xe9")) is None

File pypy/module/fcntl/test/test_fcntl.py

         import sys
         import struct
 
+        class F:
+            def __init__(self, fn):
+                self.fn = fn
+            def fileno(self):
+                return self.fn
+
         f = open(self.tmp + "b", "w+")
 
         fcntl.fcntl(f, 1, 0)
         fcntl.fcntl(f, 1)
+        fcntl.fcntl(F(long(f.fileno())), 1)
         raises(TypeError, fcntl.fcntl, "foo")
         raises(TypeError, fcntl.fcntl, f, "foo")
-        raises((IOError, ValueError), fcntl.fcntl, -1, 1, 0)
+        raises(TypeError, fcntl.fcntl, F("foo"), 1)
+        raises(ValueError, fcntl.fcntl, -1, 1, 0)
+        raises(ValueError, fcntl.fcntl, F(-1), 1, 0)
+        raises(ValueError, fcntl.fcntl, F(long(-1)), 1, 0)
         assert fcntl.fcntl(f, 1, 0) == 0
         assert fcntl.fcntl(f, 2, "foo") == "foo"
         assert fcntl.fcntl(f, 2, buffer("foo")) == "foo"

File pypy/module/thread/test/support.py

 import time
 import thread
 import os
+import errno
 
 from pypy.interpreter.gateway import interp2app, unwrap_spec
 from pypy.module.thread import gil
     def kill():
         for x in range(delay * 10):
             time.sleep(0.1)
-            os.kill(pid, 0)
+            try:
+                os.kill(pid, 0)
+            except OSError, e:
+                if e.errno == errno.ESRCH: # no such process
+                    return
+                raise
         os.kill(pid, 9)
         print "process %s killed!" % (pid,)
     thread.start_new_thread(kill, ())
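The watchdog thread now stops polling once the child has already exited, instead of crashing on the OSError raised by os.kill. The underlying idiom is a signal-0 liveness probe; sketched as a standalone helper (hypothetical, not part of the commit):

    import errno
    import os

    def process_alive(pid):
        # Signal 0 performs only the existence and permission checks, so
        # ESRCH reliably means the process is gone.
        try:
            os.kill(pid, 0)
        except OSError, e:
            if e.errno == errno.ESRCH:
                return False
            raise                    # EPERM etc.: it exists, just not ours
        return True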

File pypy/module/thread/test/test_fork.py

 from pypy.module.thread.test.support import GenericTestThread
 
 class AppTestFork(GenericTestThread):
-    def test_fork(self):
+    def test_fork_with_thread(self):
         # XXX This test depends on a multicore machine, as busy_thread must
         # aquire the GIL the instant that the main thread releases it.
         # It will incorrectly pass if the GIL is not grabbed in time.
         if not hasattr(os, 'fork'):
             skip("No fork on this platform")
 
-        run = True
-        done = []
         def busy_thread():
             while run:
                 time.sleep(0)
             done.append(None)
 
-        try:
-            thread.start_new(busy_thread, ())
+        for i in range(1):
+            run = True
+            done = []
+            try:
+                thread.start_new(busy_thread, ())
+                print 'sleep'
 
-            pid = os.fork()
-
-            if pid == 0:
-                os._exit(0)
-
-            else:
-                time.sleep(1)
-                spid, status = os.waitpid(pid, os.WNOHANG)
-                assert spid == pid
-        finally:
-            run = False
-            self.waitfor(lambda: done)
+                pid = os.fork()
+                if pid == 0:
+                    os._exit(0)
+                else:
+                    self.timeout_killer(pid, 5)
+                    exitcode = os.waitpid(pid, 0)[1]
+                    assert exitcode == 0 # if 9, process was killed by timer!
+            finally:
+                run = False
+                self.waitfor(lambda: done)
+                assert done
 
     def test_forked_can_thread(self):
         "Checks that a forked interpreter can start a thread"
-        import os, thread, time
+        import thread
+        import os
 
         if not hasattr(os, 'fork'):
             skip("No fork on this platform")
 
-        # pre-allocate some locks
-        thread.start_new_thread(lambda: None, ())
+        for i in range(10):
+            # pre-allocate some locks
+            thread.start_new_thread(lambda: None, ())
+            print 'sleep'
 
-        pid = os.fork()
-        if pid == 0:
-            print 'in child'
-            thread.start_new_thread(lambda: None, ())
-            os._exit(0)
-        else:
-            self.timeout_killer(pid, 5)
-            exitcode = os.waitpid(pid, 0)[1]
-            assert exitcode == 0 # if 9, process was killed by timer!
+            pid = os.fork()
+            if pid == 0:
+                thread.start_new_thread(lambda: None, ())
+                os._exit(0)
+            else:
+                self.timeout_killer(pid, 5)
+                exitcode = os.waitpid(pid, 0)[1]
+                assert exitcode == 0 # if 9, process was killed by timer!

File rpython/flowspace/framestate.py

+from rpython.flowspace.model import Variable, Constant
 from rpython.rlib.unroll import SpecTag
-from rpython.flowspace.model import *
+
 
 class FrameState:
     def __init__(self, mergeable, blocklist, next_instr):
     raise TypeError('union of %r and %r' % (w1.__class__.__name__,
                                             w2.__class__.__name__))
 
+
 # ____________________________________________________________
 #
 # We have to flatten out the state of the frame into a list of
 PICKLE_TAGS = {}
 UNPICKLE_TAGS = {}
 
+
 def recursively_flatten(space, lst):
     from rpython.flowspace.flowcontext import SuspendedUnroller
     i = 0
             except:
                 tag = PICKLE_TAGS[key] = Constant(PickleTag())
                 UNPICKLE_TAGS[tag] = key
-            lst[i:i+1] = [tag] + vars
+            lst[i:i + 1] = [tag] + vars
+
 
 def recursively_unflatten(space, lst):
-    for i in xrange(len(lst)-1, -1, -1):
+    for i in xrange(len(lst) - 1, -1, -1):
         item = lst[i]
         if item in UNPICKLE_TAGS:
             unrollerclass, argcount = UNPICKLE_TAGS[item]
-            arguments = lst[i+1: i+1+argcount]
-            del lst[i+1: i+1+argcount]
+            arguments = lst[i + 1:i + 1 + argcount]
+            del lst[i + 1:i + 1 + argcount]
             unroller = unrollerclass.state_pack_variables(space, *arguments)
             lst[i] = unroller

File rpython/jit/metainterp/optimizeopt/rewrite.py

 
     def optimize_GUARD_VALUE(self, op):
         value = self.getvalue(op.getarg(0))
+        if value.is_virtual():
+            raise InvalidLoop('A promote of a virtual (a recently allocated object) never makes sense!')
         if value.last_guard:
             # there already has been a guard_nonnull or guard_class or
             # guard_nonnull_class on this value, which is rather silly.

File rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py

         jump(p2)
         """
         self.raises(InvalidLoop, self.optimize_loop,
-                       ops, ops)
+                       ops, "crash!")
 
     def test_invalid_loop_2(self):
         ops = """
         jump(p2)
         """
         self.raises(InvalidLoop, self.optimize_loop,
-                       ops, ops)
+                       ops, "crash!")
 
     def test_invalid_loop_3(self):
         ops = """
         setfield_gc(p3, p4, descr=nextdescr)
         jump(p3)
         """
-        self.raises(InvalidLoop, self.optimize_loop, ops, ops)
-
+        self.raises(InvalidLoop, self.optimize_loop, ops, "crash!")
+
+    def test_invalid_loop_guard_value_of_virtual(self):
+        ops = """
+        [p1]
+        p2 = new_with_vtable(ConstClass(node_vtable))
+        guard_value(p2, ConstPtr(myptr)) []
+        jump(p2)
+        """
+        self.raises(InvalidLoop, self.optimize_loop,
+                       ops, "crash!")
 
     def test_merge_guard_class_guard_value(self):
         ops = """

File rpython/jit/metainterp/optimizeopt/unroll.py

-from rpython.jit.codewriter.effectinfo import EffectInfo
+import sys
+
+from rpython.jit.metainterp.compile import ResumeGuardDescr
+from rpython.jit.metainterp.history import TargetToken, JitCellToken, Const
+from rpython.jit.metainterp.inliner import Inliner
+from rpython.jit.metainterp.optimize import InvalidLoop
+from rpython.jit.metainterp.optimizeopt.generalize import KillHugeIntBounds
+from rpython.jit.metainterp.optimizeopt.optimizer import Optimizer, Optimization
 from rpython.jit.metainterp.optimizeopt.virtualstate import VirtualStateAdder, ShortBoxes, BadVirtualState
-from rpython.jit.metainterp.compile import ResumeGuardDescr
-from rpython.jit.metainterp.history import TreeLoop, TargetToken, JitCellToken
-from rpython.jit.metainterp.jitexc import JitException
-from rpython.jit.metainterp.optimize import InvalidLoop
-from rpython.rlib.debug import debug_print, debug_start, debug_stop
-from rpython.jit.metainterp.optimizeopt.optimizer import *
-from rpython.jit.metainterp.optimizeopt.generalize import KillHugeIntBounds
-from rpython.jit.metainterp.inliner import Inliner
 from rpython.jit.metainterp.resoperation import rop, ResOperation
 from rpython.jit.metainterp.resume import Snapshot
-import sys, os
+from rpython.rlib.debug import debug_print, debug_start, debug_stop
+
 
 # FIXME: Introduce some VirtualOptimizer super class instead
 
     opt.inline_short_preamble = inline_short_preamble
     opt.propagate_all_forward()
 
+
 class UnrollableOptimizer(Optimizer):
     def setup(self):
         self.importable_values = {}
     distinction anymore)"""
 
     inline_short_preamble = True
-    
+
     def __init__(self, metainterp_sd, loop, optimizations):
         self.optimizer = UnrollableOptimizer(metainterp_sd, loop, optimizations)
         self.boxes_created_this_iteration = None
     def fix_snapshot(self, jump_args, snapshot):
         if snapshot is None:
             return None
-        snapshot_args = snapshot.boxes 
+        snapshot_args = snapshot.boxes
         new_snapshot_args = []
         for a in snapshot_args:
             a = self.getvalue(a).get_key_box()
             new_snapshot_args.append(a)
         prev = self.fix_snapshot(jump_args, snapshot.prev)
         return Snapshot(prev, new_snapshot_args)
-            
+
     def propagate_all_forward(self):
         loop = self.optimizer.loop
         self.optimizer.clear_newoperations()
             # will clear heap caches
             self.optimizer.send_extra_operation(start_label)
         else:
-            start_label = None            
+            start_label = None
 
         jumpop = loop.operations[-1]
         if jumpop.getopnum() == rop.JUMP or jumpop.getopnum() == rop.LABEL:
         assert isinstance(cell_token, JitCellToken)
         stop_label = ResOperation(rop.LABEL, jumpop.getarglist(), None, TargetToken(cell_token))
 
-        
         if jumpop.getopnum() == rop.JUMP:
             if self.jump_to_already_compiled_trace(jumpop):
                 # Found a compiled trace to jump to
                     return
 
         # Found nothing to jump to, emit a label instead
-        
+
         if self.short:
             # Construct our short preamble
             assert start_label
     def jump_to_start_label(self, start_label, stop_label):
         if not start_label or not stop_label:
             return False
-        
+
         stop_target = stop_label.getdescr()
         start_target = start_label.getdescr()
         assert isinstance(stop_target, TargetToken)
         #virtual_state = modifier.get_virtual_state(args)
         #if self.initial_virtual_state.generalization_of(virtual_state):
         #    return True
-        
 
     def export_state(self, targetop):
         original_jump_args = targetop.getarglist()
 
         modifier = VirtualStateAdder(self.optimizer)
         virtual_state = modifier.get_virtual_state(jump_args)
-            
+
         values = [self.getvalue(arg) for arg in jump_args]
         inputargs = virtual_state.make_inputargs(values, self.optimizer)
         short_inputargs = virtual_state.make_inputargs(values, self.optimizer, keyboxes=True)
 
-
         if self.boxes_created_this_iteration is not None:
             for box in self.inputargs:
                 self.boxes_created_this_iteration[box] = True
             if op and op.result:
                 box = op.result
                 exported_values[box] = self.optimizer.getvalue(box)
-            
+
         target_token.exported_state = ExportedState(short_boxes, inputarg_setup_ops,
                                                     exported_values)
 
             virtual_state = modifier.get_virtual_state(self.inputargs)
             self.initial_virtual_state = virtual_state
             return
-        
+
         self.short = target_token.short_preamble[:]
         self.short_seen = {}
         self.short_boxes = exported_state.short_boxes
                     #if self.optimizer.loop.logops:
                     #    debug_print('  Falling back to add extra: ' +
                     #                self.optimizer.loop.logops.repr_of_resop(op))
-                    
+
         self.optimizer.flush()
         self.optimizer.emitting_dissabled = False
 
         # We dont need to inline the short preamble we are creating as we are conneting
         # the bridge to a different trace with a different short preamble
         self.short_inliner = None
-        
+
         newoperations = self.optimizer.get_newoperations()
         self.boxes_created_this_iteration = {}
         i = 0
                                       'same box passed to multiple of its ' +
                                       'inputargs, but the jump at the ' +
                                       'end of this bridge does not do that.')
-                                      
+
             args[short_inputargs[i]] = jmp_to_short_args[i]
         self.short_inliner = Inliner(short_inputargs, jmp_to_short_args)
         i = 1
                               'loop is not compatible with the virtual ' +
                               'state at the start of the loop which makes ' +
                               'it impossible to close the loop')
-            
+
         #debug_stop('jit-log-virtualstate')
 
         maxguards = self.optimizer.metainterp_sd.warmrunnerdesc.memory_manager.max_retrace_guards
             target_token = jumpop.getdescr()
             assert isinstance(target_token, TargetToken)
             target_token.targeting_jitcell_token.retraced_count = sys.maxint
-            
+
         self.finilize_short_preamble(start_label)
-            
+
     def finilize_short_preamble(self, start_label):
         short = self.short
         assert short[-1].getopnum() == rop.JUMP
         if op is None:
             return None
         if op.result is not None and op.result in self.short_seen:
-            if emit and self.short_inliner:                
+            if emit and self.short_inliner:
                 return self.short_inliner.inline_arg(op.result)
             else:
                 return None
-        
+
         for a in op.getarglist():
             if not isinstance(a, Const) and a not in self.short_seen:
                 self.add_op_to_short(self.short_boxes.producer(a), emit, guards_needed)
         if guards_needed and self.short_boxes.has_producer(op.result):
             value_guards = self.getvalue(op.result).make_guards(op.result)
         else:
-            value_guards = []            
+            value_guards = []
 
         self.short.append(op)
         self.short_seen[op.result] = True
         if newop:
             return newop.result
         return None
-        
+
     def import_box(self, box, inputargs, short_jumpargs, jumpargs):
         if isinstance(box, Const) or box in inputargs:
             return
                             classbox = self.getvalue(newop.result).get_constant_class(self.optimizer.cpu)
                             if not classbox or not classbox.same_constant(target.assumed_classes[shop.result]):
                                 raise InvalidLoop('The class of an opaque pointer at the end ' +
-                                                  'of the bridge does not mach the class ' + 
+                                                  'of the bridge does not mach the class ' +
                                                   'it has at the start of the target loop')
                 except InvalidLoop:
                     #debug_print("Inlining failed unexpectedly",
         debug_stop('jit-log-virtualstate')
         return False
 
+
 class ValueImporter(object):
     def __init__(self, unroll, value, op):
         self.unroll = unroll
 
     def import_value(self, value):
         value.import_from(self.preamble_value, self.unroll.optimizer)
-        self.unroll.add_op_to_short(self.op, False, True)        
+        self.unroll.add_op_to_short(self.op, False, True)
+
 
 class ExportedState(object):
     def __init__(self, short_boxes, inputarg_setup_ops, exported_values):

File rpython/jit/metainterp/test/test_ajit.py

         assert res == -2
 
     def test_guard_always_changing_value(self):
-        myjitdriver = JitDriver(greens = [], reds = ['x'])
-        class A:
-            pass
+        myjitdriver = JitDriver(greens = [], reds = ['x', 'a'])
         def f(x):
+            a = 0
             while x > 0:
-                myjitdriver.can_enter_jit(x=x)
-                myjitdriver.jit_merge_point(x=x)
-                a = A()
+                myjitdriver.can_enter_jit(x=x, a=a)
+                myjitdriver.jit_merge_point(x=x, a=a)
+                a += 1
                 promote(a)
                 x -= 1
         self.meta_interp(f, [50])

File rpython/jit/metainterp/virtualizable.py

+from rpython.jit.metainterp import history
+from rpython.jit.metainterp.typesystem import deref, fieldType, arrayItem
+from rpython.jit.metainterp.warmstate import wrap, unwrap
+from rpython.rlib.unroll import unrolling_iterable
+from rpython.rtyper import rvirtualizable2
 from rpython.rtyper.lltypesystem import lltype, llmemory
 from rpython.rtyper.ootypesystem import ootype
-from rpython.rtyper.annlowlevel import cast_base_ptr_to_instance
 from rpython.rtyper.rclass import IR_IMMUTABLE_ARRAY, IR_IMMUTABLE
-from rpython.rtyper import rvirtualizable2
-from rpython.rlib.unroll import unrolling_iterable
-from rpython.rlib.nonconst import NonConstant
-from rpython.jit.metainterp.typesystem import deref, fieldType, arrayItem
-from rpython.jit.metainterp import history
-from rpython.jit.metainterp.warmstate import wrap, unwrap
-from rpython.rlib.objectmodel import specialize
+
 
 class VirtualizableInfo(object):
     TOKEN_NONE            = 0      # must be 0 -- see also x86.call_assembler
         getlength = cpu.ts.getlength
         getarrayitem = cpu.ts.getarrayitem
         setarrayitem = cpu.ts.setarrayitem
-        #
+
         def read_boxes(cpu, virtualizable):
             assert lltype.typeOf(virtualizable) == llmemory.GCREF
             virtualizable = cast_gcref_to_vtype(virtualizable)
                 for i in range(getlength(lst)):
                     boxes.append(wrap(cpu, getarrayitem(lst, i)))
             return boxes
-        #
+
         def write_boxes(virtualizable, boxes):
             virtualizable = cast_gcref_to_vtype(virtualizable)
             i = 0
                     setarrayitem(lst, j, x)
                     i = i + 1
             assert len(boxes) == i + 1
-        #
+
         def write_from_resume_data_partial(virtualizable, reader, numb):
             virtualizable = cast_gcref_to_vtype(virtualizable)
             # Load values from the reader (see resume.py) described by
             assert i >= 0
             for ARRAYITEMTYPE, fieldname in unroll_array_fields_rev:
                 lst = getattr(virtualizable, fieldname)
-                for j in range(getlength(lst)-1, -1, -1):
+                for j in range(getlength(lst) - 1, -1, -1):
                     i -= 1
                     assert i >= 0
                     x = reader.load_value_of_type(ARRAYITEMTYPE, numb.nums[i])
                 x = reader.load_value_of_type(FIELDTYPE, numb.nums[i])
                 setattr(virtualizable, fieldname, x)
             return i
-        #
+
         def load_list_of_boxes(virtualizable, reader, numb):
             virtualizable = cast_gcref_to_vtype(virtualizable)
             # Uses 'virtualizable' only to know the length of the arrays;
             boxes = [reader.decode_box_of_type(self.VTYPEPTR, numb.nums[i])]
             for ARRAYITEMTYPE, fieldname in unroll_array_fields_rev:
                 lst = getattr(virtualizable, fieldname)
-                for j in range(getlength(lst)-1, -1, -1):
+                for j in range(getlength(lst) - 1, -1, -1):
                     i -= 1
                     assert i >= 0
-                    box = reader.decode_box_of_type(ARRAYITEMTYPE,numb.nums[i])
+                    box = reader.decode_box_of_type(ARRAYITEMTYPE, numb.nums[i])
                     boxes.append(box)
             for FIELDTYPE, fieldname in unroll_static_fields_rev:
                 i -= 1
                 boxes.append(box)
             boxes.reverse()
             return boxes
-        #
+
         def check_boxes(virtualizable, boxes):
             virtualizable = cast_gcref_to_vtype(virtualizable)
             # for debugging
                     assert getarrayitem(lst, j) == x
                     i = i + 1
             assert len(boxes) == i + 1
-        #
+
         def get_index_in_array(virtualizable, arrayindex, index):
             virtualizable = cast_gcref_to_vtype(virtualizable)
             index += self.num_static_extra_boxes
                 index += getlength(lst)
                 j = j + 1
             assert False, "invalid arrayindex"
-        #
+
         def get_array_length(virtualizable, arrayindex):
             virtualizable = cast_gcref_to_vtype(virtualizable)
             j = 0
                 if arrayindex == j:
                     lst = getattr(virtualizable, fieldname)
                     return getlength(lst)
-                j = j + 1
+                j += 1
             assert False, "invalid arrayindex"
-        #
+
         unroll_static_fields = unrolling_iterable(zip(FIELDTYPES,
                                                       static_fields))
         unroll_array_fields = unrolling_iterable(zip(ARRAYITEMTYPES,
                                                      array_fields))
         unroll_static_fields_rev = unrolling_iterable(
                                           reversed(list(unroll_static_fields)))
-        unroll_array_fields_rev  = unrolling_iterable(
+        unroll_array_fields_rev = unrolling_iterable(
                                           reversed(list(unroll_array_fields)))
         self.read_boxes = read_boxes
         self.write_boxes = write_boxes
 
     def unwrap_virtualizable_box(self, virtualizable_box):
         return virtualizable_box.getref(llmemory.GCREF)
-     
+
     def is_vtypeptr(self, TYPE):
         return rvirtualizable2.match_virtualizable_type(TYPE, self.VTYPEPTR)
 

File rpython/translator/c/src/thread_nt.c

 /*
  * Thread support.
  */
+/* In rpython, this file is pulled in by thread.c */
 
 typedef struct RPyOpaque_ThreadLock NRMUTEX, *PNRMUTEX;
 

File rpython/translator/c/src/thread_nt.h

 } NRMUTEX, *PNRMUTEX;
 
 /* prototypes */
+long RPyThreadGetIdent(void);
 long RPyThreadStart(void (*func)(void));
 int RPyThreadLockInit(struct RPyOpaque_ThreadLock *lock);
 void RPyOpaqueDealloc_ThreadLock(struct RPyOpaque_ThreadLock *lock);