Commits

Amaury Forgeot d'Arc  committed bdea9f1

merge from trunk:
svn merge -r78316:78900 ../trunk

  • Participants
  • Parent commits 6ac4e3e
  • Branches fast-forward

Comments (0)

Files changed (132)

File lib-python/conftest.py

     RegrTest('test_binhex.py'),
 
     RegrTest('test_binop.py', core=True),
-    RegrTest('test_bisect.py', core=True),
+    RegrTest('test_bisect.py', core=True, usemodules='_bisect'),
     RegrTest('test_bool.py', core=True),
     RegrTest('test_bsddb.py', skip="unsupported extension module"),
     RegrTest('test_bsddb185.py', skip="unsupported extension module"),

File lib-python/modified-2.5.2/test/mapping_tests.py

 # tests common to dict and UserDict
 import unittest
 import UserDict
+from test import test_support
 
 
 class BasicTestMappingProtocol(unittest.TestCase):
                     self.assertEqual(va, int(ka))
                     kb, vb = tb = b.popitem()
                     self.assertEqual(vb, int(kb))
-                    self.assert_(not(copymode < 0 and ta != tb))
+                    if test_support.check_impl_detail():
+                        self.assert_(not(copymode < 0 and ta != tb))
                 self.assert_(not a)
                 self.assert_(not b)
 

File lib-python/modified-2.5.2/test/test_descr.py

     except TypeError, msg:
         verify(str(msg).find("weak reference") >= 0)
     else:
-        verify(0, "weakref.ref(no) should be illegal")
+        # in PyPy it is (sometimes) possible to take a weakref here
+        #verify(0, "weakref.ref(no) should be illegal")
+        pass
     class Weak(object):
         __slots__ = ['foo', '__weakref__']
     yes = Weak()

File lib-python/modified-2.5.2/test/test_dict.py

+import unittest
+from test import test_support
+
+import sys, UserDict, cStringIO
+
+
+class DictTest(unittest.TestCase):
+    def test_constructor(self):
+        # calling built-in types without argument must return empty
+        self.assertEqual(dict(), {})
+        self.assert_(dict() is not {})
+
+    def test_bool(self):
+        self.assert_(not {})
+        self.assert_({1: 2})
+        self.assert_(bool({}) is False)
+        self.assert_(bool({1: 2}) is True)
+
+    def test_keys(self):
+        d = {}
+        self.assertEqual(d.keys(), [])
+        d = {'a': 1, 'b': 2}
+        k = d.keys()
+        self.assert_(d.has_key('a'))
+        self.assert_(d.has_key('b'))
+
+        self.assertRaises(TypeError, d.keys, None)
+
+    def test_values(self):
+        d = {}
+        self.assertEqual(d.values(), [])
+        d = {1:2}
+        self.assertEqual(d.values(), [2])
+
+        self.assertRaises(TypeError, d.values, None)
+
+    def test_items(self):
+        d = {}
+        self.assertEqual(d.items(), [])
+
+        d = {1:2}
+        self.assertEqual(d.items(), [(1, 2)])
+
+        self.assertRaises(TypeError, d.items, None)
+
+    def test_has_key(self):
+        d = {}
+        self.assert_(not d.has_key('a'))
+        d = {'a': 1, 'b': 2}
+        k = d.keys()
+        k.sort()
+        self.assertEqual(k, ['a', 'b'])
+
+        self.assertRaises(TypeError, d.has_key)
+
+    def test_contains(self):
+        d = {}
+        self.assert_(not ('a' in d))
+        self.assert_('a' not in d)
+        d = {'a': 1, 'b': 2}
+        self.assert_('a' in d)
+        self.assert_('b' in d)
+        self.assert_('c' not in d)
+
+        self.assertRaises(TypeError, d.__contains__)
+
+    def test_len(self):
+        d = {}
+        self.assertEqual(len(d), 0)
+        d = {'a': 1, 'b': 2}
+        self.assertEqual(len(d), 2)
+
+    def test_getitem(self):
+        d = {'a': 1, 'b': 2}
+        self.assertEqual(d['a'], 1)
+        self.assertEqual(d['b'], 2)
+        d['c'] = 3
+        d['a'] = 4
+        self.assertEqual(d['c'], 3)
+        self.assertEqual(d['a'], 4)
+        del d['b']
+        self.assertEqual(d, {'a': 4, 'c': 3})
+
+        self.assertRaises(TypeError, d.__getitem__)
+
+        class BadEq(object):
+            def __eq__(self, other):
+                raise Exc()
+
+        d = {}
+        d[BadEq()] = 42
+        self.assertRaises(KeyError, d.__getitem__, 23)
+
+        class Exc(Exception): pass
+
+        class BadHash(object):
+            fail = False
+            def __hash__(self):
+                if self.fail:
+                    raise Exc()
+                else:
+                    return 42
+
+        x = BadHash()
+        d[x] = 42
+        x.fail = True
+        self.assertRaises(Exc, d.__getitem__, x)
+
+    def test_clear(self):
+        d = {1:1, 2:2, 3:3}
+        d.clear()
+        self.assertEqual(d, {})
+
+        self.assertRaises(TypeError, d.clear, None)
+
+    def test_update(self):
+        d = {}
+        d.update({1:100})
+        d.update({2:20})
+        d.update({1:1, 2:2, 3:3})
+        self.assertEqual(d, {1:1, 2:2, 3:3})
+
+        d.update()
+        self.assertEqual(d, {1:1, 2:2, 3:3})
+
+        self.assertRaises((TypeError, AttributeError), d.update, None)
+
+        class SimpleUserDict:
+            def __init__(self):
+                self.d = {1:1, 2:2, 3:3}
+            def keys(self):
+                return self.d.keys()
+            def __getitem__(self, i):
+                return self.d[i]
+        d.clear()
+        d.update(SimpleUserDict())
+        self.assertEqual(d, {1:1, 2:2, 3:3})
+
+        class Exc(Exception): pass
+
+        d.clear()
+        class FailingUserDict:
+            def keys(self):
+                raise Exc
+        self.assertRaises(Exc, d.update, FailingUserDict())
+
+        class FailingUserDict:
+            def keys(self):
+                class BogonIter:
+                    def __init__(self):
+                        self.i = 1
+                    def __iter__(self):
+                        return self
+                    def next(self):
+                        if self.i:
+                            self.i = 0
+                            return 'a'
+                        raise Exc
+                return BogonIter()
+            def __getitem__(self, key):
+                return key
+        self.assertRaises(Exc, d.update, FailingUserDict())
+
+        class FailingUserDict:
+            def keys(self):
+                class BogonIter:
+                    def __init__(self):
+                        self.i = ord('a')
+                    def __iter__(self):
+                        return self
+                    def next(self):
+                        if self.i <= ord('z'):
+                            rtn = chr(self.i)
+                            self.i += 1
+                            return rtn
+                        raise StopIteration
+                return BogonIter()
+            def __getitem__(self, key):
+                raise Exc
+        self.assertRaises(Exc, d.update, FailingUserDict())
+
+        class badseq(object):
+            def __iter__(self):
+                return self
+            def next(self):
+                raise Exc()
+
+        self.assertRaises(Exc, {}.update, badseq())
+
+        self.assertRaises(ValueError, {}.update, [(1, 2, 3)])
+
+    def test_fromkeys(self):
+        self.assertEqual(dict.fromkeys('abc'), {'a':None, 'b':None, 'c':None})
+        d = {}
+        self.assert_(not(d.fromkeys('abc') is d))
+        self.assertEqual(d.fromkeys('abc'), {'a':None, 'b':None, 'c':None})
+        self.assertEqual(d.fromkeys((4,5),0), {4:0, 5:0})
+        self.assertEqual(d.fromkeys([]), {})
+        def g():
+            yield 1
+        self.assertEqual(d.fromkeys(g()), {1:None})
+        self.assertRaises(TypeError, {}.fromkeys, 3)
+        class dictlike(dict): pass
+        self.assertEqual(dictlike.fromkeys('a'), {'a':None})
+        self.assertEqual(dictlike().fromkeys('a'), {'a':None})
+        self.assert_(type(dictlike.fromkeys('a')) is dictlike)
+        self.assert_(type(dictlike().fromkeys('a')) is dictlike)
+        class mydict(dict):
+            def __new__(cls):
+                return UserDict.UserDict()
+        ud = mydict.fromkeys('ab')
+        self.assertEqual(ud, {'a':None, 'b':None})
+        self.assert_(isinstance(ud, UserDict.UserDict))
+        self.assertRaises(TypeError, dict.fromkeys)
+
+        class Exc(Exception): pass
+
+        class baddict1(dict):
+            def __init__(self):
+                raise Exc()
+
+        self.assertRaises(Exc, baddict1.fromkeys, [1])
+
+        class BadSeq(object):
+            def __iter__(self):
+                return self
+            def next(self):
+                raise Exc()
+
+        self.assertRaises(Exc, dict.fromkeys, BadSeq())
+
+        class baddict2(dict):
+            def __setitem__(self, key, value):
+                raise Exc()
+
+        self.assertRaises(Exc, baddict2.fromkeys, [1])
+
+    def test_copy(self):
+        d = {1:1, 2:2, 3:3}
+        self.assertEqual(d.copy(), {1:1, 2:2, 3:3})
+        self.assertEqual({}.copy(), {})
+        self.assertRaises(TypeError, d.copy, None)
+
+    def test_get(self):
+        d = {}
+        self.assert_(d.get('c') is None)
+        self.assertEqual(d.get('c', 3), 3)
+        d = {'a' : 1, 'b' : 2}
+        self.assert_(d.get('c') is None)
+        self.assertEqual(d.get('c', 3), 3)
+        self.assertEqual(d.get('a'), 1)
+        self.assertEqual(d.get('a', 3), 1)
+        self.assertRaises(TypeError, d.get)
+        self.assertRaises(TypeError, d.get, None, None, None)
+
+    def test_setdefault(self):
+        # dict.setdefault()
+        d = {}
+        self.assert_(d.setdefault('key0') is None)
+        d.setdefault('key0', [])
+        self.assert_(d.setdefault('key0') is None)
+        d.setdefault('key', []).append(3)
+        self.assertEqual(d['key'][0], 3)
+        d.setdefault('key', []).append(4)
+        self.assertEqual(len(d['key']), 2)
+        self.assertRaises(TypeError, d.setdefault)
+
+        class Exc(Exception): pass
+
+        class BadHash(object):
+            fail = False
+            def __hash__(self):
+                if self.fail:
+                    raise Exc()
+                else:
+                    return 42
+
+        x = BadHash()
+        d[x] = 42
+        x.fail = True
+        self.assertRaises(Exc, d.setdefault, x, [])
+
+    def test_popitem(self):
+        # dict.popitem()
+        for copymode in -1, +1:
+            # -1: b has same structure as a
+            # +1: b is a.copy()
+            for log2size in range(12):
+                size = 2**log2size
+                a = {}
+                b = {}
+                for i in range(size):
+                    a[repr(i)] = i
+                    if copymode < 0:
+                        b[repr(i)] = i
+                if copymode > 0:
+                    b = a.copy()
+                for i in range(size):
+                    ka, va = ta = a.popitem()
+                    self.assertEqual(va, int(ka))
+                    kb, vb = tb = b.popitem()
+                    self.assertEqual(vb, int(kb))
+                    if test_support.check_impl_detail():
+                        self.assert_(not(copymode < 0 and ta != tb))
+                self.assert_(not a)
+                self.assert_(not b)
+
+        d = {}
+        self.assertRaises(KeyError, d.popitem)
+
+    def test_pop(self):
+        # Tests for pop with specified key
+        d = {}
+        k, v = 'abc', 'def'
+        d[k] = v
+        self.assertRaises(KeyError, d.pop, 'ghi')
+
+        self.assertEqual(d.pop(k), v)
+        self.assertEqual(len(d), 0)
+
+        self.assertRaises(KeyError, d.pop, k)
+
+        # verify longs/ints get same value when key > 32 bits (for 64-bit archs)
+        # see SF bug #689659
+        x = 4503599627370496L
+        y = 4503599627370496
+        h = {x: 'anything', y: 'something else'}
+        self.assertEqual(h[x], h[y])
+
+        self.assertEqual(d.pop(k, v), v)
+        d[k] = v
+        self.assertEqual(d.pop(k, 1), v)
+
+        self.assertRaises(TypeError, d.pop)
+
+        class Exc(Exception): pass
+
+        class BadHash(object):
+            fail = False
+            def __hash__(self):
+                if self.fail:
+                    raise Exc()
+                else:
+                    return 42
+
+        x = BadHash()
+        d[x] = 42
+        x.fail = True
+        self.assertRaises(Exc, d.pop, x)
+
+    def test_mutatingiteration(self):
+        d = {}
+        d[1] = 1
+        try:
+            for i in d:
+                d[i+1] = 1
+        except RuntimeError:
+            pass
+        else:
+            self.fail("changing dict size during iteration doesn't raise Error")
+
+    def test_repr(self):
+        d = {}
+        self.assertEqual(repr(d), '{}')
+        d[1] = 2
+        self.assertEqual(repr(d), '{1: 2}')
+        d = {}
+        d[1] = d
+        self.assertEqual(repr(d), '{1: {...}}')
+
+        class Exc(Exception): pass
+
+        class BadRepr(object):
+            def __repr__(self):
+                raise Exc()
+
+        d = {1: BadRepr()}
+        self.assertRaises(Exc, repr, d)
+
+    def test_le(self):
+        self.assert_(not ({} < {}))
+        self.assert_(not ({1: 2} < {1L: 2L}))
+
+        class Exc(Exception): pass
+
+        class BadCmp(object):
+            def __eq__(self, other):
+                raise Exc()
+
+        d1 = {BadCmp(): 1}
+        d2 = {1: 1}
+        try:
+            d1 < d2
+        except Exc:
+            pass
+        else:
+            self.fail("< didn't raise Exc")
+
+    def test_missing(self):
+        # Make sure dict doesn't have a __missing__ method
+        self.assertEqual(hasattr(dict, "__missing__"), False)
+        self.assertEqual(hasattr({}, "__missing__"), False)
+        # Test several cases:
+        # (D) subclass defines __missing__ method returning a value
+        # (E) subclass defines __missing__ method raising RuntimeError
+        # (F) subclass sets __missing__ instance variable (no effect)
+        # (G) subclass doesn't define __missing__ at all
+        class D(dict):
+            def __missing__(self, key):
+                return 42
+        d = D({1: 2, 3: 4})
+        self.assertEqual(d[1], 2)
+        self.assertEqual(d[3], 4)
+        self.assert_(2 not in d)
+        self.assert_(2 not in d.keys())
+        self.assertEqual(d[2], 42)
+        class E(dict):
+            def __missing__(self, key):
+                raise RuntimeError(key)
+        e = E()
+        try:
+            e[42]
+        except RuntimeError, err:
+            self.assertEqual(err.args, (42,))
+        else:
+            self.fail("e[42] didn't raise RuntimeError")
+        class F(dict):
+            def __init__(self):
+                # An instance variable __missing__ should have no effect
+                self.__missing__ = lambda key: None
+        f = F()
+        try:
+            f[42]
+        except KeyError, err:
+            self.assertEqual(err.args, (42,))
+        else:
+            self.fail("f[42] didn't raise KeyError")
+        class G(dict):
+            pass
+        g = G()
+        try:
+            g[42]
+        except KeyError, err:
+            self.assertEqual(err.args, (42,))
+        else:
+            self.fail("g[42] didn't raise KeyError")
+
+    def test_tuple_keyerror(self):
+        # SF #1576657
+        d = {}
+        try:
+            d[(1,)]
+        except KeyError, e:
+            self.assertEqual(e.args, ((1,),))
+        else:
+            self.fail("missing KeyError")
+
+
+from test import mapping_tests
+
+class GeneralMappingTests(mapping_tests.BasicTestMappingProtocol):
+    type2test = dict
+
+class Dict(dict):
+    pass
+
+class SubclassMappingTests(mapping_tests.BasicTestMappingProtocol):
+    type2test = Dict
+
+def test_main():
+    test_support.run_unittest(
+        DictTest,
+        GeneralMappingTests,
+        SubclassMappingTests,
+    )
+
+if __name__ == "__main__":
+    test_main()

File lib_pypy/ctypes_config_cache/dumpcache.py

+import os
+from ctypes_configure import dumpcache
+from pypy.jit.backend import detect_cpu
+
+def dumpcache2(basename, config):
+    model = detect_cpu.autodetect_main_model_and_size()
+    filename = '_%s_%s_.py' % (basename, model)
+    dumpcache.dumpcache(__file__, filename, config)
+    #
+    filename = os.path.join(os.path.dirname(__file__),
+                            '_%s_cache.py' % (basename,))
+    g = open(filename, 'w')
+    print >> g, '''\
+try:
+    from __pypy__ import cpumodel
+except ImportError:
+    from pypy.jit.backend import detect_cpu
+    cpumodel = detect_cpu.autodetect_main_model_and_size()
+mod = __import__("ctypes_config_cache._%s_%%s_" %% (cpumodel,),
+                 None, None, ["*"])
+globals().update(mod.__dict__)\
+''' % (basename,)
+    g.close()

File lib_pypy/ctypes_config_cache/locale.ctc.py

 
 from ctypes_configure.configure import (configure, ExternalCompilationInfo,
     ConstantInteger, DefinedConstantInteger, SimpleType, check_eci)
-from ctypes_configure.dumpcache import dumpcache
+import dumpcache
 
 # ____________________________________________________________
 
 
 config['ALL_CONSTANTS'] = tuple(_CONSTANTS)
 config['HAS_LANGINFO'] = HAS_LANGINFO
-dumpcache(__file__, '_locale_cache.py', config)
+dumpcache.dumpcache2('locale', config)

File lib_pypy/ctypes_config_cache/pyexpat.ctc.py

 
 import ctypes
 from ctypes import c_char_p, c_int, c_void_p, c_char
-from ctypes_configure import configure, dumpcache
+from ctypes_configure import configure
+import dumpcache
 
 
 class CConfigure:
 
 config = configure.configure(CConfigure)
 
-dumpcache.dumpcache(__file__, '_pyexpat_cache.py', config)
+dumpcache.dumpcache2('pyexpat', config)

File lib_pypy/ctypes_config_cache/rebuild.py

         sys.path[:] = path
 
 def try_rebuild():
+    from pypy.jit.backend import detect_cpu
+    model = detect_cpu.autodetect_main_model_and_size()
+    # remove the files '_*_model_.py'
+    left = {}
+    for p in os.listdir(_dirpath):
+        if p.startswith('_') and (p.endswith('_%s_.py' % model) or
+                                  p.endswith('_%s_.pyc' % model)):
+            os.unlink(os.path.join(_dirpath, p))
+        elif p.startswith('_') and (p.endswith('_.py') or
+                                    p.endswith('_.pyc')):
+            for i in range(2, len(p)-4):
+                left[p[:i]] = True
+    # remove the files '_*_cache.py' if there is no '_*_*_.py' left around
     for p in os.listdir(_dirpath):
         if p.startswith('_') and (p.endswith('_cache.py') or
                                   p.endswith('_cache.pyc')):
-            os.unlink(os.path.join(_dirpath, p))
+            if p[:-9] not in left:
+                os.unlink(os.path.join(_dirpath, p))
+    #
     for p in os.listdir(_dirpath):
         if p.endswith('.ctc.py'):
             try:

File lib_pypy/ctypes_config_cache/resource.ctc.py

 
 
 from ctypes import sizeof
-from ctypes_configure.dumpcache import dumpcache
+import dumpcache
 from ctypes_configure.configure import (configure,
     ExternalCompilationInfo, ConstantInteger, DefinedConstantInteger,
     SimpleType)
         del config[key]
 
 config['ALL_CONSTANTS'] = _CONSTANTS + tuple(optional_constants)
-dumpcache(__file__, '_resource_cache.py', config)
+dumpcache.dumpcache2('resource', config)

File lib_pypy/ctypes_config_cache/syslog.ctc.py

 
 from ctypes_configure.configure import (configure,
     ExternalCompilationInfo, ConstantInteger, DefinedConstantInteger)
-from ctypes_configure.dumpcache import dumpcache
+import dumpcache
 
 
 _CONSTANTS = (
 all_constants = config.keys()
 all_constants.sort()
 config['ALL_CONSTANTS'] = tuple(all_constants)
-dumpcache(__file__, '_syslog_cache.py', config)
+dumpcache.dumpcache2('syslog', config)

File lib_pypy/ctypes_config_cache/test/test_cache.py

 
 def run(filename, outputname):
     filepath = dirpath.join(filename)
-    tmpdir = udir.ensure('testcache-' + filename, dir=True)
-    outputpath = tmpdir.join(outputname)
-    d = {'__file__': str(outputpath)}
+    tmpdir2 = udir.ensure('testcache-' + filename, dir=True)
+    tmpdir = tmpdir2.ensure('ctypes_config_cache', dir=True)
+    tmpdir.join('__init__.py').write('\n')
+    tmpdir.join('dumpcache.py').write(dirpath.join('dumpcache.py').read())
     path = sys.path[:]
     try:
-        sys.path.insert(0, str(dirpath))
-        execfile(str(filepath), d)
+        sys.path.insert(0, str(tmpdir))
+        execfile(str(filepath), {})
     finally:
         sys.path[:] = path
+        sys.modules.pop('dumpcache', None)
     #
+    outputpath = tmpdir.join(outputname)
     assert outputpath.check(exists=1)
     d = {}
-    execfile(str(outputpath), d)
+    try:
+        sys.path.insert(0, str(tmpdir2))
+        execfile(str(outputpath), d)
+    finally:
+        sys.path[:] = path
+        sys.modules.pop('ctypes_config_cache', None)
     return d
 
 

File lib_pypy/pypy_test/test_ctypes_support.py

     assert get_errno() == 0
 
 def test_argument_conversion_and_checks():
-    import ctypes
-    libc = ctypes.cdll.LoadLibrary("libc.so.6")
-    libc.strlen.argtypes = ctypes.c_char_p,
-    libc.strlen.restype = ctypes.c_size_t
-    assert libc.strlen("eggs") == 4
-    
+    strlen = standard_c_lib.strlen
+    strlen.argtypes = [c_char_p]
+    strlen.restype = c_size_t
+    assert strlen("eggs") == 4
+
     # Should raise ArgumentError, not segfault
-    py.test.raises(ctypes.ArgumentError, libc.strlen, False)
+    py.test.raises(ArgumentError, strlen, False)
 

File pypy/annotation/bookkeeper.py

 from pypy.rpython import extregistry
 from pypy.tool.identity_dict import identity_dict
 
-class Stats:
+class Stats(object):
 
     def __init__(self, bookkeeper):
         self.bookkeeper = bookkeeper
     def consider_dict_delitem(self, dic):
         return dic
 
-class Bookkeeper:
+class Bookkeeper(object):
     """The log of choices that have been made while analysing the operations.
     It ensures that the same 'choice objects' will be returned if we ask
     again during reflowing.  Like ExecutionContext, there is an implicit
         return True
 
 # for parsing call arguments
-class RPythonCallsSpace:
+class RPythonCallsSpace(object):
     """Pseudo Object Space providing almost no real operation.
     For the Arguments class: if it really needs other operations, it means
     that the call pattern is too complex for R-Python.

File pypy/annotation/classdef.py

 #        same name in all subclasses of A, if any.  (Parent class attributes can
 #        be visible in reads from instances of subclasses.)
 
-class Attribute:
+class Attribute(object):
     # readonly-ness
     # SomeThing-ness
     # NB.  an attribute is readonly if it is a constant class attribute.
 
 # ____________________________________________________________
 
-class InstanceSource:
+class InstanceSource(object):
     instance_level = True
 
     def __init__(self, bookkeeper, obj):

File pypy/annotation/description.py

 from pypy.tool.sourcetools import valid_identifier
 from pypy.tool.pairtype import extendabletype
 
-class CallFamily:
+class CallFamily(object):
     """A family of Desc objects that could be called from common call sites.
     The call families are conceptually a partition of all (callable) Desc
     objects, where the equivalence relation is the transitive closure of
             self.total_calltable_size += 1
 
 
-class FrozenAttrFamily:
+class FrozenAttrFamily(object):
     """A family of FrozenDesc objects that have any common 'getattr' sites.
     The attr families are conceptually a partition of FrozenDesc objects,
     where the equivalence relation is the transitive closure of:
         self.attrs[attrname] = s_value
 
 
-class ClassAttrFamily:
+class ClassAttrFamily(object):
     """A family of ClassDesc objects that have common 'getattr' sites for a
     given attribute name.  The attr families are conceptually a partition
     of ClassDesc objects, where the equivalence relation is the transitive

File pypy/annotation/dictdef.py

             dictdef.dictvalue = self
 
 
-class DictDef:
+class DictDef(object):
     """A dict definition remembers how general the keys and values in that
     particular dict have to be.  Every dict creation makes a new DictDef,
     and the union of two dicts merges the DictKeys and DictValues that each

File pypy/annotation/listdef.py

 class TooLateForChange(Exception):
     pass
 
-class ListItem:
+class ListItem(object):
     mutated = False    # True for lists mutated after creation
     resized = False    # True for lists resized after creation
     range_step = None  # the step -- only for lists only created by a range()
         return updated
 
 
-class ListDef:
+class ListDef(object):
     """A list definition remembers how general the items in that particular
     list have to be.  Every list creation makes a new ListDef, and the union
     of two lists merges the ListItems that each ListDef stores."""

File pypy/annotation/specialize.py

 # ____________________________________________________________________________
 # specializations
 
-class MemoTable:
+class MemoTable(object):
     def __init__(self, funcdesc, args, value):
         self.funcdesc = funcdesc
         self.table = {args: value}

File pypy/annotation/unaryop.py

     def method_clear(dct):
         pass
 
+    def method_popitem(dct):
+        return dct.getanyitem('items')
+
     def _can_only_throw(dic, *ignore):
         if dic1.dictdef.dictkey.custom_eq_hash:
             return None    # r_dict: can throw anything

File pypy/config/pypyoption.py

 working_modules = default_modules.copy()
 working_modules.update(dict.fromkeys(
     ["_socket", "unicodedata", "mmap", "fcntl",
-      "rctime" , "select", "zipimport", "_lsprof",
+     "rctime" , "select", "zipimport", "_lsprof",
      "crypt", "signal", "_rawffi", "termios", "zlib", "bz2",
      "struct", "_hashlib", "_md5", "_sha", "_minimal_curses", "cStringIO",
      "thread", "itertools", "pyexpat", "_ssl", "cpyext", "array",
-     "_multiprocessing", '_warnings']
+     "_bisect", "_multiprocessing", '_warnings']
 ))
 
 working_oo_modules = default_modules.copy()
                    requires=[("objspace.opcodes.CALL_LIKELY_BUILTIN", False),
                              ("objspace.honor__builtins__", False)]),
 
-        BoolOption("withsharingdict",
-                   "use dictionaries that share the keys part",
-                   default=False),
-
         BoolOption("withdictmeasurement",
                    "create huge files with masses of information "
                    "about dictionaries",
                    default=False),
 
-        BoolOption("withinlineddict",
-                   "make instances more compact by revoming a level of indirection",
-                   default=False,
-                   requires=[("objspace.std.withshadowtracking", False)]),
-
         BoolOption("withmapdict",
                    "make instances really small but slow without the JIT",
                    default=False,
-                   requires=[("objspace.std.withshadowtracking", False),
-                             ("objspace.std.withinlineddict", False),
-                             ("objspace.std.withsharingdict", False),
-                             ("objspace.std.getattributeshortcut", True),
+                   requires=[("objspace.std.getattributeshortcut", True),
                              ("objspace.std.withtypeversion", True),
                        ]),
 
                    # weakrefs needed, because of get_subclasses()
                    requires=[("translation.rweakref", True)]),
 
-        BoolOption("withshadowtracking",
-                   "track whether an instance attribute shadows a type"
-                   " attribute",
-                   default=False,
-                   requires=[("objspace.std.withtypeversion", True),
-                             ("translation.rweakref", True)]),
         BoolOption("withmethodcache",
                    "try to cache method lookups",
                    default=False,
         config.objspace.std.suggest(optimized_list_getitem=True)
         config.objspace.std.suggest(getattributeshortcut=True)
         config.objspace.std.suggest(newshortcut=True)        
-        if type_system != 'ootype':
-            config.objspace.std.suggest(withsharingdict=True)
-        config.objspace.std.suggest(withinlineddict=True)
 
     # extra costly optimizations only go in level 3
     if level == '3':
     # extra optimizations with the JIT
     if level == 'jit':
         config.objspace.std.suggest(withcelldict=True)
-        #config.objspace.std.suggest(withmapdict=True)
+        config.objspace.std.suggest(withmapdict=True)
 
 
 def enable_allworkingmodules(config):

File pypy/config/test/test_pypyoption.py

 def test_set_pypy_opt_level():
     conf = get_pypy_config()
     set_pypy_opt_level(conf, '2')
-    assert conf.objspace.std.withsharingdict
+    assert conf.objspace.std.newshortcut
     conf = get_pypy_config()
     set_pypy_opt_level(conf, '0')
     assert not conf.objspace.std.newshortcut
 
     assert not conf.objspace.std.withtypeversion
     assert not conf.objspace.std.withmethodcache
-    assert not conf.objspace.std.withshadowtracking
 
 def test_check_documentation():
     def check_file_exists(fn):

File pypy/doc/config/objspace.opcodes.CALL_METHOD.txt

 case.  So far, this only works for calls with no keyword, no ``*arg``
 and no ``**arg`` but it would be easy to extend.
 
-Gives the best results combined with :config:`objspace.std.withshadowtracking`.
-
 For more information, see the section in `Standard Interpreter Optimizations`_.
 
 .. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#lookup-method-call-method

File pypy/doc/config/objspace.std.withinlineddict.txt

-Make instances smaller by creating the __dict__ only when somebody actually
-accesses it. Also makes attribute accesses a tiny bit faster.

File pypy/doc/config/objspace.std.withshadowtracking.txt

-Enable "shadow tracking". This means a special dict representation is used
--- but only for instance
-dictionaries. The instance dictionary tracks whether an instance attribute
-shadows an attribute of its class. This makes method calls slightly faster in
-the following way: When calling a method the first thing that is checked is the
-class dictionary to find descriptors. Usually, when a method is found, the
-instance dictionary is then checked for instance attributes shadowing the class
-attribute. If we know that there is no shadowing (since our instance dict tells
-us that) we can save this lookup on the instance dictionary.

File pypy/doc/config/objspace.std.withsharingdict.txt

-Enable "sharing dictionaries".
-
-See the section in `Standard Interpreter Optimizations`_ for more details.
-
-.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#sharing-dicts
-

File pypy/doc/config/objspace.usemodules._bisect.txt

+Use the '_bisect' module.
+Used, optionally, by the 'bisect' standard library module. This module is expected to be working and is included by default.
+
+

File pypy/doc/getting-started.txt

 translation process - as opposed to encoding low level details into the
 language implementation itself. `more...`_
 
+
 .. _Python: http://docs.python.org/ref
 .. _`more...`: architecture.html
 

File pypy/doc/interpreter-optimizations.txt

 dicts:
 the representation of the instance dict contains only a list of values.
 
-You can enable this feature with the :config:`objspace.std.withsharingdict`
-option.
+A more advanced version of sharing dicts, called *map dicts*, is available
+with the :config:`objspace.std.withmapdict` option.
 
 Builtin-Shadowing
 +++++++++++++++++
 shadowing the class attribute. If we know that there is no shadowing (since
 instance dict tells us that) we can save this lookup on the instance dictionary.
 
-You can enable this feature with the :config:`objspace.std.withshadowtracking`
-option.
+*This was deprecated and is no longer available.*
 
 
 Method Caching

File pypy/interpreter/baseobjspace.py

             return space.finditem_str(w_dict, attr)
         return None
 
-    def getdictvalue_attr_is_in_class(self, space, attr):
-        return self.getdictvalue(space, attr)
-
-    def setdictvalue(self, space, attr, w_value, shadows_type=True):
+    def setdictvalue(self, space, attr, w_value):
         w_dict = self.getdict()
         if w_dict is not None:
-            space.setitem_str(w_dict, attr, w_value, shadows_type)
+            space.setitem_str(w_dict, attr, w_value)
             return True
         return False
 
 
         self.interned_strings = {}
         self.actionflag = ActionFlag()    # changed by the signal module
+        self.check_signal_action = None   # changed by the signal module
         self.user_del_action = UserDelAction(self)
         self.frame_trace_action = FrameTraceAction(self)
-        self.actionflag.register_action(self.user_del_action)
-        self.actionflag.register_action(self.frame_trace_action)
 
         from pypy.interpreter.pycode import cpython_magic, default_magic
         self.our_magic = default_magic
         """shortcut for space.int_w(space.hash(w_obj))"""
         return self.int_w(self.hash(w_obj))
 
-    def setitem_str(self, w_obj, key, w_value, shadows_type=True):
+    def setitem_str(self, w_obj, key, w_value):
         return self.setitem(w_obj, self.wrap(key), w_value)
 
     def finditem_str(self, w_obj, key):

File pypy/interpreter/executioncontext.py

 from pypy.rlib.unroll import unrolling_iterable
 from pypy.rlib import jit
 
+TICK_COUNTER_STEP = 100
+
 def app_profile_call(space, w_callable, frame, event, w_arg):
     space.call_function(w_callable,
                         space.wrap(frame),
     # XXX   self.w_tracefunc, self.profilefunc
     # XXX   frame.is_being_profiled
 
+    # XXX [fijal] but they are not: is_being_profiled is checked in many
+    #     places, and so is w_tracefunc
+
     def __init__(self, space):
         self.space = space
         self.topframeref = jit.vref_None
         if self.w_tracefunc is not None:
             self._trace(frame, 'return', w_retval)
 
-    def bytecode_trace(self, frame):
+    def bytecode_trace(self, frame, decr_by=TICK_COUNTER_STEP):
         "Trace function called before each bytecode."
         # this is split into a fast path and a slower path that is
         # not invoked every time bytecode_trace() is.
         actionflag = self.space.actionflag
-        ticker = actionflag.get()
-        if actionflag.has_bytecode_counter:    # this "if" is constant-folded
-            ticker += 1
-            actionflag.set(ticker)
-        if ticker & actionflag.interesting_bits:  # fast check
+        if actionflag.decrement_ticker(decr_by) < 0:
             actionflag.action_dispatcher(self, frame)     # slow path
     bytecode_trace._always_inline_ = True
 
     def bytecode_trace_after_exception(self, frame):
         "Like bytecode_trace(), but without increasing the ticker."
         actionflag = self.space.actionflag
-        ticker = actionflag.get()
-        if ticker & actionflag.interesting_bits:  # fast check
+        if actionflag.get_ticker() < 0:
             actionflag.action_dispatcher(self, frame)     # slow path
     bytecode_trace_after_exception._always_inline_ = True
 
                 frame.last_exception = last_exception
                 self.is_tracing -= 1
 
+    def checksignals(self):
+        """Similar to PyErr_CheckSignals().  If called in the main thread,
+        and if signals are pending for the process, deliver them now
+        (i.e. call the signal handlers)."""
+        if self.space.check_signal_action is not None:
+            self.space.check_signal_action.perform(self, None)
+
     def _freeze_(self):
         raise Exception("ExecutionContext instances should not be seen during"
                         " translation.  Now is a good time to inspect the"
                         " traceback and see where this one comes from :-)")
 
 
-class AbstractActionFlag:
-    """This holds the global 'action flag'.  It is a single bitfield
-    integer, with bits corresponding to AsyncAction objects that need to
-    be immediately triggered.  The correspondance from bits to
-    AsyncAction instances is built at translation time.  We can quickly
-    check if there is anything at all to do by checking if any of the
-    relevant bits is set.  If threads are enabled, they consume the 20
-    lower bits to hold a counter incremented at each bytecode, to know
-    when to release the GIL.
+class AbstractActionFlag(object):
+    """This holds the 'ticker' as an integer.  If threads are enabled,
+    it is decremented at each bytecode; when it becomes negative, we
+    release the GIL.  And whether we have threads or not, it is forced
+    to a negative value whenever we fire any of the asynchronous actions.
     """
     def __init__(self):
         self._periodic_actions = []
         self._nonperiodic_actions = []
-        self.unused_bits = self.FREE_BITS[:]
         self.has_bytecode_counter = False
-        self.interesting_bits = 0
+        self.fired_actions = None
+        self.checkinterval_scaled = 100 * TICK_COUNTER_STEP
         self._rebuild_action_dispatcher()
 
     def fire(self, action):
-        """Request for the action to be run before the next opcode.
-        The action must have been registered at space initalization time."""
-        ticker = self.get()
-        self.set(ticker | action.bitmask)
+        """Request for the action to be run before the next opcode."""
+        if not action._fired:
+            action._fired = True
+            if self.fired_actions is None:
+                self.fired_actions = []
+            self.fired_actions.append(action)
+            # set the ticker to -1 in order to force action_dispatcher()
+            # to run at the next possible bytecode
+            self.reset_ticker(-1)
 
-    def register_action(self, action):
-        "NOT_RPYTHON"
-        assert isinstance(action, AsyncAction)
-        if action.bitmask == 0:
-            while True:
-                action.bitmask = self.unused_bits.pop(0)
-                if not (action.bitmask & self.interesting_bits):
-                    break
-        self.interesting_bits |= action.bitmask
-        if action.bitmask & self.BYTECODE_COUNTER_OVERFLOW_BIT:
-            assert action.bitmask == self.BYTECODE_COUNTER_OVERFLOW_BIT
-            self._periodic_actions.append(action)
+    def register_periodic_action(self, action, use_bytecode_counter):
+        """NOT_RPYTHON:
+        Register the PeriodicAsyncAction action to be called whenever the
+        tick counter becomes smaller than 0.  If 'use_bytecode_counter' is
+        True, make sure that we decrease the tick counter at every bytecode.
+        This is needed for threads.  Note that 'use_bytecode_counter' can be
+        False for signal handling, because whenever the process receives a
+        signal, the tick counter is set to -1 by C code in signals.h.
+        """
+        assert isinstance(action, PeriodicAsyncAction)
+        self._periodic_actions.append(action)
+        if use_bytecode_counter:
             self.has_bytecode_counter = True
-            self.force_tick_counter()
-        else:
-            self._nonperiodic_actions.append((action, action.bitmask))
         self._rebuild_action_dispatcher()
 
-    def setcheckinterval(self, space, interval):
-        if interval < self.CHECK_INTERVAL_MIN:
-            interval = self.CHECK_INTERVAL_MIN
-        elif interval > self.CHECK_INTERVAL_MAX:
-            interval = self.CHECK_INTERVAL_MAX
-        space.sys.checkinterval = interval
-        self.force_tick_counter()
+    def getcheckinterval(self):
+        return self.checkinterval_scaled // TICK_COUNTER_STEP
 
-    def force_tick_counter(self):
-        # force the tick counter to a valid value -- this actually forces
-        # it to reach BYTECODE_COUNTER_OVERFLOW_BIT at the next opcode.
-        ticker = self.get()
-        ticker &= ~ self.BYTECODE_COUNTER_OVERFLOW_BIT
-        ticker |= self.BYTECODE_COUNTER_MASK
-        self.set(ticker)
+    def setcheckinterval(self, interval):
+        MAX = sys.maxint // TICK_COUNTER_STEP
+        if interval < 1:
+            interval = 1
+        elif interval > MAX:
+            interval = MAX
+        self.checkinterval_scaled = interval * TICK_COUNTER_STEP
 
     def _rebuild_action_dispatcher(self):
         periodic_actions = unrolling_iterable(self._periodic_actions)
-        nonperiodic_actions = unrolling_iterable(self._nonperiodic_actions)
-        has_bytecode_counter = self.has_bytecode_counter
 
         @jit.dont_look_inside
         def action_dispatcher(ec, frame):
-            # periodic actions
-            if has_bytecode_counter:
-                ticker = self.get()
-                if ticker & self.BYTECODE_COUNTER_OVERFLOW_BIT:
-                    # We must run the periodic actions now, but first
-                    # reset the bytecode counter (the following line
-                    # works by assuming that we just overflowed the
-                    # counter, i.e. BYTECODE_COUNTER_OVERFLOW_BIT is
-                    # set but none of the BYTECODE_COUNTER_MASK bits
-                    # are).
-                    ticker -= ec.space.sys.checkinterval
-                    self.set(ticker)
-                    for action in periodic_actions:
-                        action.perform(ec, frame)
+            # periodic actions (first reset the bytecode counter)
+            self.reset_ticker(self.checkinterval_scaled)
+            for action in periodic_actions:
+                action.perform(ec, frame)
 
             # nonperiodic actions
-            for action, bitmask in nonperiodic_actions:
-                ticker = self.get()
-                if ticker & bitmask:
-                    self.set(ticker & ~ bitmask)
+            list = self.fired_actions
+            if list is not None:
+                self.fired_actions = None
+                for action in list:
+                    action._fired = False
                     action.perform(ec, frame)
 
         action_dispatcher._dont_inline_ = True
         self.action_dispatcher = action_dispatcher
 
-    # Bits reserved for the bytecode counter, if used
-    BYTECODE_COUNTER_MASK = (1 << 20) - 1
-    BYTECODE_COUNTER_OVERFLOW_BIT = (1 << 20)
-
-    # Free bits
-    FREE_BITS = [1 << _b for _b in range(21, LONG_BIT-1)]
-
-    # The acceptable range of values for sys.checkinterval, so that
-    # the bytecode_counter fits in 20 bits
-    CHECK_INTERVAL_MIN = 1
-    CHECK_INTERVAL_MAX = BYTECODE_COUNTER_OVERFLOW_BIT
-
 
 class ActionFlag(AbstractActionFlag):
     """The normal class for space.actionflag.  The signal module provides
     a different one."""
-    _flags = 0
+    _ticker = 0
 
-    def get(self):
-        return self._flags
+    def get_ticker(self):
+        return self._ticker
 
-    def set(self, value):
-        self._flags = value
+    def reset_ticker(self, value):
+        self._ticker = value
+
+    def decrement_ticker(self, by):
+        value = self._ticker
+        if self.has_bytecode_counter:    # this 'if' is constant-folded
+            value -= by
+            self._ticker = value
+        return value
 
 
 class AsyncAction(object):
     asynchronously with regular bytecode execution, but that still need
     to occur between two opcodes, not at a completely random time.
     """
-    bitmask = 0      # means 'please choose one bit automatically'
+    _fired = False
 
     def __init__(self, space):
         self.space = space
     def fire_after_thread_switch(self):
         """Bit of a hack: fire() the action but only the next time the GIL
         is released and re-acquired (i.e. after a potential thread switch).
-        Don't call this if threads are not enabled.
+        Don't call this if threads are not enabled.  Currently limited to
+        one action (i.e. reserved for CheckSignalAction from module/signal).
         """
         from pypy.module.thread.gil import spacestate
-        spacestate.set_actionflag_bit_after_thread_switch |= self.bitmask
+        spacestate.action_after_thread_switch = self
 
     def perform(self, executioncontext, frame):
         """To be overridden."""
     """Abstract base class for actions that occur automatically
     every sys.checkinterval bytecodes.
     """
-    bitmask = ActionFlag.BYTECODE_COUNTER_OVERFLOW_BIT
 
 
 class UserDelAction(AsyncAction):

File pypy/interpreter/gateway.py

 # internal non-translatable parts: 
 import py
 
-class SignatureBuilder:
+class SignatureBuilder(object):
     "NOT_RPYTHON"
     def __init__(self, func=None, argnames=None, varargname=None,
                  kwargname=None, name = None):
 
 #________________________________________________________________
 
-class UnwrapSpecRecipe:
+class UnwrapSpecRecipe(object):
     "NOT_RPYTHON"
 
     bases_order = [Wrappable, W_Root, ObjSpace, Arguments, object]

File pypy/interpreter/pyopcode.py

     state_pack_variables = staticmethod(state_pack_variables)
 
 
-class FrameBlock:
+class FrameBlock(object):
 
     """Abstract base class for frame blocks from the blockstack,
     used by the SETUP_XXX and POP_BLOCK opcodes."""

File pypy/interpreter/test/test_executioncontext.py

 
         space = self.space
         a1 = DemoAction(space)
-        space.actionflag.register_action(a1)
         for i in range(20):
             # assert does not raise:
             space.appexec([], """():
 
         space = self.space
         a2 = DemoAction(space)
-        space.actionflag.register_action(a2)
+        space.actionflag.register_periodic_action(a2, True)
         try:
             for i in range(500):
                 space.appexec([], """():
                 """)
         except Finished:
             pass
-        assert space.sys.checkinterval / 10 < i < space.sys.checkinterval * 3
+        checkinterval = space.actionflag.getcheckinterval()
+        assert checkinterval / 10 < i < checkinterval * 1.1
 
     def test_llprofile(self):
         l = []

File pypy/interpreter/typedef.py

         from pypy.objspace.std.mapdict import BaseMapdictObject, ObjectMixin
         add(BaseMapdictObject)
         add(ObjectMixin)
+        body["user_overridden_class"] = True
         features = ()
 
     if "user" in features:     # generic feature needed by all subcls
                 return self.slots_w[index]
         add(Proto)
 
-    wantdict = "dict" in features
-    if wantdict and config.objspace.std.withinlineddict:
-        from pypy.objspace.std.objectobject import W_ObjectObject
-        from pypy.objspace.std.inlinedict import make_mixin
-        if supercls is W_ObjectObject:
-            Mixin = make_mixin(config)
-            add(Mixin)
-            wantdict = False
-
-    if wantdict:
+    if "dict" in features:
         base_user_setup = supercls.user_setup.im_func
         if "user_setup" in body:
             base_user_setup = body["user_setup"]
             def setclass(self, space, w_subtype):
                 # only used by descr_set___class__
                 self.w__class__ = w_subtype
-                if space.config.objspace.std.withshadowtracking:
-                    self.w__dict__.set_shadows_anything()
-
-            def getdictvalue_attr_is_in_class(self, space, name):
-                w_dict = self.w__dict__
-                if space.config.objspace.std.withshadowtracking:
-                    if not w_dict.shadows_anything():
-                        return None
-                return space.finditem_str(w_dict, name)
 
         add(Proto)
 

File pypy/jit/backend/detect_cpu.py

         mach = os.popen('uname -m', 'r').read().strip()
         if not mach:
             raise ProcessorAutodetectError, "cannot run 'uname -m'"
-    if mach == 'x86_64':
-        if sys.maxint == 2147483647:
-            mach = 'x86'     # it's a 64-bit processor but in 32-bits mode, maybe
-        else:
-            assert sys.maxint == 2 ** 63 - 1
     try:
         return {'i386': 'x86',
                 'i486': 'x86',
                 'i86pc': 'x86',    # Solaris/Intel
                 'x86':   'x86',    # Apple
                 'Power Macintosh': 'ppc',
-                'x86_64': 'x86_64', 
+                'x86_64': 'x86', 
                 }[mach]
     except KeyError:
-        raise ProcessorAutodetectError, "unsupported processor '%s'" % mach
+        return mach
+
+def autodetect_main_model_and_size():
+    model = autodetect_main_model()
+    if sys.maxint == 2**31-1:
+        model += '_32'
+    elif sys.maxint == 2**63-1:
+        model += '_64'
+    else:
+        raise AssertionError, "bad value for sys.maxint"
+    return model
 
 def autodetect():
     model = autodetect_main_model()
-    if model == 'x86':
-        from pypy.jit.backend.x86.detect_sse2 import detect_sse2
-        if not detect_sse2():
-            model = 'x86-without-sse2'
+    if sys.maxint == 2**63-1:
+        model += '_64'
+    else:
+        assert sys.maxint == 2**31-1
+        if model == 'x86':
+            from pypy.jit.backend.x86.detect_sse2 import detect_sse2
+            if not detect_sse2():
+                model = 'x86-without-sse2'
     return model
 
 def getcpuclassname(backend_name="auto"):

File pypy/jit/backend/llgraph/llimpl.py

     'unicodegetitem'  : (('ref', 'int'), 'int'),
     'unicodesetitem'  : (('ref', 'int', 'int'), 'int'),
     'cast_ptr_to_int' : (('ref',), 'int'),
-    'debug_merge_point': (('ref',), None),
+    'debug_merge_point': (('ref', 'int'), None),
     'force_token'     : ((), 'int'),
     'call_may_force'  : (('int', 'varargs'), 'intorptr'),
     'guard_not_forced': ((), None),
         #
         return _op_default_implementation
 
-    def op_debug_merge_point(self, _, value):
+    def op_debug_merge_point(self, _, value, recdepth):
         from pypy.jit.metainterp.warmspot import get_stats
         loc = ConstPtr(value)._get_str()
         get_stats().add_merge_point_location(loc)

File pypy/jit/backend/llgraph/runner.py

             self._descrs[key] = descr
             return descr
 
-    def compile_bridge(self, faildescr, inputargs, operations):
+    def compile_bridge(self, faildescr, inputargs, operations, log=True):
         c = llimpl.compile_start()
         self._compile_loop_or_bridge(c, inputargs, operations)
         old, oldindex = faildescr._compiled_fail
         llimpl.compile_redirect_fail(old, oldindex, c)
 
-    def compile_loop(self, inputargs, operations, loopdescr):
+    def compile_loop(self, inputargs, operations, loopdescr, log=True):
         """In a real assembler backend, this should assemble the given
         list of operations.  Here we just generate a similar CompiledLoop
         instance.  The code here is RPython, whereas the code in llimpl

File pypy/jit/backend/llsupport/test/test_gc.py

 
     def test_get_rid_of_debug_merge_point(self):
         operations = [
-            ResOperation(rop.DEBUG_MERGE_POINT, ['dummy'], None),
+            ResOperation(rop.DEBUG_MERGE_POINT, ['dummy', 2], None),
             ]
         gc_ll_descr = self.gc_ll_descr
         gc_ll_descr.rewrite_assembler(None, operations)

File pypy/jit/backend/model.py

         pass
 
 
-    def compile_loop(self, inputargs, operations, looptoken):
+    def compile_loop(self, inputargs, operations, looptoken, log=True):
         """Assemble the given loop.
         Extra attributes should be put in the LoopToken to
         point to the compiled loop in assembler.
         """
         raise NotImplementedError
 
-    def compile_bridge(self, faildescr, inputargs, operations):
+    def compile_bridge(self, faildescr, inputargs, operations, log=True):
         """Assemble the bridge.
         The FailDescr is the descr of the original guard that failed.
         """

File pypy/jit/backend/x86/assembler.py

         self.mc.RET()
         self.mc.done()
 
-    def assemble_loop(self, inputargs, operations, looptoken):
+    def assemble_loop(self, inputargs, operations, looptoken, log):
         """adds the following attributes to looptoken:
                _x86_loop_code       (an integer giving an address)
                _x86_bootstrap_code  (an integer giving an address)
                _x86_frame_depth
                _x86_param_depth
                _x86_arglocs
+               _x86_debug_checksum
         """
         if not we_are_translated():
             # Arguments should be unique
 
         self.setup()
         funcname = self._find_debug_merge_point(operations)
-
+        if log:
+            self._register_counter()
+            operations = self._inject_debugging_code(looptoken, operations)
         
         regalloc = RegAlloc(self, self.cpu.translate_support_code)
-        operations = self._inject_debugging_code(operations)
         arglocs = regalloc.prepare_loop(inputargs, operations, looptoken)
         looptoken._x86_arglocs = arglocs
 
         self._assemble_bootstrap_direct_call(arglocs, curadr,
                                              frame_depth+param_depth)
         #
-        debug_print("Loop #", looptoken.number, "has address",
-                    looptoken._x86_loop_code, "to", self.mc.tell())
+        debug_print("Loop #%d has address %x to %x" % (looptoken.number,
+                                                       looptoken._x86_loop_code,
+                                                       self.mc.tell()))
         self.mc.end_function()
         self.write_pending_failure_recoveries()
         
-    def assemble_bridge(self, faildescr, inputargs, operations):
+    def assemble_bridge(self, faildescr, inputargs, operations, log):
         if not we_are_translated():
             # Arguments should be unique
             assert len(set(inputargs)) == len(inputargs)
 
         self.setup()
         funcname = self._find_debug_merge_point(operations)
+        if log:
+            self._register_counter()
+            operations = self._inject_debugging_code(faildescr, operations)
 
         arglocs = self.rebuild_faillocs_from_descr(
             faildescr._x86_failure_recovery_bytecode)
             assert ([loc.assembler() for loc in arglocs] ==
                     [loc.assembler() for loc in faildescr._x86_debug_faillocs])
         regalloc = RegAlloc(self, self.cpu.translate_support_code)
-        operations = self._inject_debugging_code(operations)
         fail_depths = faildescr._x86_current_depths
         regalloc.prepare_bridge(fail_depths, inputargs, arglocs,
                                 operations)
             faildescr._x86_bridge_param_depth = param_depth
         # patch the jump from original guard
         self.patch_jump_for_descr(faildescr, adr_bridge)
-        debug_print("Bridge out of guard",
-                    descr_number,
-                    "has address", adr_bridge, "to", self.mc.tell())
+        debug_print("Bridge out of guard %d has address %x to %x" %
+                    (descr_number, adr_bridge, self.mc.tell()))
         self.mc.end_function()
         self.write_pending_failure_recoveries()
 
         else:
             funcname = "<loop %d>" % len(self.loop_run_counters)
         # invent the counter, so we don't get too confused
+        return funcname
+
+    def _register_counter(self):
         if self._debug:
             struct = lltype.malloc(DEBUG_COUNTER, flavor='raw',
                                    track_allocation=False)   # known to leak
             struct.i = 0
             self.loop_run_counters.append((len(self.loop_run_counters), struct))
-        return funcname
         
     def patch_jump_for_descr(self, faildescr, adr_new_target):
         adr_jump_offset = faildescr._x86_adr_jump_offset
 
         mc.done()
 
-    def _inject_debugging_code(self, operations):
+    @specialize.argtype(1)
+    def _inject_debugging_code(self, looptoken, operations):
         if self._debug:
             # before doing anything, let's increase a counter
+            s = 0
+            for op in operations:
+                s += op.getopnum()
+            looptoken._x86_debug_checksum = s
             c_adr = ConstInt(rffi.cast(lltype.Signed,
                                      self.loop_run_counters[-1][1]))
             box = BoxInt()
             dispatch_opnum = guard_opnum
         else:
             dispatch_opnum = op.getopnum()
-        res = genop_guard_list[dispatch_opnum](self, op, guard_op, guard_token,
-                                               arglocs, resloc)
-        faildescr._x86_adr_jump_offset = res
+        genop_guard_list[dispatch_opnum](self, op, guard_op, guard_token,
+                                         arglocs, resloc)
+        if not we_are_translated():
+            # must be added by the genop_guard_list[]()
+            assert hasattr(faildescr, '_x86_adr_jump_offset')
 
     def regalloc_perform_guard(self, guard_op, faillocs, arglocs, resloc,
                                current_depths):
             if isinstance(op.getarg(0), Const):
                 self.mc.CMP(arglocs[1], arglocs[0])
                 if guard_opnum == rop.GUARD_FALSE:
-                    return self.implement_guard(guard_token, rev_cond)
+                    self.implement_guard(guard_token, rev_cond)
                 else:
-                    return self.implement_guard(guard_token, false_rev_cond)
+                    self.implement_guard(guard_token, false_rev_cond)
             else:
                 self.mc.CMP(arglocs[0], arglocs[1])
                 if guard_opnum == rop.GUARD_FALSE:
-                    return self.implement_guard(guard_token, cond)
+                    self.implement_guard(guard_token, cond)
                 else:
-                    return self.implement_guard(guard_token, false_cond)
+                    self.implement_guard(guard_token, false_cond)
         return genop_cmp_guard
 
     def _cmpop_guard_float(cond, false_cond, need_jp):
             if guard_opnum == rop.GUARD_FALSE:
                 if need_jp:
                     self.mc.J_il8(rx86.Conditions['P'], 6)
-                return self.implement_guard(guard_token, cond)
+                self.implement_guard(guard_token, cond)
             else:
                 if need_jp:
                     self.mc.J_il8(rx86.Conditions['P'], 2)
                     self.mc.J_il8(rx86.Conditions[cond], 5)
-                    return self.implement_guard(guard_token)
-                return self.implement_guard(guard_token, false_cond)
+                    self.implement_guard(guard_token)
+                else:
+                    self.implement_guard(guard_token, false_cond)
         return genop_cmp_guard_float
 
     def _emit_call(self, x, arglocs, start=0, tmp=eax):
         self.mc.ensure_bytes_available(16 + guard_token.recovery_stub_size())
         if guard_opnum == rop.GUARD_TRUE:
             self.mc.J_il8(rx86.Conditions['P'], 6)
-            return self.implement_guard(guard_token, 'E')
+            self.implement_guard(guard_token, 'E')
         else:
             self.mc.J_il8(rx86.Conditions['P'], 2)
             self.mc.J_il8(rx86.Conditions['E'], 5)
-            return self.implement_guard(guard_token)
+            self.implement_guard(guard_token)
 
     def genop_float_neg(self, op, arglocs, resloc):
         # Following what gcc does: res = x ^ 0x8000000000000000
         guard_opnum = guard_op.getopnum()
         self.mc.CMP(arglocs[0], imm0)
         if guard_opnum == rop.GUARD_TRUE:
-            return self.implement_guard(guard_token, 'Z')
+            self.implement_guard(guard_token, 'Z')
         else:
-            return self.implement_guard(guard_token, 'NZ')
+            self.implement_guard(guard_token, 'NZ')
 
     def genop_int_is_true(self, op, arglocs, resloc):
         self.mc.CMP(arglocs[0], imm0)
         guard_opnum = guard_op.getopnum()
         self.mc.CMP(arglocs[0], imm0)
         if guard_opnum == rop.GUARD_TRUE:
-            return self.implement_guard(guard_token, 'NZ')
+            self.implement_guard(guard_token, 'NZ')
         else:
-            return self.implement_guard(guard_token, 'Z')
+            self.implement_guard(guard_token, 'Z')
 
     def genop_int_is_zero(self, op, arglocs, resloc):
         self.mc.CMP(arglocs[0], imm0)
     def genop_guard_guard_true(self, ign_1, guard_op, guard_token, locs, ign_2):
         loc = locs[0]
         self.mc.TEST(loc, loc)
-        return self.implement_guard(guard_token, 'Z')
+        self.implement_guard(guard_token, 'Z')
     genop_guard_guard_nonnull = genop_guard_guard_true
 
     def genop_guard_guard_no_exception(self, ign_1, guard_op, guard_token,
                                        locs, ign_2):
         self.mc.CMP(heap(self.cpu.pos_exception()), imm0)
-        return self.implement_guard(guard_token, 'NZ')
+        self.implement_guard(guard_token, 'NZ')
 
     def genop_guard_guard_exception(self, ign_1, guard_op, guard_token,
                                     locs, resloc):
         loc1 = locs[1]
         self.mc.MOV(loc1, heap(self.cpu.pos_exception()))