Commits

Philip Jenvey committed 1d4f67c Merge

merge default

Comments (0)

Files changed (45)

README

-=====================================
-PyPy: Python in Python Implementation 
-=====================================
-
-Welcome to PyPy!
-
-PyPy is both an implementation of the Python programming language, and
-an extensive compiler framework for dynamic language implementations.
-You can build self-contained Python implementations which execute
-independently from CPython.
-
-The home page is:
-
-    http://pypy.org/
-
-The getting-started document will help guide you:
-
-    http://doc.pypy.org/en/latest/getting-started.html
-
-It will also point you to the rest of the documentation which is generated
-from files in the pypy/doc directory within the source repositories. Enjoy
-and send us feedback!
-
-    the pypy-dev team <pypy-dev@python.org>
+=====================================
+PyPy: Python in Python Implementation 
+=====================================
+
+Welcome to PyPy!
+
+PyPy is both an implementation of the Python programming language, and
+an extensive compiler framework for dynamic language implementations.
+You can build self-contained Python implementations which execute
+independently from CPython.
+
+The home page is:
+
+    http://pypy.org/
+
+The getting-started document will help guide you:
+
+    http://doc.pypy.org/en/latest/getting-started.html
+
+It will also point you to the rest of the documentation which is generated
+from files in the pypy/doc directory within the source repositories. Enjoy
+and send us feedback!
+
+    the pypy-dev team <pypy-dev@python.org>

lib_pypy/stackless.py

     def insert(self):
         if self.blocked:
             raise RuntimeError("You cannot run a blocked tasklet")
-            if not self.alive:
-                raise RuntimeError("You cannot run an unbound(dead) tasklet")
+        if not self.alive:
+            raise RuntimeError("You cannot run an unbound(dead) tasklet")
         _scheduler_append(self)
 
     def remove(self):

pypy/annotation/test/test_annrpython.py

         assert getcdef(snippet.H).about_attribute('attr') == (
                           a.bookkeeper.immutablevalue(1))
 
-    def DISABLED_test_knownkeysdict(self):
-        # disabled, SomeDict() is now a general {s_key: s_value} dict
-        a = self.RPythonAnnotator()
-        s = a.build_types(snippet.knownkeysdict, [int])
-        # result should be an integer
-        assert s.knowntype == int
-
     def test_generaldict(self):
         a = self.RPythonAnnotator()
         s = a.build_types(snippet.generaldict, [str, int, str, int])
         s = a.build_types(f, [str])
         assert isinstance(s, annmodel.SomeString)
 
+    def test_str_isalpha(self):
+        def f(s):
+            return s.isalpha()
+        a = self.RPythonAnnotator()
+        s = a.build_types(f, [str])
+        assert isinstance(s, annmodel.SomeBool)
+
     def test_simple_slicing(self):
         a = self.RPythonAnnotator()
         s = a.build_types(snippet.simple_slice, [somelist(annmodel.s_Int)])

pypy/annotation/unaryop.py

         return SomeString()
     method_encode.can_only_throw = [UnicodeEncodeError]
 
+
 class __extend__(SomeString):
+    def method_isdigit(chr):
+        return s_Bool
+
+    def method_isalpha(chr):
+        return s_Bool
+
     def method_upper(str):
         return SomeString()
 
     def method_isspace(chr):
         return s_Bool
 
-    def method_isdigit(chr):
-        return s_Bool
-
-    def method_isalpha(chr):
-        return s_Bool
-
     def method_isalnum(chr):
         return s_Bool
 

pypy/module/cpyext/src/pythread.c

 int
 PyThread_acquire_lock(PyThread_type_lock lock, int waitflag)
 {
-    return RPyThreadAcquireLock((struct RPyOpaqueThreadLock*)lock, waitflag);
+    return RPyThreadAcquireLock((struct RPyOpaque_ThreadLock*)lock, waitflag);
 }
 
 void
 PyThread_release_lock(PyThread_type_lock lock)
 {
-    RPyThreadReleaseLock((struct RPyOpaqueThreadLock*)lock);
+    RPyThreadReleaseLock((struct RPyOpaque_ThreadLock*)lock);
 }
 
 

pypy/module/fcntl/interp_fcntl.py

     return wrap_oserror(space, OSError(errno, funcname),
                         exception_name = 'w_IOError')
 
-def _check_flock_op(space, op):
-
-    if op == LOCK_UN:
-        l_type = F_UNLCK
-    elif op & LOCK_SH:
-        l_type = F_RDLCK
-    elif op & LOCK_EX:
-        l_type = F_WRLCK
-    else:
-        raise OperationError(space.w_ValueError,
-            space.wrap("unrecognized flock argument"))
-    l = lltype.malloc(_flock.TO, flavor='raw')
-    l.c_l_type = rffi.cast(rffi.SHORT, l_type)
-    return l
-
 @unwrap_spec(op=int, w_arg=WrappedDefault(0))
 def fcntl(space, w_fd, op, w_arg):
     """fcntl(fd, op, [arg])
     manual flock(3) for details.  (On some systems, this function is
     emulated using fcntl().)"""
 
-    fd = space.c_filedescriptor_w(w_fd)
-
     if has_flock:
+        fd = space.c_filedescriptor_w(w_fd)
+        op = rffi.cast(rffi.INT, op)        # C long => C int
         rv = c_flock(fd, op)
         if rv < 0:
             raise _get_error(space, "flock")
     else:
-        l = _check_flock_op(space, op)
-        rffi.setintfield(l, 'c_l_whence', 0)
-        rffi.setintfield(l, 'c_l_start', 0)
-        rffi.setintfield(l, 'c_l_len', 0)
-        op = [F_SETLKW, F_SETLK][int(bool(op & LOCK_NB))]
-        op = rffi.cast(rffi.INT, op)        # C long => C int
-        fcntl_flock(fd, op, l)
-        lltype.free(l, flavor='raw')
+        lockf(space, w_fd, op)
 
 @unwrap_spec(op=int, length=int, start=int, whence=int)
 def lockf(space, w_fd, op, length=0, start=0, whence=0):
 
     fd = space.c_filedescriptor_w(w_fd)
 
-    l = _check_flock_op(space, op)
-    if start:
+    if op == LOCK_UN:
+        l_type = F_UNLCK
+    elif op & LOCK_SH:
+        l_type = F_RDLCK
+    elif op & LOCK_EX:
+        l_type = F_WRLCK
+    else:
+        raise OperationError(space.w_ValueError,
+            space.wrap("unrecognized lock operation"))
+
+    op = [F_SETLKW, F_SETLK][int(bool(op & LOCK_NB))]
+    op = rffi.cast(rffi.INT, op)        # C long => C int
+
+    l = lltype.malloc(_flock.TO, flavor='raw')
+    try:
+        rffi.setintfield(l, 'c_l_type', l_type)
         rffi.setintfield(l, 'c_l_start', int(start))
-    else:
-        rffi.setintfield(l, 'c_l_start', 0)
-    if len:
         rffi.setintfield(l, 'c_l_len', int(length))
-    else:
-        rffi.setintfield(l, 'c_l_len', 0)
-
-    l.c_l_whence = rffi.cast(rffi.SHORT, whence)
-
-    try:
-        op = [F_SETLKW, F_SETLK][int(bool(op & LOCK_NB))]
-        op = rffi.cast(rffi.INT, op)        # C long => C int
-        fcntl_flock(fd, op, l)
+        rffi.setintfield(l, 'c_l_whence', int(whence))
+        rv = fcntl_flock(fd, op, l)
+        if rv < 0:
+            raise _get_error(space, "fcntl")
     finally:
         lltype.free(l, flavor='raw')
 

pypy/module/fcntl/test/test_fcntl.py

             os.unlink(i)
 
 class AppTestFcntl:
-    spaceconfig = dict(usemodules=('fcntl', 'array', 'struct', 'termios'))
+    spaceconfig = dict(usemodules=('fcntl', 'array', 'struct', 'termios', 'select', 'time'))
     def setup_class(cls):
         tmpprefix = str(udir.ensure('test_fcntl', dir=1).join('tmp_'))
         cls.w_tmp = cls.space.wrap(tmpprefix)
 
     def test_flock(self):
         import fcntl
-        import sys
+        import os
+        import errno
 
         f = open(self.tmp + "c", "w+")
 
         raises(TypeError, fcntl.flock, "foo")
         raises(TypeError, fcntl.flock, f, "foo")
-        fcntl.flock(f, fcntl.LOCK_SH)
-        # this is an error EWOULDBLOCK, man: The file is locked and the
-        # LOCK_NB flag was selected.
-        raises(IOError, fcntl.flock, f, fcntl.LOCK_NB)
+
+        fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
+
+        pid = os.fork()
+        if pid == 0:
+            rval = 2
+            try:
+                fcntl.flock(open(f.name, f.mode), fcntl.LOCK_EX | fcntl.LOCK_NB)
+            except IOError, e:
+                if e.errno not in (errno.EACCES, errno.EAGAIN):
+                    raise
+                rval = 0
+            else:
+                rval = 1
+            finally:
+                os._exit(rval)
+
+        assert pid > 0
+        (pid, status) = os.waitpid(pid, 0)
+        assert os.WIFEXITED(status) == True
+        assert os.WEXITSTATUS(status) == 0
+
         fcntl.flock(f, fcntl.LOCK_UN)
 
         f.close()
 
     def test_lockf(self):
         import fcntl
+        import os
+        import errno
 
         f = open(self.tmp + "d", "w+")
 
         raises(ValueError, fcntl.lockf, f, -256)
         raises(ValueError, fcntl.lockf, f, 256)
 
-        fcntl.lockf(f, fcntl.LOCK_SH)
+        fcntl.lockf(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
+
+        pid = os.fork()
+        if pid == 0:
+            rval = 2
+            try:
+                fcntl.lockf(open(f.name, f.mode), fcntl.LOCK_EX | fcntl.LOCK_NB)
+            except IOError, e:
+                if e.errno not in (errno.EACCES, errno.EAGAIN):
+                    raise
+                rval = 0
+            else:
+                rval = 1
+            finally:
+                os._exit(rval)
+
+        assert pid > 0
+        (pid, status) = os.waitpid(pid, 0)
+        assert os.WIFEXITED(status) == True
+        assert os.WEXITSTATUS(status) == 0
+
         fcntl.lockf(f, fcntl.LOCK_UN)
 
         f.close()
     def test_ioctl(self):
         import fcntl
         import array
-        import sys, os
+        import os
+        import pty
+        import time
 
         try:
             from termios import TIOCGPGRP
-            import pty
         except ImportError:
             skip("don't know how to test ioctl() on this platform")
 
         child_pid, mfd = pty.fork()
         if child_pid == 0:
             # We're the child
+            time.sleep(1)
             return
         try:
             buf = array.array('i', [0])
     def test_ioctl_int(self):
         import os
         import fcntl
+        import pty
 
         try:
             from termios import TCFLSH, TCIOFLUSH
-            import pty
         except ImportError:
             skip("don't know how to test ioctl() on this platform")
 
             os.close(mfd)
             os.close(sfd)
 
-    def test_lockf_with_ex(self):
-        import fcntl
-        f = open(self.tmp, "w")
-        fcntl.lockf(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
-
     def test_large_flag(self):
         import sys
         if any(plat in sys.platform

pypy/module/micronumpy/test/test_ufuncs.py

     def test_isnan_isinf(self):
         from _numpypy import isnan, isinf, float64, array
         assert isnan(float('nan'))
+        assert not isnan(3)
+        assert not isinf(3)
         assert isnan(float64(float('nan')))
         assert not isnan(3)
         assert isinf(float('inf'))
     def test_isposinf_isneginf(self):
         from _numpypy import isneginf, isposinf
         assert isposinf(float('inf'))
+        assert not isposinf(3)
+        assert not isneginf(3)
         assert not isposinf(float('-inf'))
         assert not isposinf(float('nan'))
         assert not isposinf(0)
             [True, True, True, True]).all()
         assert (isfinite([ninf, inf, -nan, nan]) ==
             [False, False, False, False]).all()
+        assert (isfinite([1, 2, 3]) == [True, True, True]).all()
 
         a = [complex(0, 0), complex(1e50, -1e-50), complex(inf, 0),
              complex(inf, inf), complex(inf, ninf), complex(0, inf),

pypy/module/micronumpy/types.py

             assert v == 0
             return 0
 
+    @raw_unary_op
+    def isfinite(self, v):
+        return True
+
+    @raw_unary_op
+    def isnan(self, v):
+        return False
+
+    @raw_unary_op
+    def isinf(self, v):
+        return False
+
+    @raw_unary_op
+    def isposinf(self, v):
+        return False
+
+    @raw_unary_op
+    def isneginf(self, v):
+        return False
+
     @simple_binary_op
     def bitwise_and(self, v1, v2):
         return v1 & v2

pypy/module/select/test/test_select.py

             try:
                 space.call_method(cls.w_sock, "bind", cls.w_sockaddress)
                 break
+            except OperationError, e:   # should get a "Permission denied"
+                if not e.match(space, space.getattr(w_socketmod, space.wrap("error"))):
+                    raise
+                print e
             except cls.w_sock_err, e:   # should get a "Permission denied"
                 print e
             else:

pypy/rlib/parsing/ebnfparse.py

 import py
+
 from pypy.rlib.parsing.parsing import PackratParser, Rule
-from pypy.rlib.parsing.tree import Nonterminal, Symbol, RPythonVisitor
+from pypy.rlib.parsing.tree import Nonterminal, RPythonVisitor
 from pypy.rlib.parsing.codebuilder import Codebuilder
 from pypy.rlib.parsing.regexparse import parse_regex
-import string
-from pypy.rlib.parsing.regex import *
+from pypy.rlib.parsing.regex import StringExpression
 from pypy.rlib.parsing.deterministic import DFA
 from pypy.rlib.parsing.lexer import Lexer, DummyLexer
 from pypy.rlib.objectmodel import we_are_translated
 
-set = py.builtin.set
 
 def make_ebnf_parser():
     NONTERMINALNAME = parse_regex("([a-z]|_)[a-z0-9_]*")
     assert len(s) == 1
     s = s[0]
     s.visit(visitor)
-    
+
     rules, changes = visitor.get_rules_and_changes()
     maker = TransformerMaker(rules, changes)
     ToAstVisitor = maker.make_transformer()
         s = parser.parse(tokens)
         if not we_are_translated():
             try:
-                if py.test.config.option.view: 
+                if py.test.config.option.view:
                     s.view()
             except AttributeError:
                 pass
-                
+
         return s
     return parse
 
             assert change == " " or change == newchange
             result.append((name, newchange))
         return result
-    
+
     def visit_decorated(self, node):
         expansions = node.children[0].visit(self)
         expansions, changes = zip(*expansions)
 
     def create_returning_code(self, expansion, subchange):
         assert len(expansion) == len(subchange)
-        children = []
         self.emit("children = []")
         for i, (symbol, c) in enumerate(zip(expansion, subchange)):
             if c == "[":
 def get_rpy_roots():
     "NOT_RPYTHON"
     # Return the 'roots' from the GC.
-    # This stub is not usable on top of CPython.
     # The gc typically returns a list that ends with a few NULL_GCREFs.
-    raise NotImplementedError
+    return [_GcRef(x) for x in gc.get_objects()]
 
 def get_rpy_referents(gcref):
     "NOT_RPYTHON"

pypy/rpython/lltypesystem/rffi.py

     # char* -> str
     # doesn't free char*
     def charp2str(cp):
-        b = builder_class()
+        size = 0
+        while cp[size] != lastchar:
+            size += 1
+        b = builder_class(size)
         i = 0
         while cp[i] != lastchar:
             b.append(cp[i])

pypy/rpython/rstr.py

         hop.exception_cannot_occur()
         return hop.gendirectcall(self.ll.ll_lower, v_str)
 
+    def rtype_method_isdigit(self, hop):
+        string_repr = hop.args_r[0].repr
+        [v_str] = hop.inputargs(string_repr)
+        hop.exception_cannot_occur()
+        return hop.gendirectcall(self.ll.ll_isdigit, v_str)
+
+    def rtype_method_isalpha(self, hop):
+        string_repr = hop.args_r[0].repr
+        [v_str] = hop.inputargs(string_repr)
+        hop.exception_cannot_occur()
+        return hop.gendirectcall(self.ll.ll_isalpha, v_str)
+
     def _list_length_items(self, hop, v_lst, LIST):
         """Return two Variables containing the length and items of a
         list. Need to be overriden because it is typesystem-specific."""
 class AbstractLLHelpers:
     __metaclass__ = StaticMethods
 
+    def ll_isdigit(s):
+        from pypy.rpython.annlowlevel import hlstr
+
+        s = hlstr(s)
+        if not s:
+            return False
+        for ch in s:
+            if not ch.isdigit():
+                return False
+        return True
+
+    def ll_isalpha(s):
+        from pypy.rpython.annlowlevel import hlstr
+
+        s = hlstr(s)
+        if not s:
+            return False
+        for ch in s:
+            if not ch.isalpha():
+                return False
+        return True
+
     def ll_char_isspace(ch):
         c = ord(ch)
         return c == 32 or (9 <= c <= 13)   # c in (9, 10, 11, 12, 13, 32)

pypy/rpython/test/test_rfloat.py

     def test_formatd_huge(self):
         skip('formatd is broken on ootype')
 
-    def test_string_to_float(self):
-        skip('string_to_float is broken on ootype')
+    def test_parts_to_float(self):
+        skip('parts_to_float is broken on ootype')

pypy/rpython/test/test_rstr.py

             res = self.interpret(fn, [ch])
             assert res == fn(ch)
 
+    def test_isdigit(self):
+        const = self.const
+
+        def fn(i):
+            consts = [const(''), const('anc'), const('abc123'), const('123')]
+            return consts[i].isdigit()
+        for i in xrange(3):
+            assert self.interpret(fn, [i]) == fn(i)
+
+    def test_str_isalpha(self):
+        const = self.const
+
+        def fn(i):
+            consts = [const(''), const('anc'), const('abc123')]
+            return consts[i].isalpha()
+        for i in xrange(3):
+            assert self.interpret(fn, [i]) == fn(i)
+
     def test_char_compare(self):
         const = self.const
         res = self.interpret(lambda c1, c2: c1 == c2,  [const('a'),

pypy/tool/algo/unionfind.py

-# This is a general algorithm used by the annotator.
+# This is a general algorithm used by the annotator, translator, and other code
 
 # union-find impl, a info object is attached to the roots
 
+
 class UnionFind(object):
-
     def __init__(self, info_factory=None):
         self.link_to_parent = {}
         self.weight = {}
     # mapping-like [] access
     def __getitem__(self, obj):
         if obj not in self.link_to_parent:
-            raise KeyError, obj
+            raise KeyError(obj)
 
         ignore, rep, info = self.find(obj)
 
 
         return False, parent, self.root_info[parent]
 
-
     def union(self, obj1, obj2): # -> not_noop, rep, info
 
         new1, rep1, info1 = self.find(obj1)
         self.root_info[rep1] = info1
 
         return True, rep1, info1
-        
-
-    

pypy/tool/genstatistic.py

 pypydir = py.path.local(autopath.pypydir)
 
 def isdocfile(p):
-    return p.ext == '.txt' or p.basename in ('README', 'NOTES', 'LICENSE')
+    return (p.ext in ('.txt', '.rst') or
+            p.basename in ('README', 'NOTES', 'LICENSE'))
 
 def istestfile(p):
     if not p.check(file=1, ext='.py'): 

pypy/tool/release/package.py

     shutil.copytree(str(basedir.join('lib_pypy')),
                     str(pypydir.join('lib_pypy')),
                     ignore=ignore_patterns('.svn', 'py', '*.pyc', '*~'))
-    for file in ['LICENSE', 'README']:
+    for file in ['LICENSE', 'README.rst']:
         shutil.copy(str(basedir.join(file)), str(pypydir))
     pypydir.ensure('include', dir=True)
     if sys.platform == 'win32':

pypy/tool/release/test/test_package.py

         assert not prefix.join('lib_pypy', 'py').check()
         assert not prefix.join('lib_pypy', 'ctypes_configure').check()
         assert prefix.join('LICENSE').check()
-        assert prefix.join('README').check()
+        assert prefix.join('README.rst').check()
         if package.USE_ZIPFILE_MODULE:
             zh = zipfile.ZipFile(str(builddir.join('%s.zip' % test)))
             assert zh.open('%s/lib_pypy/syslog.py' % test)

pypy/translator/backendopt/canraise.py

-from pypy.translator.simplify import get_graph
-from pypy.rpython.lltypesystem.lloperation import llop, LL_OPERATIONS
-from pypy.rpython.lltypesystem import lltype
+import py
+
+from pypy.rpython.lltypesystem.lloperation import LL_OPERATIONS
+from pypy.tool.ansi_print import ansi_log
 from pypy.translator.backendopt import graphanalyze
 from pypy.translator.simplify import get_funcobj
 
-import py
-from pypy.tool.ansi_print import ansi_log
-log = py.log.Producer("canraise") 
-py.log.setconsumer("canraise", ansi_log) 
+log = py.log.Producer("canraise")
+py.log.setconsumer("canraise", ansi_log)
+
 
 class RaiseAnalyzer(graphanalyze.BoolGraphAnalyzer):
     def analyze_simple_operation(self, op, graphinfo):

pypy/translator/backendopt/constfold.py

-from pypy.objspace.flow.model import Constant, Variable, SpaceOperation
-from pypy.objspace.flow.model import c_last_exception
-from pypy.objspace.flow.model import mkentrymap
-from pypy.translator.backendopt.support import log
-from pypy.translator.simplify import eliminate_empty_blocks
+from pypy.objspace.flow.model import (Constant, Variable, SpaceOperation,
+    c_last_exception, mkentrymap)
+from pypy.rpython.lltypesystem import lltype
+from pypy.rpython.lltypesystem.lloperation import llop
 from pypy.translator.unsimplify import insert_empty_block, split_block
-from pypy.rpython.lltypesystem.lloperation import llop
-from pypy.rpython.lltypesystem import lltype
 
 
 def fold_op_list(operations, constants, exit_early=False, exc_catch=False):
                     pass
                 except (KeyboardInterrupt, SystemExit):
                     raise
-                except Exception, e:
+                except Exception:
                     pass   # turn off reporting these as warnings: useless
                     #log.WARNING('constant-folding %r:' % (spaceop,))
                     #log.WARNING('  %s: %s' % (e.__class__.__name__, e))
 def constant_fold_block(block):
     constants = {}
     block.operations = fold_op_list(block.operations, constants,
-                           exc_catch = block.exitswitch == c_last_exception)
+                           exc_catch=block.exitswitch == c_last_exception)
     if constants:
         if block.exitswitch in constants:
             switch = constants[block.exitswitch].value
         if same_as:
             constant_fold_block(block)
     return count
-                
+
 def constant_fold_graph(graph):
     # first fold inside the blocks
     for block in graph.iterblocks():
                     constants[v2] = v1
             if constants:
                 prepare_constant_fold_link(link, constants, splitblocks)
-        if  splitblocks:
+        if splitblocks:
             rewire_links(splitblocks, graph)
         if not diffused and not splitblocks:
             break # finished

pypy/translator/backendopt/escape.py

-from pypy.objspace.flow.model import Variable, Constant
+from pypy.objspace.flow.model import Variable
 from pypy.rpython.lltypesystem import lltype
 from pypy.translator.simplify import get_graph
-from pypy.rpython.rmodel import inputconst
-from pypy.translator.backendopt import support
 from pypy.tool.uid import uid
 
+
 class CreationPoint(object):
     def __init__(self, creation_method, TYPE, op=None):
         self.escapes = False
 
     def seen_graphs(self):
         return self.functionargs.keys()
-    
+
     def getstate(self, var_or_const):
         if not isonheap(var_or_const):
             return None
             varstate = VarState(crep)
         self.varstates[var_or_const] = varstate
         return varstate
-            
+
     def getstates(self, varorconstlist):
         return [self.getstate(var) for var in varorconstlist]
-    
+
     def setstate(self, var, state):
         self.varstates[var] = state
-    
+
     def get_creationpoint(self, var, method="?", op=None):
         if var in self.creationpoints:
             return self.creationpoints[var]
         crep = CreationPoint(method, var.concretetype, op)
         self.creationpoints[var] = crep
         return crep
-    
+
     def schedule_function(self, graph):
         startblock = graph.startblock
         if graph in self.functionargs:
             return
         self.curr_block = block
         self.curr_graph = graph
-        
+
         for op in block.operations:
             self.flow_operation(op)
         for exit in block.exits:
 
     def flow_operation(self, op):
         args = self.getstates(op.args)
-        opimpl = getattr(self, 'op_'+op.opname, None)
+        opimpl = getattr(self, 'op_' + op.opname, None)
         if opimpl is not None:
             res = opimpl(op, *args)
             if res is not NotImplemented:
                 self.setstate(op.result, res)
                 return
-            
+
         if isonheap(op.result) or filter(None, args):
             for arg in args:
                 if arg is not None:
                     self.escapes(arg)
-        
+
     def complete(self):
         while self.scheduled:
             block, graph = self.scheduled.popitem()
 
     def op_cast_pointer(self, op, state):
         return state
-    
+
     def op_setfield(self, op, objstate, fieldname, valuestate):
         if valuestate is not None:
             # be pessimistic for now:
     def op_getarrayitem(self, op, objstate, indexstate):
         if isonheap(op.result):
             return VarState(self.get_creationpoint(op.result, "getarrayitem", op))
-    
+
     def op_getfield(self, op, objstate, fieldname):
         if isonheap(op.result):
             # assume that getfield creates a new value
     seen = {}
     return [graph for graph in adi.seen_graphs()
         if is_malloc_like(adi, graph, seen)]
-
-
-

pypy/translator/backendopt/finalizer.py

     ok_operations = ['ptr_nonzero', 'ptr_eq', 'ptr_ne', 'free', 'same_as',
                      'direct_ptradd', 'force_cast', 'track_alloc_stop',
                      'raw_free']
-    
+
     def analyze_light_finalizer(self, graph):
         result = self.analyze_direct_call(graph)
         if (result is self.top_result() and
             getattr(graph.func, '_must_be_light_finalizer_', False)):
             raise FinalizerError(FinalizerError.__doc__, graph)
         return result
-    
+
     def analyze_simple_operation(self, op, graphinfo):
         if op.opname in self.ok_operations:
             return self.bottom_result()
             TP = op.result.concretetype
             if not isinstance(TP, lltype.Ptr) or TP.TO._gckind == 'raw':
                 # primitive type
-                return self.bottom_result()            
+                return self.bottom_result()
         return self.top_result()

pypy/translator/backendopt/graphanalyze.py

 from pypy.translator.simplify import get_graph, get_funcobj
-from pypy.rpython.lltypesystem.lloperation import llop, LL_OPERATIONS
-from pypy.rpython.lltypesystem import lltype
+from pypy.tool.algo.unionfind import UnionFind
+
 
 class GraphAnalyzer(object):
     verbose = False
 
     def __init__(self, translator):
         self.translator = translator
-        self.analyzed_calls = {}
-        self.recursion_hit = False
+        self._analyzed_calls = UnionFind(lambda graph: Dependency(self))
 
     # method overridden by subclasses
 
     @staticmethod
-    def join_two_results(result1, result2):
-        raise NotImplementedError("abstract base class")
-
-    @staticmethod
     def bottom_result():
         raise NotImplementedError("abstract base class")
 
         # only an optimization, safe to always return False
         return False
 
+    @staticmethod
+    def result_builder():
+        raise NotImplementedError("abstract base class")
+
+    @staticmethod
+    def add_to_result(result, other):
+        raise NotImplementedError("abstract base class")
+
+    @staticmethod
+    def finalize_builder(result):
+        raise NotImplementedError("abstract base class")
+
+    @staticmethod
+    def join_two_results(result1, result2):
+        raise NotImplementedError("abstract base class")
+
     def analyze_simple_operation(self, op, graphinfo=None):
         raise NotImplementedError("abstract base class")
 
 
     # general methods
 
-    def join_results(self, results):
-        result = self.bottom_result()
-        for sub in results:
-            result = self.join_two_results(result, sub)
-        return result
-
     def compute_graph_info(self, graph):
         return None
 
         return x
 
     def analyze_direct_call(self, graph, seen=None):
-        if graph in self.analyzed_calls:
-            return self.analyzed_calls[graph]
         if seen is None:
-            seen = set([graph])
-            self.recursion_hit = False
-            started_here = True
-        elif graph in seen:
-            self.recursion_hit = True
-            return self.bottom_result()
-        else:
-            started_here = False
-            seen.add(graph)
-        result = self.bottom_result()
+            seen = DependencyTracker(self)
+        if not seen.enter(graph):
+            return seen.get_cached_result(graph)
+        result = self.result_builder()
         graphinfo = self.compute_graph_info(graph)
         for block in graph.iterblocks():
             if block is graph.startblock:
-                result = self.join_two_results(
-                        result, self.analyze_startblock(block, seen))
+                result = self.add_to_result(
+                    result,
+                    self.analyze_startblock(block, seen)
+                )
             elif block is graph.exceptblock:
-                result = self.join_two_results(
-                        result, self.analyze_exceptblock(block, seen))
-            for op in block.operations:
-                result = self.join_two_results(
-                        result, self.analyze(op, seen, graphinfo))
-            for exit in block.exits:
-                result = self.join_two_results(
-                        result, self.analyze_link(exit, seen))
+                result = self.add_to_result(
+                    result,
+                    self.analyze_exceptblock(block, seen)
+                )
+            if not self.is_top_result(result):
+                for op in block.operations:
+                    result = self.add_to_result(
+                        result,
+                        self.analyze(op, seen, graphinfo)
+                    )
+                    if self.is_top_result(result):
+                        break
+            if not self.is_top_result(result):
+                for exit in block.exits:
+                    result = self.add_to_result(
+                        result,
+                        self.analyze_link(exit, seen)
+                    )
+                    if self.is_top_result(result):
+                        break
             if self.is_top_result(result):
-                self.analyzed_calls[graph] = result
-                return result
-        if not self.recursion_hit or started_here:
-            self.analyzed_calls[graph] = result
+                break
+        result = self.finalize_builder(result)
+        seen.leave_with(result)
         return result
 
     def analyze_indirect_call(self, graphs, seen=None):
-        results = []
+        result = self.result_builder()
         for graph in graphs:
-            results.append(self.analyze_direct_call(graph, seen))
-        return self.join_results(results)
+            result = self.add_to_result(
+                result,
+                self.analyze_direct_call(graph, seen)
+            )
+            if self.is_top_result(result):
+                break
+        return self.finalize_builder(result)
 
     def analyze_oosend(self, TYPE, name, seen=None):
         graphs = TYPE._lookup_graphs(name)
             for block, op in graph.iterblockops():
                 self.analyze(op)
 
+
+class Dependency(object):
+    def __init__(self, analyzer):
+        self._analyzer = analyzer
+        self._result = analyzer.bottom_result()
+
+    def merge_with_result(self, result):
+        self._result = self._analyzer.join_two_results(self._result, result)
+
+    def absorb(self, other):
+        self.merge_with_result(other._result)
+
+
+class DependencyTracker(object):
+    """This tracks the analysis of cyclic call graphs."""
+
+    # The point is that GraphAnalyzer works fine if the question we ask
+    # is about a single graph; but in the case of recursion, it will
+    # fail if we ask about multiple graphs.  The purpose of this
+    # class is to fix the cache in GraphAnalyzer._analyzed_calls after
+    # each round, whenever a new set of graphs has been added to it.
+    # It works by assuming that the following is correct: for any set of
+    # graphs that can all (indirectly) call each other, all these graphs
+    # will get the same answer, namely the 'join_two_results' of all
+    # of them.
+
+    def __init__(self, analyzer):
+        self.analyzer = analyzer
+        # the UnionFind object, which works like a mapping {graph: Dependency}
+        # (shared with GraphAnalyzer._analyzed_calls)
+        self.graph_results = analyzer._analyzed_calls
+        # the current stack of graphs being analyzed
+        self.current_stack = []
+        self.current_stack_set = set()
+
+    def enter(self, graph):
+        if graph not in self.graph_results:
+            self.current_stack.append(graph)
+            self.current_stack_set.add(graph)
+            self.graph_results.find(graph)
+            return True
+        else:
+            if graph in self.current_stack_set:
+                # found a cycle; merge all graphs in that cycle
+                i = len(self.current_stack) - 1
+                while self.current_stack[i] is not graph:
+                    self.graph_results.union(self.current_stack[i], graph)
+                    i -= 1
+            return False
+
+    def leave_with(self, result):
+        graph = self.current_stack.pop()
+        self.current_stack_set.remove(graph)
+        dep = self.graph_results[graph]
+        dep.merge_with_result(result)
+
+    def get_cached_result(self, graph):
+        dep = self.graph_results[graph]
+        return dep._result
+
+
 class BoolGraphAnalyzer(GraphAnalyzer):
     """generic way to analyze graphs: recursively follow it until the first
     operation is found on which self.analyze_simple_operation returns True"""
 
-    @staticmethod
-    def join_two_results(result1, result2):
-        return result1 or result2
+    def bottom_result(self):
+        return False
 
-    @staticmethod
-    def is_top_result(result):
+    def top_result(self):
+        return True
+
+    def is_top_result(self, result):
         return result
 
-    @staticmethod
-    def bottom_result():
+    def result_builder(self):
         return False
 
-    @staticmethod
-    def top_result():
-        return True
+    def add_to_result(self, result, other):
+        return self.join_two_results(result, other)
 
+    def finalize_builder(self, result):
+        return result
+
+    def join_two_results(self, result1, result2):
+        return result1 or result2

pypy/translator/backendopt/inline.py

 import sys
-from pypy.translator.simplify import join_blocks, cleanup_graph
-from pypy.translator.simplify import get_graph, get_funcobj
-from pypy.translator.unsimplify import copyvar
-from pypy.objspace.flow.model import Variable, Constant, Block, Link
-from pypy.objspace.flow.model import SpaceOperation, c_last_exception
-from pypy.objspace.flow.model import FunctionGraph
-from pypy.objspace.flow.model import mkentrymap, checkgraph
-from pypy.annotation import model as annmodel
-from pypy.rpython.lltypesystem.lltype import Bool, Signed, typeOf, Void, Ptr
-from pypy.rpython.lltypesystem.lltype import normalizeptr
+
+from pypy.objspace.flow.model import (Variable, Constant, Block, Link,
+    SpaceOperation, c_last_exception, FunctionGraph, mkentrymap)
+from pypy.rpython.lltypesystem.lltype import Bool, Signed, typeOf, Void, Ptr, normalizeptr
 from pypy.rpython.ootypesystem import ootype
-from pypy.rpython import rmodel
 from pypy.tool.algo import sparsemat
 from pypy.translator.backendopt import removenoops
-from pypy.translator.backendopt.support import log
-from pypy.translator.unsimplify import split_block
-from pypy.translator.backendopt.support import find_backedges, find_loop_blocks
 from pypy.translator.backendopt.canraise import RaiseAnalyzer
+from pypy.translator.backendopt.support import log, find_loop_blocks
+from pypy.translator.simplify import join_blocks, cleanup_graph, get_graph, get_funcobj
+from pypy.translator.unsimplify import copyvar, split_block
+
 
 class CannotInline(Exception):
     pass
 
+
 def get_meth_from_oosend(op):
-    method_name = op.args[0].value
     INSTANCE = op.args[1].concretetype
     _, meth = INSTANCE._lookup(op.args[0].value)
     virtual = getattr(meth, '_virtual', True)
     else:
         return meth
 
-class CanRaise:
+
+class CanRaise(object):
     def __init__(self, can_raise):
         self.can_raise = can_raise
 
+
 def collect_called_graphs(graph, translator, include_oosend=True):
     graphs_or_something = {}
     for block in graph.iterblocks():
 def inline_function(translator, inline_func, graph, lltype_to_classdef,
                     raise_analyzer, call_count_pred=None, cleanup=True):
     inliner = Inliner(translator, graph, inline_func, lltype_to_classdef,
-                      raise_analyzer = raise_analyzer,
+                      raise_analyzer=raise_analyzer,
                       call_count_pred=call_count_pred, cleanup=cleanup)
     return inliner.inline_all()
 
 def simple_inline_function(translator, inline_func, graph):
     inliner = Inliner(translator, graph, inline_func,
                       translator.rtyper.lltype_to_classdef_mapping(),
-                      raise_analyzer = RaiseAnalyzer(translator))
+                      raise_analyzer=RaiseAnalyzer(translator))
     return inliner.inline_all()
 
 
     #(e.g. if you do only raise XXXError) by doing pattern matching
     currvar = block.exits[0].args[1]
     ops = block.operations
-    i = len(ops)-1
+    i = len(ops) - 1
     while True:
         if isinstance(currvar, Constant):
             value = currvar.value
     return False
 
 class BaseInliner(object):
-    def __init__(self, translator, graph, lltype_to_classdef, 
+    def __init__(self, translator, graph, lltype_to_classdef,
                  inline_guarded_calls=False,
                  inline_guarded_calls_no_matter_what=False,
                  raise_analyzer=None,
                 label = countop.args[1].value
                 if not call_count_pred(label):
                     continue
-            operation = block.operations[index_operation]
             self.inline_once(block, index_operation)
             count += 1
         if self.do_cleanup:
             index_operation == len(block.operations) - 1):
             self.exception_guarded = True
             if self.inline_guarded_calls:
-                if (not self.inline_guarded_calls_no_matter_what and 
+                if (not self.inline_guarded_calls_no_matter_what and
                     does_raise_directly(self.graph_to_inline, self.raise_analyzer)):
                     raise CannotInline("can't inline because the call is exception guarded")
             elif any_call_to_raising_graphs(self.graph_to_inline,
                       for var in self.original_passon_vars]
         self._passon_vars[cache_key] = result
         return result
-        
+
     def copy_operation(self, op):
         args = [self.get_new_name(arg) for arg in op.args]
         result = SpaceOperation(op.opname, args, self.get_new_name(op.result))
         if hasattr(link, 'llexitcase'):
             newlink.llexitcase = link.llexitcase
         return newlink
-        
 
     def find_args_in_exceptional_case(self, link, block, etype, evalue, afterblock, passon_vars):
         linkargs = []
         copiedreturnblock.exitswitch = None
         copiedreturnblock.recloseblock(linkfrominlined)
         assert copiedreturnblock.exits[0].target == afterblock
-       
+
     def rewire_exceptblock(self, afterblock):
         #let links to exceptblock of the graph to inline go to graphs exceptblock
         copiedexceptblock = self.copy_block(self.graph_to_inline.exceptblock)
                         else:
                             # if self.graph.exceptblock was never used before
                             a2.concretetype = a1.concretetype
-    
+
     def rewire_exceptblock_with_guard(self, afterblock, copiedexceptblock):
         # this rewiring does not always succeed. in the cases where it doesn't
         # there will be generic code inserted
 
     def generic_exception_matching(self, afterblock, copiedexceptblock):
         #XXXXX don't look: insert blocks that do exception matching
-        #for the cases where direct matching did not work        
+        #for the cases where direct matching did not work
         exc_match = Constant(
             self.translator.rtyper.getexceptiondata().fn_exception_match)
         exc_match.concretetype = typeOf(exc_match.value)
         linktoinlined.args = passon_args
         afterblock.inputargs = [self.op.result] + afterblock.inputargs
         if self.graph_to_inline.returnblock in self.entrymap:
-            self.rewire_returnblock(afterblock) 
+            self.rewire_returnblock(afterblock)
         if self.graph_to_inline.exceptblock in self.entrymap:
             self.rewire_exceptblock(afterblock)
         if self.exception_guarded:
                     if graph is not None and graph in ok_to_call:
                         add(parentgraph, block, op, graph)
     return result
-    
+
 def instrument_inline_candidates(graphs, threshold):
     cache = {None: False}
     def candidate(graph):
     for parentgraph in graphs:
         for block in parentgraph.iterblocks():
             ops = block.operations
-            i = len(ops)-1
+            i = len(ops) - 1
             while i >= 0:
                 op = ops[i]
                 i -= 1
                         dummy.concretetype = Void
                         count = SpaceOperation('instrument_count',
                                                [tag, label], dummy)
-                        ops.insert(i+1, count)
+                        ops.insert(i + 1, count)
                         n += 1
     log.inlining("%d call sites instrumented" % n)
 
                   callgraph=None,
                   call_count_pred=None,
                   heuristic=inlining_heuristic):
-    
+
     assert threshold is not None and threshold != 1
     to_cleanup = {}
     from heapq import heappush, heappop, heapreplace, heapify
         count = auto_inlining(translator, threshold, callgraph=callgraph,
                               heuristic=heuristic,
                               call_count_pred=call_count_pred)
-        log.inlining('inlined %d callsites.'% (count,))
+        log.inlining('inlined %d callsites.' % (count,))
         for graph in graphs:
             removenoops.remove_duplicate_casts(graph, translator)

pypy/translator/backendopt/malloc.py

-from pypy.objspace.flow.model import Variable, Constant, Block, Link
-from pypy.objspace.flow.model import SpaceOperation
+from pypy.objspace.flow.model import Variable, Constant, SpaceOperation
 from pypy.tool.algo.unionfind import UnionFind
 from pypy.rpython.lltypesystem import lltype
 from pypy.rpython.ootypesystem import ootype
 from pypy.translator.backendopt import removenoops
 from pypy.translator.backendopt.support import log
 
+
 class LifeTime:
 
     def __init__(self, (block, var)):
                     set_use_point(node, node.exitswitch, "exitswitch", node)
 
         for node in graph.iterlinks():
-                if isinstance(node.last_exception, Variable):
-                    set_creation_point(node.prevblock, node.last_exception,
-                                       "last_exception")
-                if isinstance(node.last_exc_value, Variable):
-                    set_creation_point(node.prevblock, node.last_exc_value,
-                                       "last_exc_value")
-                d = {}
-                for i, arg in enumerate(node.args):
-                    union(node.prevblock, arg,
-                          node.target, node.target.inputargs[i])
-                    if isinstance(arg, Variable):
-                        if arg in d:
-                            # same variable present several times in link.args
-                            # consider it as a 'use' of the variable, which
-                            # will disable malloc optimization (aliasing problems)
-                            set_use_point(node.prevblock, arg, "dup", node, i)
-                        else:
-                            d[arg] = True
+            if isinstance(node.last_exception, Variable):
+                set_creation_point(node.prevblock, node.last_exception,
+                                   "last_exception")
+            if isinstance(node.last_exc_value, Variable):
+                set_creation_point(node.prevblock, node.last_exc_value,
+                                   "last_exc_value")
+            d = {}
+            for i, arg in enumerate(node.args):
+                union(node.prevblock, arg,
+                      node.target, node.target.inputargs[i])
+                if isinstance(arg, Variable):
+                    if arg in d:
+                        # same variable present several times in link.args
+                        # consider it as a 'use' of the variable, which
+                        # will disable malloc optimization (aliasing problems)
+                        set_use_point(node.prevblock, arg, "dup", node, i)
+                    else:
+                        d[arg] = True
 
         return lifetimes.infos()
 
                 isinstance(S._flds[S._names[0]], lltype.Struct) and
                 S._flds[S._names[0]]._hints.get('union'))
 
-
     def RTTI_dtor(self, STRUCT):
         try:
             destr_ptr = lltype.getRuntimeTypeInfo(STRUCT)._obj.destructor_funcptr
             if destr_ptr:
                 return True
-        except (ValueError, AttributeError), e:
+        except (ValueError, AttributeError):
             pass
         return False
 
             newop = SpaceOperation('same_as', [c], op.result)
             self.newops.append(newop)
         else:
-            raise AssertionError, op.opname
+            raise AssertionError(op.opname)
 
 
 class OOTypeMallocRemover(BaseMallocRemover):
             newop = SpaceOperation('same_as', [c], op.result)
             self.newops.append(newop)
         else:
-            raise AssertionError, op.opname
+            raise AssertionError(op.opname)
 
 
 def remove_simple_mallocs(graph, type_system='lltypesystem', verbose=True):
             tot += count
     log.malloc("removed %d simple mallocs in total" % tot)
     return tot
-
-

pypy/translator/backendopt/merge_if_blocks.py

-from pypy.objspace.flow.model import Block, Constant, Variable
-from pypy.objspace.flow.model import checkgraph, mkentrymap
+from pypy.objspace.flow.model import Constant, Variable, checkgraph, mkentrymap
 from pypy.translator.backendopt.support import log
 
 log = log.mergeifblocks
         return varmap[var_or_const]
     firstblock, case = chain[0]
     firstblock.operations = firstblock.operations[:-1]
-    firstblock.exitswitch = checkvar 
+    firstblock.exitswitch = checkvar
     values = {}
     links = []
     default = chain[-1][0].exits[0]

pypy/translator/backendopt/raisingop2direct_call.py

 from pypy.translator.backendopt.support import log, all_operations, annotate
 import pypy.rpython.raisingops.raisingops
+
+
 log = log.raisingop2directcall
 
 def is_raisingop(op):
     if graphs is None:
         graphs = translator.graphs
 
-
     log('starting')
     seen = {}
     for op in all_operations(graphs):
         translator.rtyper.specialize_more_blocks()
 
     #rename some operations (that were introduced in the newly specialized graphs)
-    #so this transformation becomes idempotent... 
+    #so this transformation becomes idempotent...
     #for op in all_operations(graphs):
     #   if op.opname in special_operations:
     #       log('renamed %s to %s_' % (op.opname, op.opname))
-    #       op.opname += '_' 
+    #       op.opname += '_'
 
     #selfdiagnostics... assert that there are no more raisingops
     for op in all_operations(graphs):

pypy/translator/backendopt/removeassert.py

 from pypy.objspace.flow.model import Constant, checkgraph, c_last_exception
+from pypy.rpython.lltypesystem import lltype
 from pypy.rpython.rtyper import LowLevelOpList, inputconst
+from pypy.translator.backendopt.support import log
 from pypy.translator.simplify import eliminate_empty_blocks, join_blocks
-#from pypy.translator.simplify import transform_dead_op_vars
-from pypy.rpython.lltypesystem import lltype
-from pypy.rpython.lltypesystem import rclass
-from pypy.translator.backendopt.support import log
 
 
 def remove_asserts(translator, graphs):
             if translator.config.translation.verbose:
                 log.removeassert("removed %d asserts in %s" % (count, graph.name))
             checkgraph(graph)
-            #transform_dead_op_vars(graph, translator)
     total_count = tuple(total_count)
     if total_count[0] == 0:
         if total_count[1] == 0:
             newops = LowLevelOpList()
             if link.exitcase:
                 v = newops.genop('bool_not', [block.exitswitch],
-                                 resulttype = lltype.Bool)
+                                 resulttype=lltype.Bool)
             else:
                 v = block.exitswitch
             msg = "assertion failed in %s" % (graph.name,)

pypy/translator/backendopt/removenoops.py

-from pypy.objspace.flow.model import Block, Variable, Constant
-from pypy.rpython.lltypesystem.lltype import Void
+from pypy.objspace.flow.model import Variable, Constant
 from pypy.translator.backendopt.support import log
 from pypy.translator import simplify
-from pypy import conftest
+
 
 def remove_unaryops(graph, opnames):
     """Removes unary low-level ops with a name appearing in the opnames list.
                 simplify.replace_exitswitch_by_constant(block, op_arg)
         block.operations[index] = None
         touched_blocks.add(block)
-        
+
     # remove all operations
     for block in touched_blocks:
         if block.operations:

pypy/translator/backendopt/ssa.py

-from pypy.objspace.flow.model import Variable, mkentrymap, Block
+from pypy.objspace.flow.model import Variable, mkentrymap
 from pypy.tool.algo.unionfind import UnionFind
 
 class DataFlowFamilyBuilder:

pypy/translator/backendopt/storesink.py

                 del cache[k]
 
     added_some_same_as = False
-    
+
     for block in graph.iterblocks():
         newops = []
         cache = {}

pypy/translator/backendopt/support.py

 import py
+
 from pypy.rpython.lltypesystem import lltype
+from pypy.rpython.rmodel import inputconst
+from pypy.tool.ansi_print import ansi_log
 from pypy.translator.simplify import get_graph
-from pypy.rpython.rmodel import inputconst 
-from pypy.tool.ansi_print import ansi_log
-from pypy.annotation.model import s_ImpossibleValue
-from pypy.translator.unsimplify import split_block, copyvar, insert_empty_block
-from pypy.objspace.flow.model import Constant, Variable, SpaceOperation, c_last_exception
-from pypy.rpython.lltypesystem import lltype
 
 
 log = py.log.Producer("backendopt")
 
 def graph_operations(graph):
     for block in graph.iterblocks():
-        for op in block.operations: 
+        for op in block.operations:
             yield op
 
 def all_operations(graphs):
     for graph in graphs:
         for block in graph.iterblocks():
-            for op in block.operations: 
+            for op in block.operations:
                 yield op
 
 def annotate(translator, func, result, args):
     args   = [arg.concretetype for arg in args]
     graph  = translator.rtyper.annotate_helper(func, args)
     fptr   = lltype.functionptr(lltype.FuncType(args, result.concretetype), func.func_name, graph=graph)
-    c      = inputconst(lltype.typeOf(fptr), fptr) 
+    c      = inputconst(lltype.typeOf(fptr), fptr)
     return c
 
 def var_needsgc(var):

pypy/translator/backendopt/tailrecursion.py

             len(block.operations) > 0 and
             block.operations[-1].opname == 'direct_call' and
             block.operations[-1].result == link.args[0]):
-            call = get_graph(block.operations[-1].args[0], translator)
             print "getgraph", graph
             if graph is graph:
                 _remove_tail_call(translator, graph, block)

pypy/translator/backendopt/test/test_canraise.py

         result = ra.can_raise(fgraph.startblock.exits[0].target.operations[-1]) # the call to g
         assert result
 
+    def test_recursive_cannot_raise(self):
+        # intentionally don't insert stack checks.  The goal is to verify
+        # the graph analyzer, which should return "no" on such a recursion.
+        def g(x):
+            return f(x)
+
+        def f(x):
+            if x:
+                if x % 2:
+                    return x
+                return 42
+            return g(x - 1)
+
+        t, ra = self.translate(f, [int])
+        ggraph = graphof(t, g)
+        fgraph = graphof(t, f)
+        result = ra.can_raise(ggraph.startblock.operations[-1]) # the call to f
+        assert not result
+        result = ra.can_raise(fgraph.startblock.exits[0].target.operations[-1]) # the call to g
+        assert not result
+
     def test_can_raise_exception(self):
         def g():
             raise ValueError

pypy/translator/backendopt/test/test_tailrecursion.py

-from pypy.objspace.flow.model import Block, Link, Variable, Constant
+from pypy.rpython.llinterp import LLInterpreter
 from pypy.translator.backendopt.tailrecursion import remove_tail_calls_to_self
 from pypy.translator.translator import TranslationContext, graphof
-from pypy.rpython.llinterp import LLInterpreter
-from pypy.translator.test.snippet import is_perfect_number
+
 
 def test_recursive_gcd():
     def gcd(a, b):
     t.buildannotator().build_types(gcd, [int, int])
     t.buildrtyper().specialize()
     gcd_graph = graphof(t, gcd)
-    remove_tail_calls_to_self(t, gcd_graph )
+    remove_tail_calls_to_self(t, gcd_graph)
     lli = LLInterpreter(t.rtyper)
     res = lli.eval_graph(gcd_graph, (15, 25))
     assert res == 5

pypy/translator/backendopt/writeanalyze.py

 top_set = object()
 empty_set = frozenset()
 
+
 class WriteAnalyzer(graphanalyze.GraphAnalyzer):
+    def bottom_result(self):
+        return empty_set
 
-    @staticmethod
-    def join_two_results(result1, result2):
-        if result1 is top_set:
+    def top_result(self):
+        return top_set
+
+    def is_top_result(self, result):
+        return result is top_set
+
+    def result_builder(self):
+        return set()
+
+    def add_to_result(self, result, other):
+        if other is top_set:
             return top_set
-        if result2 is top_set:
+        result.update(other)
+        return result
+
+    def finalize_builder(self, result):
+        if result is top_set:
+            return result
+        return frozenset(result)
+
+    def join_two_results(self, result1, result2):
+        if result1 is top_set or result2 is top_set:
             return top_set
         return result1.union(result2)
 
-    @staticmethod
-    def bottom_result():
-        return empty_set
-
-    @staticmethod
-    def top_result():
-        return top_set
-
-    @staticmethod
-    def is_top_result(result):
-        return result is top_set
-
     def analyze_simple_operation(self, op, graphinfo):
         if op.opname in ("setfield", "oosetfield"):
             if graphinfo is None or not graphinfo.is_fresh_malloc(op.args[0]):

pypy/translator/c/primitive.py

 import sys
-from pypy.rlib.objectmodel import Symbolic, ComputedIntSymbolic
-from pypy.rlib.objectmodel import CDefinedIntSymbolic
+
+from pypy.rlib.objectmodel import Symbolic, ComputedIntSymbolic, CDefinedIntSymbolic
 from pypy.rlib.rarithmetic import r_longlong, is_emulated_long
 from pypy.rlib.rfloat import isinf, isnan
-from pypy.rpython.lltypesystem.lltype import *
 from pypy.rpython.lltypesystem import rffi, llgroup
-from pypy.rpython.lltypesystem.llmemory import Address, \
-     AddressOffset, ItemOffset, ArrayItemsOffset, FieldOffset, \
-     CompositeOffset, ArrayLengthOffset, \
-     GCHeaderOffset, GCREF, AddressAsInt
+from pypy.rpython.lltypesystem.llmemory import (Address, AddressOffset,
+    ItemOffset, ArrayItemsOffset, FieldOffset, CompositeOffset,
+    ArrayLengthOffset, GCHeaderOffset, GCREF, AddressAsInt)
+from pypy.rpython.lltypesystem.lltype import (Signed, SignedLongLong, Unsigned,
+    UnsignedLongLong, Float, SingleFloat, LongFloat, Char, UniChar, Bool, Void,
+    FixedSizeArray, Ptr, cast_opaque_ptr, typeOf)
 from pypy.rpython.lltypesystem.llarena import RoundedUpForAllocation
 from pypy.translator.c.support import cdecl, barebonearray
 
+
 SUPPORT_INT128 = hasattr(rffi, '__INT128_T')
 
 # ____________________________________________________________
 else:
     def lll(fmt):
         return fmt
-    
+
 def name_signed(value, db):
     if isinstance(value, Symbolic):
         if isinstance(value, FieldOffset):
                 repeat = value.fldname[4:]
                 size = 'sizeof(%s)' % (cdecl(db.gettype(value.TYPE.OF), ''),)
                 return '(%s * %s)' % (size, repeat)
-            return 'offsetof(%s, %s)'%(
+            return 'offsetof(%s, %s)' % (
                 cdecl(db.gettype(value.TYPE), ''),
                 structnode.c_struct_field_name(value.fldname))
         elif isinstance(value, ItemOffset):
                 barebonearray(value.TYPE)):
                 return '0'
             elif value.TYPE.OF != Void:
-                return 'offsetof(%s, items)'%(
+                return 'offsetof(%s, items)' % (
                     cdecl(db.gettype(value.TYPE), ''))
             else:
-                return 'sizeof(%s)'%(cdecl(db.gettype(value.TYPE), ''),)
+                return 'sizeof(%s)' % (cdecl(db.gettype(value.TYPE), ''),)
         elif isinstance(value, ArrayLengthOffset):
-            return 'offsetof(%s, length)'%(
+            return 'offsetof(%s, length)' % (
                 cdecl(db.gettype(value.TYPE), ''))
         elif isinstance(value, CompositeOffset):
             names = [name_signed(item, db) for item in value.offsets]
         elif isinstance(value, AddressAsInt):
             return '((Signed)%s)' % name_address(value.adr, db)
         else:
-            raise Exception("unimplemented symbolic %r"%value)
+            raise Exception("unimplemented symbolic %r" % value)
     if value is None:
         assert not db.completed
         return None
     else:
         name_str = '((%s) %%d%s)' % (c_name, suffix)
         PrimitiveName[ll_type] = lambda value, db: name_str % value
-    PrimitiveType[ll_type] = '%s @'% c_name
+    PrimitiveType[ll_type] = '%s @' % c_name
 
 define_c_primitive(rffi.SIGNEDCHAR, 'signed char')
 define_c_primitive(rffi.UCHAR, 'unsigned char')

pypy/translator/cli/test/test_float.py

 
     def test_r_singlefloat(self):
         py.test.skip("not implemented: single-precision floats")
+
+    def test_formatd(self):
+        py.test.skip('formatd is broken on ootype')
+
+    def test_formatd_repr(self):
+        py.test.skip('formatd is broken on ootype')
+
+    def test_formatd_huge(self):
+        py.test.skip('formatd is broken on ootype')
+
+    def test_parts_to_float(self):
+        py.test.skip('parts_to_float is broken on ootype')

pypy/translator/jvm/genjvm.py

 import sys
 
 import py
+from pypy.rpython.ootypesystem import ootype
 from pypy.tool.udir import udir
-from pypy.translator.translator import TranslationContext
-from pypy.translator.oosupport.genoo import GenOO
 from pypy.translator.backendopt.all import backend_optimizations
 from pypy.translator.backendopt.checkvirtual import check_virtual_methods
+from pypy.translator.oosupport.genoo import GenOO
+from pypy.translator.translator import TranslationContext
 
+from pypy.translator.jvm.constant import (
+    JVMConstantGenerator, JVMStaticMethodConst, JVMCustomDictConst,
+    JVMWeakRefConst)
+from pypy.translator.jvm.database import Database
 from pypy.translator.jvm.generator import JasminGenerator
-from pypy.translator.jvm.option import getoption
-from pypy.translator.jvm.database import Database
 from pypy.translator.jvm.log import log
 from pypy.translator.jvm.node import EntryPoint, Function
 from pypy.translator.jvm.opcodes import opcodes
-from pypy.rpython.ootypesystem import ootype
-from pypy.translator.jvm.constant import \
-     JVMConstantGenerator, JVMStaticMethodConst, JVMCustomDictConst, \
-     JVMWeakRefConst
+from pypy.translator.jvm.option import getoption
 from pypy.translator.jvm.prebuiltnodes import create_interlink_node
 
 MIN_JAVA_VERSION = '1.6.0'
 
     def __str__(self):
         return "Error code %d running %s" % (self.res, repr(self.args))
-        
+
     def pretty_print(self):
         JvmError.pretty_print(self)
         print "vvv Stdout vvv\n"
         print self.stdout
         print "vvv Stderr vvv\n"
         print self.stderr
-        
+
 
 class JvmGeneratedSource(object):
-    
+
     """
     An object which represents the generated sources. Contains methods
     to find out where they are located, to compile them, and to execute
         self.package = package
         self.compiled = False
         self.jasmin_files = None
-        
+
         # Determine various paths:
         self.thisdir = py.path.local(__file__).dirpath()
         self.rootdir = self.thisdir.join('src')
         self.srcdir = self.rootdir.join('pypy')
         self.jnajar = self.rootdir.join('jna.jar')
-        self.jasminjar = self.rootdir.join('jasmin.jar')        
+        self.jasminjar = self.rootdir.join('jasmin.jar')
 
         # Compute directory where .j files are
         self.javadir = self.tmpdir
         Compiles the .java sources into .class files, ready for execution.
         """
         jascmd = [
-            getoption('java'), 
+            getoption('java'),
+            '-Djava.awt.headless=true',
             '-jar', str(self.jasminjar),
-            '-g', 
-            '-d', 
+            '-g',
+            '-d',
             str(self.javadir)]
 
         def split_list(files):
             #     path_to_jre/java -jar path_to_jasmin/jasmin.jar $*
             # So we limit the length of arguments files to:
             MAXLINE = 1500
-    
+
             chunk = []
             chunklen = 0
             for f in files:
             #print "Invoking jasmin on %s" % files
             self._invoke(jascmd + files, False)
             #print "... completed!"
-                           
+
         self.compiled = True
         self._compile_helper()
 
         strargs = [self._make_str(a) for a in args]
         cmd = [getoption('java'),
                '-Xmx256M', # increase the heapsize so the microbenchmarks run
+               '-Djava.awt.headless=true',
                '-cp',
                str(self.javadir)+os.pathsep+str(self.jnajar),
                self.package+".Main"] + strargs
         return stdout, stderr, retval
 
 def generate_source_for_function(func, annotation, backendopt=False):
-    
+
     """
     Given a Python function and some hints about its argument types,
     generates JVM sources that call it and print the result.  Returns
     the JvmGeneratedSource object.
     """
-    
+
     if hasattr(func, 'im_func'):
         func = func.im_func
     t = TranslationContext()
     generate_source().  *You can not use one of these objects more than
     once.* """
 
-    TypeSystem = lambda X, db: db # TypeSystem and Database are the same object 
+    TypeSystem = lambda X, db: db # TypeSystem and Database are the same object
     Function = Function
     Database = Database
     opcodes = opcodes
     CustomDictConst   = JVMCustomDictConst