Commits

Armin Rigo committed 7bc2027

* Removed underscores from directory and file names.
* Finished sorting the tests in their own backendopt/test/test_* files.
* Changed outside code to use backendopt/ instead of backendoptimization.py,
which is deleted.
* Fixed some bugs here and there; there are more bugs open, both in
inlining and malloc removal...
* llvm seems quite broken at the moment, sorry if I caused that.
I will check this too.

  • Participants
  • Parent commits 2ef28aa

Comments (0)

Files changed (30)

File pypy/translator/backend_opt/__init__.py

Empty file removed.

File pypy/translator/backend_opt/inline.py

-##from pypy.translator.translator import Translator
-##from pypy.translator.simplify import eliminate_empty_blocks, join_blocks
-##from pypy.translator.simplify import remove_identical_vars
-##from pypy.translator.simplify import transform_dead_op_vars
-##from pypy.translator.unsimplify import copyvar, split_block
-##from pypy.objspace.flow.model import Variable, Constant, Block, Link
-##from pypy.objspace.flow.model import SpaceOperation, last_exception
-##from pypy.objspace.flow.model import traverse, mkentrymap, checkgraph
-##from pypy.annotation import model as annmodel
-##from pypy.tool.unionfind import UnionFind
-##from pypy.rpython.lltype import Void, Bool
-##from pypy.rpython import rmodel, lltype
-from pypy.translator.backend_opt import matfunc
-
-def collect_called_functions(graph):
-    funcs = {}
-    def visit(obj):
-        if not isinstance(obj, Block):
-            return
-        for op in obj.operations:
-            if op.opname == "direct_call":
-                funcs[op.args[0]] = True
-    traverse(visit, graph)
-    return funcs
-
-def inline_function(translator, inline_func, graph):
-    count = 0
-    callsites = []
-    def find_callsites(block):
-        if isinstance(block, Block):
-            for i, op in enumerate(block.operations):
-                if not (op.opname == "direct_call" and
-                    isinstance(op.args[0], Constant)):
-                    continue
-                funcobj = op.args[0].value._obj
-                # accept a function or a graph as 'inline_func'
-                if (getattr(funcobj, 'graph', None) is inline_func or
-                    getattr(funcobj, '_callable', None) is inline_func):
-                    callsites.append((block, i))
-    traverse(find_callsites, graph)
-    while callsites != []:
-        block, index_operation = callsites.pop()
-        _inline_function(translator, graph, block, index_operation)
-        callsites = []
-        traverse(find_callsites, graph)
-        checkgraph(graph)
-        count += 1
-    return count
-
-def _find_exception_type(block):
-    #XXX slightly brittle: find the exception type for simple cases
-    #(e.g. if you do only raise XXXError) by doing pattern matching
-    ops = block.operations
-    if (len(ops) < 6 or
-        ops[-6].opname != "malloc" or ops[-5].opname != "cast_pointer" or
-        ops[-4].opname != "setfield" or ops[-3].opname != "cast_pointer" or
-        ops[-2].opname != "getfield" or ops[-1].opname != "cast_pointer" or
-        len(block.exits) != 1 or block.exits[0].args[0] != ops[-2].result or
-        block.exits[0].args[1] != ops[-1].result or
-        not isinstance(ops[-4].args[1], Constant) or
-        ops[-4].args[1].value != "typeptr"):
-        return None
-    return ops[-4].args[2].value
-
-def _inline_function(translator, graph, block, index_operation):
-    op = block.operations[index_operation]
-    graph_to_inline = op.args[0].value._obj.graph
-    exception_guarded = False
-    if (block.exitswitch == Constant(last_exception) and
-        index_operation == len(block.operations) - 1):
-        exception_guarded = True
-        if len(collect_called_functions(graph_to_inline)) != 0:
-            raise NotImplementedError("can't handle exceptions yet")
-    entrymap = mkentrymap(graph_to_inline)
-    beforeblock = block
-    afterblock = split_block(translator, graph, block, index_operation)
-    assert afterblock.operations[0] is op
-    #vars that need to be passed through the blocks of the inlined function
-    passon_vars = {beforeblock: [arg for arg in beforeblock.exits[0].args
-                                     if isinstance(arg, Variable)]}
-    copied_blocks = {}
-    varmap = {}
-    def get_new_name(var):
-        if var is None:
-            return None
-        if isinstance(var, Constant):
-            return var
-        if var not in varmap:
-            varmap[var] = copyvar(translator, var)
-        return varmap[var]
-    def get_new_passon_var_names(block):
-        result = [copyvar(translator, var) for var in passon_vars[beforeblock]]
-        passon_vars[block] = result
-        return result
-    def copy_operation(op):
-        args = [get_new_name(arg) for arg in op.args]
-        return SpaceOperation(op.opname, args, get_new_name(op.result))
-    def copy_block(block):
-        if block in copied_blocks:
-            "already there"
-            return copied_blocks[block]
-        args = ([get_new_name(var) for var in block.inputargs] +
-                get_new_passon_var_names(block))
-        newblock = Block(args)
-        copied_blocks[block] = newblock
-        newblock.operations = [copy_operation(op) for op in block.operations]
-        newblock.exits = [copy_link(link, block) for link in block.exits]
-        newblock.exitswitch = get_new_name(block.exitswitch)
-        newblock.exc_handler = block.exc_handler
-        return newblock
-    def copy_link(link, prevblock):
-        newargs = [get_new_name(a) for a in link.args] + passon_vars[prevblock]
-        newlink = Link(newargs, copy_block(link.target), link.exitcase)
-        newlink.prevblock = copy_block(link.prevblock)
-        newlink.last_exception = get_new_name(link.last_exception)
-        newlink.last_exc_value = get_new_name(link.last_exc_value)
-        if hasattr(link, 'llexitcase'):
-            newlink.llexitcase = link.llexitcase
-        return newlink
-    linktoinlined = beforeblock.exits[0]
-    assert linktoinlined.target is afterblock
-    copiedstartblock = copy_block(graph_to_inline.startblock)
-    copiedstartblock.isstartblock = False
-    copiedreturnblock = copied_blocks[graph_to_inline.returnblock]
-    #find args passed to startblock of inlined function
-    passon_args = []
-    for arg in op.args[1:]:
-        if isinstance(arg, Constant):
-            passon_args.append(arg)
-        else:
-            index = afterblock.inputargs.index(arg)
-            passon_args.append(linktoinlined.args[index])
-    passon_args += passon_vars[beforeblock]
-    #rewire blocks
-    linktoinlined.target = copiedstartblock
-    linktoinlined.args = passon_args
-    afterblock.inputargs = [op.result] + afterblock.inputargs
-    afterblock.operations = afterblock.operations[1:]
-    linkfrominlined = Link([copiedreturnblock.inputargs[0]] + passon_vars[graph_to_inline.returnblock], afterblock)
-    linkfrominlined.prevblock = copiedreturnblock
-    copiedreturnblock.exitswitch = None
-    copiedreturnblock.exits = [linkfrominlined]
-    assert copiedreturnblock.exits[0].target == afterblock
-    if graph_to_inline.exceptblock in entrymap:
-        #let links to exceptblock of the graph to inline go to graphs exceptblock
-        copiedexceptblock = copied_blocks[graph_to_inline.exceptblock]
-        if not exception_guarded:
-            copiedexceptblock = copied_blocks[graph_to_inline.exceptblock]
-            for link in entrymap[graph_to_inline.exceptblock]:
-                copiedblock = copied_blocks[link.prevblock]
-                assert len(copiedblock.exits) == 1
-                copiedblock.exits[0].args = copiedblock.exits[0].args[:2]
-                copiedblock.exits[0].target = graph.exceptblock
-        else:
-            def find_args_in_exceptional_case(link, block, etype, evalue):
-                linkargs = []
-                for arg in link.args:
-                    if arg == link.last_exception:
-                        linkargs.append(etype)
-                    elif arg == link.last_exc_value:
-                        linkargs.append(evalue)
-                    elif isinstance(arg, Constant):
-                        linkargs.append(arg)
-                    else:
-                        index = afterblock.inputargs.index(arg)
-                        linkargs.append(passon_vars[block][index - 1])
-                return linkargs
-            exc_match = Constant(rmodel.getfunctionptr(
-                translator,
-                translator.rtyper.getexceptiondata().ll_exception_match))
-            #try to match the exceptions for simple cases
-            for link in entrymap[graph_to_inline.exceptblock]:
-                copiedblock = copied_blocks[link.prevblock]
-                copiedlink = copiedblock.exits[0]
-                eclass = _find_exception_type(copiedblock)
-                #print copiedblock.operations
-                if eclass is None:
-                    continue
-                etype = copiedlink.args[0]
-                evalue = copiedlink.args[1]
-                for exceptionlink in afterblock.exits[1:]:
-                    if exc_match.value(eclass, exceptionlink.llexitcase):
-                        copiedlink.target = exceptionlink.target
-                        linkargs = find_args_in_exceptional_case(exceptionlink,
-                                                                 link.prevblock,
-                                                                 etype, evalue)
-                        copiedlink.args = linkargs
-                        break
-            #XXXXX don't look: insert blocks that do exception matching
-            #for the cases where direct matching did not work
-            blocks = []
-            for i, link in enumerate(afterblock.exits[1:]):
-                etype = copyvar(translator, copiedexceptblock.inputargs[0])
-                evalue = copyvar(translator, copiedexceptblock.inputargs[1])
-                block = Block([etype, evalue] + get_new_passon_var_names(link.target))
-                res = Variable()
-                res.concretetype = Bool
-                translator.annotator.bindings[res] = annmodel.SomeBool()
-                args = [exc_match, etype, Constant(link.llexitcase)]
-                block.operations.append(SpaceOperation("direct_call", args, res))
-                block.exitswitch = res
-                linkargs = find_args_in_exceptional_case(link, link.target,
-                                                         etype, evalue)
-                l = Link(linkargs, link.target)
-                l.prevblock = block
-                l.exitcase = True
-                block.exits.append(l)
-                if i > 0:
-                    l = Link(blocks[-1].inputargs, block)
-                    l.prevblock = blocks[-1]
-                    l.exitcase = False
-                    blocks[-1].exits.insert(0, l)
-                blocks.append(block)
-            blocks[-1].exits = blocks[-1].exits[:1]
-            blocks[-1].operations = []
-            blocks[-1].exitswitch = None
-            linkargs = copiedexceptblock.inputargs
-            copiedexceptblock.closeblock(Link(linkargs, blocks[0]))
-            afterblock.exits = [afterblock.exits[0]]
-            afterblock.exitswitch = None
-    #cleaning up -- makes sense to be here, because I insert quite
-    #some empty blocks and blocks that can be joined
-    eliminate_empty_blocks(graph)
-    join_blocks(graph)
-    remove_identical_vars(graph)
-
-# ____________________________________________________________
-#
-# Automatic inlining
-
-def measure_median_execution_cost(graph):
-    linktargets = [graph.startblock]
-    linkmap = {}
-    for node in flatten(graph):
-        if isinstance(node, Link):
-            linkmap[node] = len(linktargets)
-            linktargets.append(node.target)
-    matrix = []
-    vector = []
-    for i, target in zip(range(len(linktargets)), linktargets):
-        vector.append(len(target.operations))
-        row = [0.0] * len(linktargets)
-        row[i] = 1.0
-        if target.exits:
-            f = 1.0 / len(target.exits)
-            for nextlink in target.exits:
-                row[linkmap[nextlink]] -= f
-        matrix.append(row)
-    M = matfunc.Mat(matrix)
-    V = matfunc.Vec(vector)
-    # we must solve: M * (vector x1...xn) = V
-    try:
-        Solution = M._solve(V)
-    except (OverflowError, ValueError):
-        return sys.maxint
-    else:
-        return Solution[0]
-
-def static_instruction_count(graph):
-    count = 0
-    for node in flatten(graph):
-        if isinstance(node, Block):
-            count += len(node.operations)
-    return count
-
-def inlining_heuristic(graph):
-    # XXX ponderation factors?
-    return (0.819487132 * measure_median_execution_cost(graph) +
-            static_instruction_count(graph))
-
-
-def static_callers(translator):
-    result = []
-    def build_call_graph(node):
-        if isinstance(node, Block):
-            for op in node.operations:
-                if (op.opname == "direct_call" and
-                    isinstance(op.args[0], Constant)):
-                    funcobj = op.args[0].value._obj
-                    graph = getattr(funcobj, 'graph', None)
-                    if graph is not None:
-                        result.append((parentgraph, graph))
-    for parentgraph in translator.flowgraphs.itervalues():
-        traverse(build_call_graph, parentgraph)
-    return result
-
-
-def auto_inlining(translator, threshold=20):
-    from heapq import heappop, heapreplace
-    callers = {}     # {graph: {graphs-that-call-it}}
-    callees = {}     # {graph: {graphs-that-it-calls}}
-    for graph1, graph2 in static_callers(translator):
-        callers.setdefault(graph2, {})[graph1] = True
-        callees.setdefault(graph1, {})[graph2] = True
-    fiboheap = [(0.0, graph) for graph in callers]
-    valid_weight = {}
-
-    while fiboheap:
-        weight, graph = fiboheap[0]
-        if not valid_weight.get(graph):
-            weight = inlining_heuristic(graph)
-            print '  + cost %7.2f %50s' % (weight, graph.name)
-            heapreplace(fiboheap, (weight, graph))
-            valid_weight[graph] = True
-            continue
-
-        if weight >= threshold:
-            break   # finished
-
-        heappop(fiboheap)
-        print 'Inlining %7.2f %50s' % (weight, graph.name)
-        for parentgraph in callers[graph]:
-            if parentgraph == graph:
-                continue
-            print '\t\t-> in %s' % parentgraph.name
-            try:
-                if backendoptimization.inline_function(translator, graph,
-                                                       parentgraph):
-                    valid_weight[parentgraph] = False
-                    for graph2 in callees.get(graph, {}):
-                        callees[parentgraph][graph2] = True
-                        callers[graph2][parentgraph] = True
-            except NotImplementedError:
-                pass

File pypy/translator/backend_opt/malloc.py

-##from pypy.translator.translator import Translator
-##from pypy.translator.simplify import eliminate_empty_blocks, join_blocks
-##from pypy.translator.simplify import remove_identical_vars
-##from pypy.translator.simplify import transform_dead_op_vars
-##from pypy.translator.unsimplify import copyvar, split_block
-##from pypy.objspace.flow.model import Variable, Constant, Block, Link
-##from pypy.objspace.flow.model import SpaceOperation, last_exception
-##from pypy.objspace.flow.model import traverse, mkentrymap, checkgraph
-##from pypy.annotation import model as annmodel
-##from pypy.tool.unionfind import UnionFind
-##from pypy.rpython.lltype import Void, Bool
-##from pypy.rpython import rmodel, lltype
-
-class Blocked(Exception):
-    pass
-
-class LifeTime:
-
-    def __init__(self, (block, var)):
-        assert isinstance(var, Variable)
-        self.variables = {(block, var) : True}
-        self.creationpoints = {}   # set of ("type of creation point", ...)
-        self.usepoints = {}        # set of ("type of use point",      ...)
-
-    def update(self, other):
-        self.variables.update(other.variables)
-        self.creationpoints.update(other.creationpoints)
-        self.usepoints.update(other.usepoints)
-
-
-def compute_lifetimes(graph):
-    """Compute the static data flow of the graph: returns a list of LifeTime
-    instances, each of which corresponds to a set of Variables from the graph.
-    The variables are grouped in the same LifeTime if a value can pass from
-    one to the other by following the links.  Each LifeTime also records all
-    places where a Variable in the set is used (read) or build (created).
-    """
-    lifetimes = UnionFind(LifeTime)
-
-    def set_creation_point(block, var, *cp):
-        _, _, info = lifetimes.find((block, var))
-        info.creationpoints[cp] = True
-
-    def set_use_point(block, var, *up):
-        _, _, info = lifetimes.find((block, var))
-        info.usepoints[up] = True
-
-    def union(block1, var1, block2, var2):
-        if isinstance(var1, Variable):
-            lifetimes.union((block1, var1), (block2, var2))
-        elif isinstance(var1, Constant):
-            set_creation_point(block2, var2, "constant", var1)
-        else:
-            raise TypeError(var1)
-
-    for var in graph.startblock.inputargs:
-        set_creation_point(graph.startblock, var, "inputargs")
-    set_use_point(graph.returnblock, graph.returnblock.inputargs[0], "return")
-    set_use_point(graph.exceptblock, graph.exceptblock.inputargs[0], "except")
-    set_use_point(graph.exceptblock, graph.exceptblock.inputargs[1], "except")
-
-    def visit(node):
-        if isinstance(node, Block):
-            for op in node.operations:
-                if op.opname in ("same_as", "cast_pointer"):
-                    # special-case these operations to identify their input
-                    # and output variables
-                    union(node, op.args[0], node, op.result)
-                else:
-                    for i in range(len(op.args)):
-                        if isinstance(op.args[i], Variable):
-                            set_use_point(node, op.args[i], "op", node, op, i)
-                    set_creation_point(node, op.result, "op", node, op)
-
-        if isinstance(node, Link):
-            if isinstance(node.last_exception, Variable):
-                set_creation_point(node.prevblock, node.last_exception,
-                                   "last_exception")
-            if isinstance(node.last_exc_value, Variable):
-                set_creation_point(node.prevblock, node.last_exc_value,
-                                   "last_exc_value")
-            for i in range(len(node.args)):
-                union(node.prevblock, node.args[i],
-                      node.target, node.target.inputargs[i])
-
-    traverse(visit, graph)
-    return lifetimes.infos()
-
-def _try_inline_malloc(info):
-    """Try to inline the mallocs creation and manipulation of the Variables
-    in the given LifeTime."""
-    # the values must be only ever created by a "malloc"
-    lltypes = {}
-    for cp in info.creationpoints:
-        if cp[0] != "op":
-            return False
-        op = cp[2]
-        if op.opname != "malloc":
-            return False
-        lltypes[op.result.concretetype] = True
-
-    # there must be a single largest malloced GcStruct;
-    # all variables can point to it or to initial substructures
-    if len(lltypes) != 1:
-        return False
-    STRUCT = lltypes.keys()[0].TO
-    assert isinstance(STRUCT, lltype.GcStruct)
-
-    # must be only ever accessed via getfield/setfield
-    for up in info.usepoints:
-        if up[0] != "op":
-            return False
-        if (up[2].opname, up[3]) not in [("getfield", 0), ("setfield", 0)]:
-            return False
-
-    # success: replace each variable with a family of variables (one per field)
-    example = STRUCT._container_example()
-    flatnames = []
-    flatconstants = {}
-    def flatten(S, example):
-        start = 0
-        if S._names and isinstance(S._flds[S._names[0]], lltype.GcStruct):
-            flatten(S._flds[S._names[0]], getattr(example, S._names[0]))
-            start = 1
-        for name in S._names[start:]:
-            flatnames.append((S, name))
-            constant = Constant(getattr(example, name))
-            constant.concretetype = lltype.typeOf(constant.value)
-            flatconstants[S, name] = constant
-    flatten(STRUCT, example)
-
-    pending = info.variables.keys()
-    for block, var in pending:
-        newvarsmap = {}
-
-        def var_comes_from_outside():
-            for key in flatnames:
-                newvar = Variable()
-                newvar.concretetype = flatconstants[key].concretetype
-                newvarsmap[key] = newvar
-
-        def var_is_created_here():
-            newvarsmap.update(flatconstants)
-
-        def make_newvars():
-            return [newvarsmap[key] for key in flatnames]
-
-        if var in block.inputargs:
-            var_comes_from_outside()
-            i = block.inputargs.index(var)
-            block.inputargs = (block.inputargs[:i] + make_newvars() +
-                               block.inputargs[i+1:])
-
-        assert block.operations != ()
-        newops = []
-        try:
-            for op in block.operations:
-                assert var not in op.args[1:]   # should be the first arg only
-                if op.args and var == op.args[0]:
-                    if op.opname == "getfield":
-                        S = var.concretetype.TO
-                        fldname = op.args[1].value
-                        newop = SpaceOperation("same_as",
-                                               [newvarsmap[S, fldname]],
-                                               op.result)
-                        newops.append(newop)
-                    elif op.opname == "setfield":
-                        S = var.concretetype.TO
-                        fldname = op.args[1].value
-                        assert (S, fldname) in newvarsmap
-                        newvarsmap[S, fldname] = op.args[2]
-                    elif op.opname in ("same_as", "cast_pointer"):
-                        # temporary pseudo-operation, should be removed below
-                        newop = SpaceOperation("_tmp_same_as",
-                                               make_newvars(),
-                                               op.result)
-                        newops.append(newop)
-                    else:
-                        raise AssertionError, op.opname
-                elif var == op.result:
-                    assert not newvarsmap
-                    if op.opname == "malloc":
-                        var_is_created_here()
-                    elif op.opname in ("same_as", "cast_pointer"):
-                        # in a 'v2=same_as(v1)', we must analyse v1 before
-                        # we can analyse v2.  If we get them in the wrong
-                        # order we cancel and reschedule v2.
-                        raise Blocked
-                    elif op.opname == "_tmp_same_as":
-                        # pseudo-operation just introduced by the code
-                        # some lines above.
-                        for key, v in zip(flatnames, op.args):
-                            newvarsmap[key] = v
-                    else:
-                        raise AssertionError, op.opname
-                else:
-                    newops.append(op)
-        except Blocked:
-            pending.append((block, var))
-            continue
-        block.operations[:] = newops
-
-        for link in block.exits:
-            while var in link.args:
-                i = link.args.index(var)
-                link.args = link.args[:i] + make_newvars() + link.args[i+1:]
-
-    return True
-
-def remove_mallocs_once(graph):
-    """Perform one iteration of malloc removal."""
-    lifetimes = compute_lifetimes(graph)
-    progress = False
-    for info in lifetimes:
-        if _try_inline_malloc(info):
-            progress = True
-    return progress
-
-def remove_simple_mallocs(graph):
-    """Iteratively remove (inline) the mallocs that can be simplified away."""
-    done_something = False
-    while remove_mallocs_once(graph):
-        done_something = True
-    return done_something

File pypy/translator/backend_opt/matfunc.py

-'''Basic Table, Matrix and Vector functions for Python 2.2
-   License:  Public Domain     Author:   Raymond Hettinger email:  python@rcn.com
-   Updates and documentation:  http://users.rcn.com/python/download/python.htm
-   Revision In Use:  'File %n, Ver %v, Date %f'                             '''
-Version = 'File MATFUNC.PY, Ver 183, Date 12-Dec-2002,14:33:42'
-
-import operator, math, random
-NPRE, NPOST = 0, 0                    # Disables pre and post condition checks
-
-def iszero(z):  return abs(z) < .000001
-def getreal(z):
-    try:
-        return z.real
-    except AttributeError:
-        return z
-def getimag(z):
-    try:
-        return z.imag
-    except AttributeError:
-        return 0
-def getconj(z):
-    try:
-        return z.conjugate()
-    except AttributeError:
-        return z
-
-
-separator = [ '', '\t', '\n', '\n----------\n', '\n===========\n' ]
-
-class Table(list):
-    dim = 1
-    concat = list.__add__      # A substitute for the overridden __add__ method
-    def __getslice__( self, i, j ):
-        return self.__class__( list.__getslice__(self,i,j) )
-    def __init__( self, elems ):
-        list.__init__( self, elems )
-        if len(elems) and hasattr(elems[0], 'dim'): self.dim = elems[0].dim + 1
-    def __str__( self ):
-        return separator[self.dim].join( map(str, self) )
-    def map( self, op, rhs=None ):
-        '''Apply a unary operator to every element in the matrix or a binary operator to corresponding
-        elements in two arrays.  If the dimensions are different, broadcast the smaller dimension over
-        the larger (i.e. match a scalar to every element in a vector or a vector to a matrix).'''
-        if rhs is None:                                                 # Unary case
-            return self.dim==1 and self.__class__( map(op, self) ) or self.__class__( [elem.map(op) for elem in self] )
-        elif not hasattr(rhs,'dim'):                                    # List / Scalar op
-            return self.__class__( [op(e,rhs) for e in self] )
-        elif self.dim == rhs.dim:                                       # Same level Vec / Vec or Matrix / Matrix
-            assert NPRE or len(self) == len(rhs), 'Table operation requires len sizes to agree'
-            return self.__class__( map(op, self, rhs) )
-        elif self.dim < rhs.dim:                                        # Vec / Matrix
-            return self.__class__( [op(self,e) for e in rhs]  )
-        return self.__class__( [op(e,rhs) for e in self] )         # Matrix / Vec
-    def __mul__( self, rhs ):  return self.map( operator.mul, rhs )
-    def __div__( self, rhs ):  return self.map( operator.div, rhs )
-    def __sub__( self, rhs ):  return self.map( operator.sub, rhs )
-    def __add__( self, rhs ):  return self.map( operator.add, rhs )
-    def __rmul__( self, lhs ):  return self*lhs
-    def __rdiv__( self, lhs ):  return self*(1.0/lhs)
-    def __rsub__( self, lhs ):  return -(self-lhs)
-    def __radd__( self, lhs ):  return self+lhs
-    def __abs__( self ): return self.map( abs )
-    def __neg__( self ): return self.map( operator.neg )
-    def conjugate( self ): return self.map( getconj )
-    def real( self ): return self.map( getreal  )
-    def imag( self ): return self.map( getimag )
-    def flatten( self ):
-        if self.dim == 1: return self
-        return reduce( lambda cum, e: e.flatten().concat(cum), self, [] )
-    def prod( self ):  return reduce(operator.mul, self.flatten(), 1.0)
-    def sum( self ):  return reduce(operator.add, self.flatten(), 0.0)
-    def exists( self, predicate ):
-        for elem in self.flatten():
-            if predicate(elem):
-                return 1
-        return 0
-    def forall( self, predicate ):
-        for elem in self.flatten():
-            if not predicate(elem):
-                return 0
-        return 1
-    def __eq__( self, rhs ):  return (self - rhs).forall( iszero )
-
-class Vec(Table):
-    def dot( self, otherVec ):  return reduce(operator.add, map(operator.mul, self, otherVec), 0.0)
-    def norm( self ):  return math.sqrt(abs( self.dot(self.conjugate()) ))
-    def normalize( self ):  return self / self.norm()
-    def outer( self, otherVec ):  return Mat([otherVec*x for x in self])
-    def cross( self, otherVec ):
-        'Compute a Vector or Cross Product with another vector'
-        assert len(self) == len(otherVec) == 3, 'Cross product only defined for 3-D vectors'
-        u, v = self, otherVec
-        return Vec([ u[1]*v[2]-u[2]*v[1], u[2]*v[0]-u[0]*v[2], u[0]*v[1]-u[1]*v[0] ])
-    def house( self, index ):
-        'Compute a Householder vector which zeroes all but the index element after a reflection'
-        v = Vec( Table([0]*index).concat(self[index:]) ).normalize()
-        t = v[index]
-        sigma = 1.0 - t**2
-        if sigma != 0.0:
-            t = v[index] = t<=0 and t-1.0 or -sigma / (t + 1.0)
-            v /= t
-        return v, 2.0 * t**2 / (sigma + t**2)
-    def polyval( self, x ):
-        'Vec([6,3,4]).polyval(5) evaluates to 6*x**2 + 3*x + 4 at x=5'
-        return reduce( lambda cum,c: cum*x+c, self, 0.0 )
-    def ratval( self, x ):
-        'Vec([10,20,30,40,50]).ratfit(5) evaluates to (10*x**2 + 20*x + 30) / (40*x**2 + 50*x + 1) at x=5.'
-        degree = len(self) / 2
-        num, den = self[:degree+1], self[degree+1:] + [1]
-        return num.polyval(x) / den.polyval(x)
-
-class Matrix(Table):
-    __slots__ = ['size', 'rows', 'cols']
-    def __init__( self, elems ):
-        'Form a matrix from a list of lists or a list of Vecs'
-        Table.__init__( self, hasattr(elems[0], 'dot') and elems or map(Vec,map(tuple,elems)) )
-        self.size = self.rows, self.cols = len(elems), len(elems[0])
-    def tr( self ):
-        'Tranpose elements so that Transposed[i][j] = Original[j][i]'
-        return Mat(zip(*self))
-    def star( self ):
-        'Return the Hermetian adjoint so that Star[i][j] = Original[j][i].conjugate()'
-        return self.tr().conjugate()
-    def diag( self ):
-        'Return a vector composed of elements on the matrix diagonal'
-        return Vec( [self[i][i] for i in range(min(self.size))] )
-    def trace( self ): return self.diag().sum()
-    def mmul( self, other ):
-        'Matrix multiply by another matrix or a column vector '
-        if other.dim==2: return Mat( map(self.mmul, other.tr()) ).tr()
-        assert NPRE or self.cols == len(other)
-        return Vec( map(other.dot, self) )
-    def augment( self, otherMat ):
-        'Make a new matrix with the two original matrices laid side by side'
-        assert self.rows == otherMat.rows, 'Size mismatch: %s * %s' % (`self.size`, `otherMat.size`)
-        return Mat( map(Table.concat, self, otherMat) )
-    def qr( self, ROnly=0 ):
-        'QR decomposition using Householder reflections: Q*R==self, Q.tr()*Q==I(n), R upper triangular'
-        R = self
-        m, n = R.size
-        for i in range(min(m,n)):
-            v, beta = R.tr()[i].house(i)
-            R -= v.outer( R.tr().mmul(v)*beta )
-        for i in range(1,min(n,m)): R[i][:i] = [0] * i
-        R = Mat(R[:n])
-        if ROnly: return R
-        Q = R.tr().solve(self.tr()).tr()       # Rt Qt = At    nn  nm  = nm
-        self.qr = lambda r=0, c=`self`: not r and c==`self` and (Q,R) or Matrix.qr(self,r) #Cache result
-        assert NPOST or m>=n and Q.size==(m,n) and isinstance(R,UpperTri) or m<n and Q.size==(m,m) and R.size==(m,n)
-        assert NPOST or Q.mmul(R)==self and Q.tr().mmul(Q)==eye(min(m,n))
-        return Q, R
-    def _solve( self, b ):
-        '''General matrices (including non-square) are solved using the QR decomposition.
-        For inconsistent cases, returns the least squares solution'''
-        Q, R = self.qr()
-        return R.solve( Q.tr().mmul(b) )
-    def solve( self, b ):
-        'Divide matrix into a column vector or matrix and iterate to improve the solution'
-        if b.dim==2: return Mat( map(self.solve, b.tr()) ).tr()
-        assert NPRE or self.rows == len(b), 'Matrix row count %d must match vector length %d' % (self.rows, len(b))
-        x = self._solve( b )
-        diff = b - self.mmul(x)
-        maxdiff = diff.dot(diff)
-        for i in range(10):
-            xnew = x + self._solve( diff )
-            diffnew = b - self.mmul(xnew)
-            maxdiffnew = diffnew.dot(diffnew)
-            if maxdiffnew >= maxdiff:  break
-            x, diff, maxdiff = xnew, diffnew, maxdiffnew
-            #print >> sys.stderr, i+1, maxdiff
-        assert NPOST or self.rows!=self.cols or self.mmul(x) == b
-        return x
-    def rank( self ):  return Vec([ not row.forall(iszero) for row in self.qr(ROnly=1) ]).sum()
-
-class Square(Matrix):
-    def lu( self ):
-        'Factor a square matrix into lower and upper triangular form such that L.mmul(U)==A'
-        n = self.rows
-        L, U = eye(n), Mat(self[:])
-        for i in range(n):
-            for j in range(i+1,U.rows):
-                assert U[i][i] != 0.0, 'LU requires non-zero elements on the diagonal'
-                L[j][i] = m = 1.0 * U[j][i] / U[i][i]
-                U[j] -= U[i] * m
-        assert NPOST or isinstance(L,LowerTri) and isinstance(U,UpperTri) and L*U==self
-        return L, U
-    def __pow__( self, exp ):
-        'Raise a square matrix to an integer power (i.e. A**3 is the same as A.mmul(A.mmul(A))'
-        assert NPRE or exp==int(exp) and exp>0, 'Matrix powers only defined for positive integers not %s' % exp
-        if exp == 1: return self
-        if exp&1: return self.mmul(self ** (exp-1))
-        sqrme = self ** (exp/2)
-        return sqrme.mmul(sqrme)
-    def det( self ):  return self.qr( ROnly=1 ).det()
-    def inverse( self ):  return self.solve( eye(self.rows) )
-    def hessenberg( self ):
-        '''Householder reduction to Hessenberg Form (zeroes below the diagonal)
-        while keeping the same eigenvalues as self.'''
-        for i in range(self.cols-2):
-            v, beta = self.tr()[i].house(i+1)
-            self -= v.outer( self.tr().mmul(v)*beta )
-            self -= self.mmul(v).outer(v*beta)
-        return self
-    def eigs( self ):
-        'Estimate principal eigenvalues using the QR with shifts method'
-        origTrace, origDet = self.trace(), self.det()
-        self = self.hessenberg()
-        eigvals = Vec([])
-        for i in range(self.rows-1,0,-1):
-            while not self[i][:i].forall(iszero):
-                shift = eye(i+1) * self[i][i]
-                q, r = (self - shift).qr()
-                self = r.mmul(q) + shift
-            eigvals.append( self[i][i] )
-            self = Mat( [self[r][:i] for r in range(i)] )
-        eigvals.append( self[0][0] )
-        assert NPOST or iszero( (abs(origDet) - abs(eigvals.prod())) / 1000.0 )
-        assert NPOST or iszero( origTrace - eigvals.sum() )
-        return Vec(eigvals)
-
-class Triangular(Square):
-    def eigs( self ):  return self.diag()
-    def det( self ):  return self.diag().prod()
-
-class UpperTri(Triangular):
-    def _solve( self, b ):
-        'Solve an upper triangular matrix using backward substitution'
-        x = Vec([])
-        for i in range(self.rows-1, -1, -1):
-            assert NPRE or self[i][i], 'Backsub requires non-zero elements on the diagonal'
-            x.insert(0, (b[i] - x.dot(self[i][i+1:])) / self[i][i] )
-        return x
-
-class LowerTri(Triangular):
-    def _solve( self, b ):
-        'Solve a lower triangular matrix using forward substitution'
-        x = Vec([])
-        for i in range(self.rows):
-            assert NPRE or self[i][i], 'Forward sub requires non-zero elements on the diagonal'
-            x.append( (b[i] - x.dot(self[i][:i])) / self[i][i] )
-        return x
-
-def Mat( elems ):
-    'Factory function to create a new matrix.'
-    m, n = len(elems), len(elems[0])
-    if m != n: return Matrix(elems)
-    if n <= 1: return Square(elems)
-    for i in range(1, len(elems)):
-        if not iszero( max(map(abs, elems[i][:i])) ):
-            break
-    else: return UpperTri(elems)
-    for i in range(0, len(elems)-1):
-        if not iszero( max(map(abs, elems[i][i+1:])) ):
-            return Square(elems)
-    return LowerTri(elems)
-
-
-def funToVec( tgtfun, low=-1, high=1, steps=40, EqualSpacing=0 ):
-    '''Compute x,y points from evaluating a target function over an interval (low to high)
-    at evenly spaced points or with Chebyshev abscissa spacing (default) '''
-    if EqualSpacing:
-        h = (0.0+high-low)/steps
-        xvec = [low+h/2.0+h*i for i in range(steps)]
-    else:
-        scale, base = (0.0+high-low)/2.0, (0.0+high+low)/2.0
-        xvec = [base+scale*math.cos(((2*steps-1-2*i)*math.pi)/(2*steps)) for i in range(steps)]
-    yvec = map(tgtfun, xvec)
-    return Mat( [xvec, yvec] )
-
-def funfit( (xvec, yvec), basisfuns ):
-    'Solves design matrix for approximating to basis functions'
-    return Mat([ map(form,xvec) for form in basisfuns ]).tr().solve(Vec(yvec))
-
-def polyfit( (xvec, yvec), degree=2 ):
-    'Solves Vandermonde design matrix for approximating polynomial coefficients'
-    return Mat([ [x**n for n in range(degree,-1,-1)] for x in xvec ]).solve(Vec(yvec))
-
-def ratfit( (xvec, yvec), degree=2 ):
-    'Solves design matrix for approximating rational polynomial coefficients (a*x**2 + b*x + c)/(d*x**2 + e*x + 1)'
-    return Mat([[x**n for n in range(degree,-1,-1)]+[-y*x**n for n in range(degree,0,-1)] for x,y in zip(xvec,yvec)]).solve(Vec(yvec))
-
-def genmat(m, n, func):
-    if not n: n=m
-    return Mat([ [func(i,j) for i in range(n)] for j in range(m) ])
-
-def zeroes(m=1, n=None):
-    'Zero matrix with side length m-by-m or m-by-n.'
-    return genmat(m,n, lambda i,j: 0)
-
-def eye(m=1, n=None):
-    'Identity matrix with side length m-by-m or m-by-n'
-    return genmat(m,n, lambda i,j: i==j)
-
-def hilb(m=1, n=None):
-    'Hilbert matrix with side length m-by-m or m-by-n.  Elem[i][j]=1/(i+j+1)'
-    return genmat(m,n, lambda i,j: 1.0/(i+j+1.0))
-
-def rand(m=1, n=None):
-    'Random matrix with side length m-by-m or m-by-n'
-    return genmat(m,n, lambda i,j: random.random())
-
-if __name__ == '__main__':
-    import cmath
-    a = Table([1+2j,2,3,4])
-    b = Table([5,6,7,8])
-    C = Table([a,b])
-    print 'a+b', a+b
-    print '2+a', 2+a
-    print 'a/5.0', a/5.0
-    print '2*a+3*b', 2*a+3*b
-    print 'a+C', a+C
-    print '3+C', 3+C
-    print 'C+b', C+b
-    print 'C.sum()', C.sum()
-    print 'C.map(math.cos)', C.map(cmath.cos)
-    print 'C.conjugate()', C.conjugate()
-    print 'C.real()', C.real()
-
-    print zeroes(3)
-    print eye(4)
-    print hilb(3,5)
-
-    C = Mat( [[1,2,3], [4,5,1,], [7,8,9]] )
-    print C.mmul( C.tr()), '\n'
-    print C ** 5, '\n'
-    print C + C.tr(), '\n'
-
-    A = C.tr().augment( Mat([[10,11,13]]).tr() ).tr()
-    q, r = A.qr()
-    assert q.mmul(r) == A
-    assert q.tr().mmul(q)==eye(3)
-    print 'q:\n', q, '\nr:\n', r, '\nQ.tr()&Q:\n', q.tr().mmul(q), '\nQ*R\n', q.mmul(r), '\n'
-    b = Vec([50, 100, 220, 321])
-    x = A.solve(b)
-    print 'x:  ', x
-    print 'b:  ', b
-    print 'Ax: ', A.mmul(x)
-
-    inv = C.inverse()
-    print '\ninverse C:\n', inv, '\nC * inv(C):\n', C.mmul(inv)
-    assert C.mmul(inv) == eye(3)
-
-    points = (xvec,yvec) = funToVec(lambda x: math.sin(x)+2*math.cos(.7*x+.1), low=0, high=3, EqualSpacing=1)
-    basis = [lambda x: math.sin(x), lambda x: math.exp(x), lambda x: x**2]
-    print 'Func coeffs:', funfit( points, basis )
-    print 'Poly coeffs:', polyfit( points, degree=5 )
-    points = (xvec,yvec) = funToVec(lambda x: math.sin(x)+2*math.cos(.7*x+.1), low=0, high=3)
-    print 'Rational coeffs:', ratfit( points )
-
-    print polyfit(([1,2,3,4], [1,4,9,16]), 2)
-
-    mtable = Vec([1,2,3]).outer(Vec([1,2]))
-    print mtable, mtable.size
-
-    A = Mat([ [2,0,3], [1,5,1], [18,0,6] ])
-    print 'A:'
-    print A
-    print 'eigs:'
-    print A.eigs()
-    print 'Should be:', Vec([11.6158, 5.0000, -3.6158])
-    print 'det(A)'
-    print A.det()
-
-    c = Mat( [[1,2,30],[4,5,10],[10,80,9]] )     # Failed example from Konrad Hinsen
-    print 'C:\n', c
-    print c.eigs()
-    print 'Should be:', Vec([-8.9554, 43.2497, -19.2943])
-
-    A = Mat([ [1,2,3,4], [4,5,6,7], [2,1,5,0], [4,2,1,0] ] )    # Kincaid and Cheney p.326
-    print 'A:\n', A
-    print A.eigs()
-    print 'Should be:', Vec([3.5736, 0.1765, 11.1055, -3.8556])
-
-    A = rand(3)
-    q,r = A.qr()
-    s,t = A.qr()
-    print q is s                # Test caching
-    print r is t
-    A[1][1] = 1.1               # Invalidate the cache
-    u,v = A.qr()
-    print q is u                # Verify old result not used
-    print r is v
-    print u.mmul(v) == A        # Verify new result
-
-    print 'Test qr on 3x5 matrix'
-    a = rand(3,5)
-    q,r = a.qr()
-    print q.mmul(r) == a
-    print q.tr().mmul(q) == eye(3)
-

File pypy/translator/backend_opt/remove_no_ops.py

-##from pypy.translator.translator import Translator
-##from pypy.translator.simplify import eliminate_empty_blocks, join_blocks
-##from pypy.translator.simplify import remove_identical_vars
-##from pypy.translator.simplify import transform_dead_op_vars
-##from pypy.translator.unsimplify import copyvar, split_block
-##from pypy.objspace.flow.model import Variable, Constant, Block, Link
-##from pypy.objspace.flow.model import SpaceOperation, last_exception
-##from pypy.objspace.flow.model import traverse, mkentrymap, checkgraph
-##from pypy.annotation import model as annmodel
-##from pypy.tool.unionfind import UnionFind
-##from pypy.rpython.lltype import Void, Bool
-##from pypy.rpython import rmodel, lltype
-
-def remove_same_as(graph):
-    """Remove all 'same_as' operations.
-    """
-    same_as_positions = []
-    def visit(node): 
-        if isinstance(node, Block): 
-            for i, op in enumerate(node.operations):
-                if op.opname == 'same_as': 
-                    same_as_positions.append((node, i))
-    traverse(visit, graph)
-    while same_as_positions:
-        block, index = same_as_positions.pop()
-        same_as_result = block.operations[index].result
-        same_as_arg = block.operations[index].args[0]
-        # replace the new variable (same_as_result) with the old variable
-        # (from all subsequent positions)
-        for op in block.operations[index:]:
-            if op is not None:
-                for i in range(len(op.args)):
-                    if op.args[i] == same_as_result:
-                        op.args[i] = same_as_arg
-        for link in block.exits:
-            for i in range(len(link.args)):
-                if link.args[i] == same_as_result:
-                    link.args[i] = same_as_arg
-        if block.exitswitch == same_as_result:
-            block.exitswitch = same_as_arg
-        block.operations[index] = None
-       
-    # remove all same_as operations
-    def visit(node): 
-        if isinstance(node, Block) and node.operations:
-            node.operations[:] = filter(None, node.operations)
-    traverse(visit, graph)
-
-
-def remove_void(translator):
-    for func, graph in translator.flowgraphs.iteritems():
-        args = [arg for arg in graph.startblock.inputargs
-                    if arg.concretetype is not Void]
-        graph.startblock.inputargs = args
-    def visit(block): 
-        if isinstance(block, Block):
-            for op in block.operations:
-                if op.opname == 'direct_call':
-                    args = [arg for arg in op.args
-                                if arg.concretetype is not Void]
-                    op.args = args
-    for func, graph in translator.flowgraphs.iteritems():
-        traverse(visit, graph)
- 
-##def rename_extfunc_calls(translator):
-##    from pypy.rpython.extfunctable import table as extfunctable
-##    def visit(block): 
-##        if isinstance(block, Block):
-##            for op in block.operations:
-##                if op.opname != 'direct_call':
-##                    continue
-##                functionref = op.args[0]
-##                if not isinstance(functionref, Constant):
-##                    continue
-##                _callable = functionref.value._obj._callable
-##                for func, extfuncinfo in extfunctable.iteritems():  # precompute a dict?
-##                    if _callable is not extfuncinfo.ll_function or not extfuncinfo.backend_functiontemplate:
-##                        continue
-##                    language, functionname = extfuncinfo.backend_functiontemplate.split(':')
-##                    if language is 'C':
-##                        old_name = functionref.value._obj._name[:]
-##                        functionref.value._obj._name = functionname
-##                        #print 'rename_extfunc_calls: %s -> %s' % (old_name, functionref.value._obj._name)
-##                        break
-##    for func, graph in translator.flowgraphs.iteritems():
-##        traverse(visit, graph)

File pypy/translator/backend_opt/ssa.py

-##from pypy.translator.translator import Translator
-##from pypy.translator.simplify import eliminate_empty_blocks, join_blocks
-##from pypy.translator.simplify import remove_identical_vars
-##from pypy.translator.simplify import transform_dead_op_vars
-##from pypy.translator.unsimplify import copyvar, split_block
-##from pypy.objspace.flow.model import Variable, Constant, Block, Link
-##from pypy.objspace.flow.model import SpaceOperation, last_exception
-##from pypy.objspace.flow.model import traverse, mkentrymap, checkgraph
-##from pypy.annotation import model as annmodel
-##from pypy.tool.unionfind import UnionFind
-##from pypy.rpython.lltype import Void, Bool
-##from pypy.rpython import rmodel, lltype
- 
-def SSI_to_SSA(graph):
-    """Rename the variables in a flow graph as much as possible without
-    violating the SSA rule.  'SSI' means that each Variable in a flow graph is
-    defined only once in the whole graph; all our graphs are SSI.  This
-    function does not break that rule, but changes the 'name' of some
-    Variables to give them the same 'name' as other Variables.  The result
-    looks like an SSA graph.  'SSA' means that each var name appears as the
-    result of an operation only once in the whole graph, but it can be
-    passed to other blocks across links.
-    """
-    entrymap = mkentrymap(graph)
-    consider_blocks = entrymap
-    variable_families = UnionFind()
-
-    # group variables by families; a family of variables will be identified.
-    while consider_blocks:
-        blocklist = consider_blocks.keys()
-        consider_blocks = {}
-        for block in blocklist:
-            if block is graph.startblock:
-                continue
-            links = entrymap[block]
-            assert links
-            mapping = {}
-            for i in range(len(block.inputargs)):
-                # list of possible vars that can arrive in i'th position
-                v1 = block.inputargs[i]
-                v1 = variable_families.find_rep(v1)
-                inputs = {v1: True}
-                key = []
-                for link in links:
-                    v = link.args[i]
-                    if not isinstance(v, Variable):
-                        break
-                    v = variable_families.find_rep(v)
-                    inputs[v] = True
-                else:
-                    if len(inputs) == 2:
-                        variable_families.union(*inputs)
-                        # mark all the following blocks as subject to
-                        # possible further optimization
-                        for link in block.exits:
-                            consider_blocks[link.target] = True
-    # rename variables to give them the name of their family representative
-    for v in variable_families.keys():
-        v1 = variable_families.find_rep(v)
-        if v1 != v:
-            v._name = v1.name
-
-    # sanity-check that the same name is never used several times in a block
-    variables_by_name = {}
-    for block in entrymap:
-        vars = [op.result for op in block.operations]
-        for link in block.exits:
-            vars += link.getextravars()
-        assert len(dict.fromkeys([v.name for v in vars])) == len(vars), (
-            "duplicate variable name in %r" % (block,))
-        for v in vars:
-            variables_by_name.setdefault(v.name, []).append(v)
-    # sanity-check that variables with the same name have the same concretetype
-    for vname, vlist in variables_by_name.items():
-        vct = [getattr(v, 'concretetype', None) for v in vlist]
-        assert vct == vct[:1] * len(vct), (
-            "variables called %s have mixed concretetypes: %r" % (vname, vct))

File pypy/translator/backend_opt/test/__init__.py

Empty file removed.

File pypy/translator/backend_opt/test/test_inline.py

-from pypy.translator.backendoptimization import remove_void, inline_function
-from pypy.translator.backendoptimization import remove_simple_mallocs
-from pypy.translator.translator import Translator
-from pypy.rpython.lltype import Void
-from pypy.rpython.llinterp import LLInterpreter
-from pypy.objspace.flow.model import checkgraph, flatten, Block
-from pypy.translator.test.snippet import simple_method, is_perfect_number
-from pypy.translator.llvm.log import log
-
-import py
-log = py.log.Producer('test_backendoptimization')
-
-def annotate_and_remove_void(f, annotate):
-    t = Translator(f)
-    a = t.annotate(annotate)
-    t.specialize()
-    remove_void(t)
-    return t
-
-def test_remove_void_args():
-    def f(i):
-        return [1,2,3,i][i]
-    t = annotate_and_remove_void(f, [int])
-    for func, graph in t.flowgraphs.iteritems():
-        assert checkgraph(graph) is None
-        for arg in graph.startblock.inputargs:
-            assert arg.concretetype is not Void
-    interp = LLInterpreter(t.flowgraphs, t.rtyper)
-    assert interp.eval_function(f, [0]) == 1 
-
-def test_remove_void_in_struct():
-    t = annotate_and_remove_void(simple_method, [int])
-    #t.view()
-    log(t.flowgraphs.iteritems())
-    for func, graph in t.flowgraphs.iteritems():
-        log('func : ' + str(func))
-        log('graph: ' + str(graph))
-        assert checkgraph(graph) is None
-        #for fieldname in self.struct._names:    #XXX helper (in lltype?) should remove these voids
-        #    type_ = getattr(struct, fieldname)
-        #    log('fieldname=%(fieldname)s , type_=%(type_)s' % locals())
-        #    assert _type is not Void
-    #interp = LLInterpreter(t.flowgraphs, t.rtyper)
-    #assert interp.eval_function(f, [0]) == 1 
-
-def test_inline_simple():
-    def f(x, y):
-        return (g(x, y) + 1) * x
-    def g(x, y):
-        if x > 0:
-            return x * y
-        else:
-            return -x * y
-    t = Translator(f)
-    a = t.annotate([int, int])
-    a.simplify()
-    t.specialize()
-    inline_function(t, g, t.flowgraphs[f])
-    interp = LLInterpreter(t.flowgraphs, t.rtyper)
-    result = interp.eval_function(f, [-1, 5])
-    assert result == f(-1, 5)
-    result = interp.eval_function(f, [2, 12])
-    assert result == f(2, 12)
-
-def test_inline_big():
-    def f(x):
-        result = []
-        for i in range(1, x+1):
-            if is_perfect_number(i):
-                result.append(i)
-        return result
-    t = Translator(f)
-    a = t.annotate([int])
-    a.simplify()
-    t.specialize()
-    inline_function(t, is_perfect_number, t.flowgraphs[f])
-    interp = LLInterpreter(t.flowgraphs, t.rtyper)
-    result = interp.eval_function(f, [10])
-    assert result.length == len(f(10))
-
-def test_inline_raising():
-    def f(x):
-        if x == 1:
-            raise ValueError
-        return x
-    def g(x):
-        a = f(x)
-        if x == 2:
-            raise KeyError
-    def h(x):
-        try:
-            g(x)
-        except ValueError:
-            return 1
-        except KeyError:
-            return 2
-        return x
-    t = Translator(h)
-    a = t.annotate([int])
-    a.simplify()
-    t.specialize()
-    inline_function(t, f, t.flowgraphs[g])
-    interp = LLInterpreter(t.flowgraphs, t.rtyper)
-    result = interp.eval_function(h, [0])
-    assert result == 0
-    result = interp.eval_function(h, [1])
-    assert result == 1
-    result = interp.eval_function(h, [2])
-    assert result == 2    
-
-def test_inline_several_times():
-    def f(x):
-        return (x + 1) * 2
-    def g(x):
-        if x:
-            a = f(x) + f(x)
-        else:
-            a = f(x) + 1
-        return a + f(x)
-    t = Translator(g)
-    a = t.annotate([int])
-    a.simplify()
-    t.specialize()
-    inline_function(t, f, t.flowgraphs[g])
-    interp = LLInterpreter(t.flowgraphs, t.rtyper)
-    result = interp.eval_function(g, [0])
-    assert result == g(0)
-    result = interp.eval_function(g, [42])
-    assert result == g(42)
-
-def test_inline_exceptions():
-    def f(x):
-        if x == 0:
-            raise ValueError
-        if x == 1:
-            raise KeyError
-    def g(x):
-        try:
-            f(x)
-        except ValueError:
-            return 2
-        except KeyError:
-            return x+2
-        return 1
-    t = Translator(g)
-    a = t.annotate([int])
-    a.simplify()
-    t.specialize()
-    inline_function(t, f, t.flowgraphs[g])
-    interp = LLInterpreter(t.flowgraphs, t.rtyper)
-    result = interp.eval_function(g, [0])
-    assert result == 2
-    result = interp.eval_function(g, [1])
-    assert result == 3
-    result = interp.eval_function(g, [42])
-    assert result == 1
-
-def DONOTtest_inline_var_exception():
-    # this test is disabled for now, because f() contains a direct_call
-    # (at the end, to a ll helper, to get the type of the exception object)
-    def f(x):
-        e = None
-        if x == 0:
-            e = ValueError()
-        elif x == 1:
-            e = KeyError()
-        if x == 0 or x == 1:
-            raise e
-    def g(x):
-        try:
-            f(x)
-        except ValueError:
-            return 2
-        except KeyError:
-            return 3
-        return 1
-    t = Translator(g)
-    a = t.annotate([int])
-    a.simplify()
-    t.specialize()
-    inline_function(t, f, t.flowgraphs[g])
-    interp = LLInterpreter(t.flowgraphs, t.rtyper)
-    result = interp.eval_function(g, [0])
-    assert result == 2
-    result = interp.eval_function(g, [1])
-    assert result == 3
-    result = interp.eval_function(g, [42])
-    assert result == 1
-
-def DONOTtest_call_call():
-    # for reference.  Just remove this test if we decide not to support
-    # catching exceptions while inlining a graph that contains further
-    # direct_calls.
-    def e(x):
-        if x < 0:
-            raise KeyError
-        return x+1
-    def f(x):
-        return e(x)+2
-    def g(x):
-        try:
-            return f(x)+3
-        except KeyError:
-            return -1
-    t = Translator(g)
-    a = t.annotate([int])
-    a.simplify()
-    t.specialize()
-    inline_function(t, f, t.flowgraphs[g])
-    interp = LLInterpreter(t.flowgraphs, t.rtyper)
-    result = interp.eval_function(g, [100])
-    assert result == 106
-    result = interp.eval_function(g, [-100])
-    assert result == -1
-
-def test_for_loop():
-    def f(x):
-        result = 0
-        for i in range(0, x):
-            result += i
-        return result
-    t = Translator(f)
-    a = t.annotate([int])
-    a.simplify()
-    t.specialize()
-    for graph in t.flowgraphs.values():
-        if graph.name.startswith('ll_rangenext'):
-            break
-    else:
-        assert 0, "cannot find ll_rangenext_*() function"
-    inline_function(t, graph, t.flowgraphs[f])
-    interp = LLInterpreter(t.flowgraphs, t.rtyper)
-    result = interp.eval_function(f, [10])
-    assert result == 45
-
-
-def check_malloc_removed(fn, signature, expected_remaining_mallocs):
-    t = Translator(fn)
-    t.annotate(signature)
-    t.specialize()
-    graph = t.getflowgraph()
-    remove_simple_mallocs(graph)
-    checkgraph(graph)
-    count = 0
-    for node in flatten(graph):
-        if isinstance(node, Block):
-            for op in node.operations:
-                if op.opname == 'malloc':
-                    count += 1
-    assert count == expected_remaining_mallocs
-
-def test_remove_mallocs():
-    def fn1(x, y):
-        s, d = x+y, x-y
-        return s*d
-    yield check_malloc_removed, fn1, [int, int], 0
-    #
-    class T:
-        pass
-    def fn2(x, y):
-        t = T()
-        t.x = x
-        t.y = y
-        if x > 0:
-            return t.x + t.y
-        else:
-            return t.x - t.y
-    yield check_malloc_removed, fn2, [int, int], 0
-    #
-    def fn3(x):
-        a, ((b, c), d, e) = x+1, ((x+2, x+3), x+4, x+5)
-        return a+b+c+d+e
-    yield check_malloc_removed, fn3, [int], 0
-    #
-    class A:
-        pass
-    class B(A):
-        pass
-    def fn4(i):
-        a = A()
-        b = B()
-        a.b = b
-        b.i = i
-        return a.b.i
-    yield check_malloc_removed, fn4, [int], 0

File pypy/translator/backend_opt/test/test_malloc.py

-from pypy.translator.backendoptimization import remove_void, inline_function
-from pypy.translator.backendoptimization import remove_simple_mallocs
-from pypy.translator.translator import Translator
-from pypy.rpython.lltype import Void
-from pypy.rpython.llinterp import LLInterpreter
-from pypy.objspace.flow.model import checkgraph, flatten, Block
-from pypy.translator.test.snippet import simple_method, is_perfect_number
-from pypy.translator.llvm.log import log
-
-import py
-log = py.log.Producer('test_backendoptimization')
-
-def annotate_and_remove_void(f, annotate):
-    t = Translator(f)
-    a = t.annotate(annotate)
-    t.specialize()
-    remove_void(t)
-    return t
-
-def test_remove_void_args():
-    def f(i):
-        return [1,2,3,i][i]
-    t = annotate_and_remove_void(f, [int])
-    for func, graph in t.flowgraphs.iteritems():
-        assert checkgraph(graph) is None
-        for arg in graph.startblock.inputargs:
-            assert arg.concretetype is not Void
-    interp = LLInterpreter(t.flowgraphs, t.rtyper)
-    assert interp.eval_function(f, [0]) == 1 
-
-def test_remove_void_in_struct():
-    t = annotate_and_remove_void(simple_method, [int])
-    #t.view()
-    log(t.flowgraphs.iteritems())
-    for func, graph in t.flowgraphs.iteritems():
-        log('func : ' + str(func))
-        log('graph: ' + str(graph))
-        assert checkgraph(graph) is None
-        #for fieldname in self.struct._names:    #XXX helper (in lltype?) should remove these voids
-        #    type_ = getattr(struct, fieldname)
-        #    log('fieldname=%(fieldname)s , type_=%(type_)s' % locals())
-        #    assert _type is not Void
-    #interp = LLInterpreter(t.flowgraphs, t.rtyper)
-    #assert interp.eval_function(f, [0]) == 1 
-
-def test_inline_simple():
-    def f(x, y):
-        return (g(x, y) + 1) * x
-    def g(x, y):
-        if x > 0:
-            return x * y
-        else:
-            return -x * y
-    t = Translator(f)
-    a = t.annotate([int, int])
-    a.simplify()
-    t.specialize()
-    inline_function(t, g, t.flowgraphs[f])
-    interp = LLInterpreter(t.flowgraphs, t.rtyper)
-    result = interp.eval_function(f, [-1, 5])
-    assert result == f(-1, 5)
-    result = interp.eval_function(f, [2, 12])
-    assert result == f(2, 12)
-
-def test_inline_big():
-    def f(x):
-        result = []
-        for i in range(1, x+1):
-            if is_perfect_number(i):
-                result.append(i)
-        return result
-    t = Translator(f)
-    a = t.annotate([int])
-    a.simplify()
-    t.specialize()
-    inline_function(t, is_perfect_number, t.flowgraphs[f])
-    interp = LLInterpreter(t.flowgraphs, t.rtyper)
-    result = interp.eval_function(f, [10])
-    assert result.length == len(f(10))
-
-def test_inline_raising():
-    def f(x):
-        if x == 1:
-            raise ValueError
-        return x
-    def g(x):
-        a = f(x)
-        if x == 2:
-            raise KeyError
-    def h(x):
-        try:
-            g(x)
-        except ValueError:
-            return 1
-        except KeyError:
-            return 2
-        return x
-    t = Translator(h)
-    a = t.annotate([int])
-    a.simplify()
-    t.specialize()
-    inline_function(t, f, t.flowgraphs[g])
-    interp = LLInterpreter(t.flowgraphs, t.rtyper)
-    result = interp.eval_function(h, [0])
-    assert result == 0
-    result = interp.eval_function(h, [1])
-    assert result == 1
-    result = interp.eval_function(h, [2])
-    assert result == 2    
-
-def test_inline_several_times():
-    def f(x):
-        return (x + 1) * 2
-    def g(x):
-        if x:
-            a = f(x) + f(x)
-        else:
-            a = f(x) + 1
-        return a + f(x)
-    t = Translator(g)
-    a = t.annotate([int])
-    a.simplify()
-    t.specialize()
-    inline_function(t, f, t.flowgraphs[g])
-    interp = LLInterpreter(t.flowgraphs, t.rtyper)
-    result = interp.eval_function(g, [0])
-    assert result == g(0)
-    result = interp.eval_function(g, [42])
-    assert result == g(42)
-
-def test_inline_exceptions():
-    def f(x):
-        if x == 0:
-            raise ValueError
-        if x == 1:
-            raise KeyError
-    def g(x):
-        try:
-            f(x)
-        except ValueError:
-            return 2
-        except KeyError:
-            return x+2
-        return 1
-    t = Translator(g)
-    a = t.annotate([int])
-    a.simplify()
-    t.specialize()
-    inline_function(t, f, t.flowgraphs[g])
-    interp = LLInterpreter(t.flowgraphs, t.rtyper)
-    result = interp.eval_function(g, [0])
-    assert result == 2
-    result = interp.eval_function(g, [1])
-    assert result == 3
-    result = interp.eval_function(g, [42])
-    assert result == 1
-
-def DONOTtest_inline_var_exception():
-    # this test is disabled for now, because f() contains a direct_call
-    # (at the end, to a ll helper, to get the type of the exception object)
-    def f(x):
-        e = None
-        if x == 0:
-            e = ValueError()
-        elif x == 1:
-            e = KeyError()
-        if x == 0 or x == 1:
-            raise e
-    def g(x):
-        try:
-            f(x)
-        except ValueError:
-            return 2
-        except KeyError:
-            return 3
-        return 1
-    t = Translator(g)
-    a = t.annotate([int])
-    a.simplify()
-    t.specialize()
-    inline_function(t, f, t.flowgraphs[g])
-    interp = LLInterpreter(t.flowgraphs, t.rtyper)
-    result = interp.eval_function(g, [0])
-    assert result == 2
-    result = interp.eval_function(g, [1])
-    assert result == 3
-    result = interp.eval_function(g, [42])
-    assert result == 1
-
-def DONOTtest_call_call():
-    # for reference.  Just remove this test if we decide not to support
-    # catching exceptions while inlining a graph that contains further
-    # direct_calls.
-    def e(x):
-        if x < 0:
-            raise KeyError
-        return x+1
-    def f(x):
-        return e(x)+2
-    def g(x):
-        try:
-            return f(x)+3
-        except KeyError:
-            return -1
-    t = Translator(g)
-    a = t.annotate([int])
-    a.simplify()
-    t.specialize()
-    inline_function(t, f, t.flowgraphs[g])
-    interp = LLInterpreter(t.flowgraphs, t.rtyper)
-    result = interp.eval_function(g, [100])
-    assert result == 106
-    result = interp.eval_function(g, [-100])
-    assert result == -1
-
-def test_for_loop():
-    def f(x):
-        result = 0
-        for i in range(0, x):
-            result += i
-        return result
-    t = Translator(f)
-    a = t.annotate([int])
-    a.simplify()
-    t.specialize()
-    for graph in t.flowgraphs.values():
-        if graph.name.startswith('ll_rangenext'):
-            break
-    else:
-        assert 0, "cannot find ll_rangenext_*() function"
-    inline_function(t, graph, t.flowgraphs[f])
-    interp = LLInterpreter(t.flowgraphs, t.rtyper)
-    result = interp.eval_function(f, [10])
-    assert result == 45
-
-
-def check_malloc_removed(fn, signature, expected_remaining_mallocs):
-    t = Translator(fn)
-    t.annotate(signature)
-    t.specialize()
-    graph = t.getflowgraph()
-    remove_simple_mallocs(graph)
-    checkgraph(graph)
-    count = 0
-    for node in flatten(graph):
-        if isinstance(node, Block):
-            for op in node.operations:
-                if op.opname == 'malloc':
-                    count += 1
-    assert count == expected_remaining_mallocs
-
-def test_remove_mallocs():
-    def fn1(x, y):
-        s, d = x+y, x-y
-        return s*d
-    yield check_malloc_removed, fn1, [int, int], 0
-    #
-    class T:
-        pass
-    def fn2(x, y):
-        t = T()
-        t.x = x
-        t.y = y
-        if x > 0:
-            return t.x + t.y
-        else:
-            return t.x - t.y
-    yield check_malloc_removed, fn2, [int, int], 0
-    #
-    def fn3(x):
-        a, ((b, c), d, e) = x+1, ((x+2, x+3), x+4, x+5)
-        return a+b+c+d+e
-    yield check_malloc_removed, fn3, [int], 0
-    #
-    class A:
-        pass
-    class B(A):
-        pass
-    def fn4(i):
-        a = A()
-        b = B()
-        a.b = b
-        b.i = i
-        return a.b.i
-    yield check_malloc_removed, fn4, [int], 0

File pypy/translator/backend_opt/test/test_remove_no_ops.py

-from pypy.translator.backendoptimization import remove_void, inline_function
-from pypy.translator.backendoptimization import remove_simple_mallocs
-from pypy.translator.translator import Translator
-from pypy.rpython.lltype import Void
-from pypy.rpython.llinterp import LLInterpreter
-from pypy.objspace.flow.model import checkgraph, flatten, Block
-from pypy.translator.test.snippet import simple_method, is_perfect_number
-from pypy.translator.llvm.log import log
-
-import py
-log = py.log.Producer('test_backendoptimization')
-
-def annotate_and_remove_void(f, annotate):
-    t = Translator(f)
-    a = t.annotate(annotate)
-    t.specialize()
-    remove_void(t)
-    return t
-
-def test_remove_void_args():
-    def f(i):
-        return [1,2,3,i][i]
-    t = annotate_and_remove_void(f, [int])
-    for func, graph in t.flowgraphs.iteritems():
-        assert checkgraph(graph) is None
-        for arg in graph.startblock.inputargs:
-            assert arg.concretetype is not Void
-    interp = LLInterpreter(t.flowgraphs, t.rtyper)
-    assert interp.eval_function(f, [0]) == 1 
-
-def test_remove_void_in_struct():
-    t = annotate_and_remove_void(simple_method, [int])
-    #t.view()
-    log(t.flowgraphs.iteritems())
-    for func, graph in t.flowgraphs.iteritems():
-        log('func : ' + str(func))
-        log('graph: ' + str(graph))
-        assert checkgraph(graph) is None
-        #for fieldname in self.struct._names:    #XXX helper (in lltype?) should remove these voids
-        #    type_ = getattr(struct, fieldname)
-        #    log('fieldname=%(fieldname)s , type_=%(type_)s' % locals())
-        #    assert _type is not Void
-    #interp = LLInterpreter(t.flowgraphs, t.rtyper)
-    #assert interp.eval_function(f, [0]) == 1 
-
-def test_inline_simple():
-    def f(x, y):
-        return (g(x, y) + 1) * x
-    def g(x, y):
-        if x > 0:
-            return x * y
-        else:
-            return -x * y
-    t = Translator(f)
-    a = t.annotate([int, int])
-    a.simplify()
-    t.specialize()
-    inline_function(t, g, t.flowgraphs[f])
-    interp = LLInterpreter(t.flowgraphs, t.rtyper)
-    result = interp.eval_function(f, [-1, 5])
-    assert result == f(-1, 5)
-    result = interp.eval_function(f, [2, 12])
-    assert result == f(2, 12)
-
-def test_inline_big():
-    def f(x):
-        result = []
-        for i in range(1, x+1):
-            if is_perfect_number(i):
-                result.append(i)
-        return result
-    t = Translator(f)
-    a = t.annotate([int])
-    a.simplify()
-    t.specialize()
-    inline_function(t, is_perfect_number, t.flowgraphs[f])
-    interp = LLInterpreter(t.flowgraphs, t.rtyper)
-    result = interp.eval_function(f, [10])
-    assert result.length == len(f(10))
-
-def test_inline_raising():
-    def f(x):
-        if x == 1:
-            raise ValueError
-        return x
-    def g(x):
-        a = f(x)
-        if x == 2:
-            raise KeyError
-    def h(x):
-        try:
-            g(x)
-        except ValueError:
-            return 1
-        except KeyError:
-            return 2
-        return x
-    t = Translator(h)
-    a = t.annotate([int])
-    a.simplify()
-    t.specialize()
-    inline_function(t, f, t.flowgraphs[g])
-    interp = LLInterpreter(t.flowgraphs, t.rtyper)
-    result = interp.eval_function(h, [0])
-    assert result == 0
-    result = interp.eval_function(h, [1])
-    assert result == 1
-    result = interp.eval_function(h, [2])
-    assert result == 2    
-
-def test_inline_several_times():
-    def f(x):
-        return (x + 1) * 2
-    def g(x):
-        if x:
-            a = f(x) + f(x)
-        else:
-            a = f(x) + 1
-        return a + f(x)
-    t = Translator(g)
-    a = t.annotate([int])
-    a.simplify()
-    t.specialize()
-    inline_function(t, f, t.flowgraphs[g])
-    interp = LLInterpreter(t.flowgraphs, t.rtyper)
-    result = interp.eval_function(g, [0])
-    assert result == g(0)
-    result = interp.eval_function(g, [42])
-    assert result == g(42)
-
-def test_inline_exceptions():
-    def f(x):
-        if x == 0:
-            raise ValueError
-        if x == 1:
-            raise KeyError
-    def g(x):
-        try:
-            f(x)
-        except ValueError:
-            return 2
-        except KeyError:
-            return x+2
-        return 1
-    t = Translator(g)
-    a = t.annotate([int])
-    a.simplify()
-    t.specialize()
-    inline_function(t, f, t.flowgraphs[g])
-    interp = LLInterpreter(t.flowgraphs, t.rtyper)
-    result = interp.eval_function(g, [0])
-    assert result == 2
-    result = interp.eval_function(g, [1])
-    assert result == 3
-    result = interp.eval_function(g, [42])
-    assert result == 1
-
-def DONOTtest_inline_var_exception():
-    # this test is disabled for now, because f() contains a direct_call
-    # (at the end, to a ll helper, to get the type of the exception object)
-    def f(x):
-        e = None
-        if x == 0:
-            e = ValueError()
-        elif x == 1:
-            e = KeyError()
-        if x == 0 or x == 1:
-            raise e
-    def g(x):
-        try:
-            f(x)
-        except ValueError:
-            return 2
-        except KeyError:
-            return 3
-        return 1
-    t = Translator(g)
-    a = t.annotate([int])
-    a.simplify()
-    t.specialize()
-    inline_function(t, f, t.flowgraphs[g])
-    interp = LLInterpreter(t.flowgraphs, t.rtyper)
-    result = interp.eval_function(g, [0])
-    assert result == 2
-    result = interp.eval_function(g, [1])
-    assert result == 3
-    result = interp.eval_function(g, [42])
-    assert result == 1
-
-def DONOTtest_call_call():
-    # for reference.  Just remove this test if we decide not to support
-    # catching exceptions while inlining a graph that contains further
-    # direct_calls.
-    def e(x):
-        if x < 0:
-            raise KeyError
-        return x+1
-    def f(x):
-        return e(x)+2
-    def g(x):
-        try:
-            return f(x)+3
-        except KeyError:
-            return -1
-    t = Translator(g)
-    a = t.annotate([int])
-    a.simplify()
-    t.specialize()
-    inline_function(t, f, t.flowgraphs[g])
-    interp = LLInterpreter(t.flowgraphs, t.rtyper)
-    result = interp.eval_function(g, [100])
-    assert result == 106
-    result = interp.eval_function(g, [-100])
-    assert result == -1
-
-def test_for_loop():
-    def f(x):
-        result = 0
-        for i in range(0, x):
-            result += i
-        return result
-    t = Translator(f)
-    a = t.annotate([int])
-    a.simplify()
-    t.specialize()
-    for graph in t.flowgraphs.values():
-        if graph.name.startswith('ll_rangenext'):
-            break
-    else:
-        assert 0, "cannot find ll_rangenext_*() function"
-    inline_function(t, graph, t.flowgraphs[f])
-    interp = LLInterpreter(t.flowgraphs, t.rtyper)