Commits

wlav committed dd2f380 Merge

merge default into branch

Files changed (47)

lib_pypy/itertools.py

-# Note that PyPy contains also a built-in module 'itertools' which will
-# hide this one if compiled in.
-
-"""Functional tools for creating and using iterators.
-
-Infinite iterators:
-count([n]) --> n, n+1, n+2, ...
-cycle(p) --> p0, p1, ... plast, p0, p1, ...
-repeat(elem [,n]) --> elem, elem, elem, ... endlessly or up to n times
-
-Iterators terminating on the shortest input sequence:
-izip(p, q, ...) --> (p[0], q[0]), (p[1], q[1]), ... 
-ifilter(pred, seq) --> elements of seq where pred(elem) is True
-ifilterfalse(pred, seq) --> elements of seq where pred(elem) is False
-islice(seq, [start,] stop [, step]) --> elements from
-       seq[start:stop:step]
-imap(fun, p, q, ...) --> fun(p0, q0), fun(p1, q1), ...
-starmap(fun, seq) --> fun(*seq[0]), fun(*seq[1]), ...
-tee(it, n=2) --> (it1, it2 , ... itn) splits one iterator into n
-chain(p, q, ...) --> p0, p1, ... plast, q0, q1, ... 
-takewhile(pred, seq) --> seq[0], seq[1], until pred fails
-dropwhile(pred, seq) --> seq[n], seq[n+1], starting when pred fails
-groupby(iterable[, keyfunc]) --> sub-iterators grouped by value of keyfunc(v)
-"""
-
-__all__ = ['chain', 'count', 'cycle', 'dropwhile', 'groupby', 'ifilter',
-           'ifilterfalse', 'imap', 'islice', 'izip', 'repeat', 'starmap',
-           'takewhile', 'tee', 'compress', 'product']
-
-try: from __pypy__ import builtinify
-except ImportError: builtinify = lambda f: f
-
-
-class chain(object):
-    """Make an iterator that returns elements from the first iterable
-    until it is exhausted, then proceeds to the next iterable, until
-    all of the iterables are exhausted. Used for treating consecutive
-    sequences as a single sequence.
-
-    Equivalent to :
-
-    def chain(*iterables):
-        for it in iterables:
-            for element in it:
-                yield element
-    """
-    def __init__(self, *iterables):
-        self._iterables_iter = iter(map(iter, iterables))
-        # little trick for the first chain.next() call
-        self._cur_iterable_iter = iter([])
-
-    def __iter__(self):
-        return self
-    
-    def next(self):
-        while True:
-            try:
-                return self._cur_iterable_iter.next()
-            except StopIteration:
-                self._cur_iterable_iter = self._iterables_iter.next()
-            except AttributeError:
-                # CPython raises a TypeError when next() is not defined
-                raise TypeError('%s has no next() method' % \
-                                (self._cur_iterable_iter))
-
-
-class compress(object):
-    def __init__(self, data, selectors):
-        self.data = iter(data)
-        self.selectors = iter(selectors)
-
-    def __iter__(self):
-        return self
-
-    def next(self):
-        while True:
-            next_item = self.data.next()
-            next_selector = self.selectors.next()
-            if bool(next_selector):
-                return next_item
-
-
-class count(object):
-    """Make an iterator that returns consecutive integers starting
-    with n.  If not specified n defaults to zero. Does not currently
-    support python long integers. Often used as an argument to imap()
-    to generate consecutive data points.  Also, used with izip() to
-    add sequence numbers.
-
-    Equivalent to :
-
-    def count(n=0):
-        if not isinstance(n, int):
-            raise TypeError("%s is not a regular integer" % n)
-        while True:
-            yield n
-            n += 1
-    """
-    def __init__(self, n=0):
-        if not isinstance(n, int):
-            raise TypeError('%s is not a regular integer' % n)
-        self.times = n-1
-
-    def __iter__(self):
-        return self
-
-    def next(self):
-        self.times += 1
-        return self.times
-
-    def __repr__(self):
-        return 'count(%d)' % (self.times + 1)
-
-
-            
-class cycle(object):
-    """Make an iterator returning elements from the iterable and
-    saving a copy of each. When the iterable is exhausted, return
-    elements from the saved copy. Repeats indefinitely.
-
-    Equivalent to :
-
-    def cycle(iterable):
-        saved = []
-        for element in iterable:
-            yield element
-            saved.append(element)
-        while saved:
-            for element in saved:
-                yield element    
-    """
-    def __init__(self, iterable):
-        self._cur_iter = iter(iterable)
-        self._saved = []
-        self._must_save = True
-        
-    def __iter__(self):
-        return self
-
-    def next(self):
-        # XXX Could probably be improved
-        try:
-            next_elt = self._cur_iter.next()
-            if self._must_save:
-                self._saved.append(next_elt)
-        except StopIteration:
-            self._cur_iter = iter(self._saved)
-            next_elt = self._cur_iter.next()
-            self._must_save = False
-        except AttributeError:
-            # CPython raises a TypeError when next() is not defined
-            raise TypeError('%s has no next() method' % \
-                            (self._cur_iter))
-        return next_elt
-            
-        
-class dropwhile(object):
-    """Make an iterator that drops elements from the iterable as long
-    as the predicate is true; afterwards, returns every
-    element. Note, the iterator does not produce any output until the
-    predicate is true, so it may have a lengthy start-up time.
-
-    Equivalent to :
-
-    def dropwhile(predicate, iterable):
-        iterable = iter(iterable)
-        for x in iterable:
-            if not predicate(x):
-                yield x
-                break
-        for x in iterable:
-            yield x
-    """
-    def __init__(self, predicate, iterable):
-        self._predicate = predicate
-        self._iter = iter(iterable)
-        self._dropped = False
-
-    def __iter__(self):
-        return self
-
-    def next(self):
-        try:
-            value = self._iter.next()
-        except AttributeError:
-            # CPython raises a TypeError when next() is not defined
-            raise TypeError('%s has no next() method' % \
-                            (self._iter))
-        if self._dropped:
-            return value
-        while self._predicate(value):
-            value = self._iter.next()
-        self._dropped = True
-        return value
-
-class groupby(object):
-    """Make an iterator that returns consecutive keys and groups from the
-    iterable. The key is a function computing a key value for each
-    element. If not specified or is None, key defaults to an identity
-    function and returns the element unchanged. Generally, the
-    iterable needs to already be sorted on the same key function.
-
-    The returned group is itself an iterator that shares the
-    underlying iterable with groupby(). Because the source is shared,
-    when the groupby object is advanced, the previous group is no
-    longer visible. So, if that data is needed later, it should be
-    stored as a list:
-
-       groups = []
-       uniquekeys = []
-       for k, g in groupby(data, keyfunc):
-           groups.append(list(g))      # Store group iterator as a list
-           uniquekeys.append(k)
-    """    
-    def __init__(self, iterable, key=None):
-        if key is None:
-            key = lambda x: x
-        self.keyfunc = key
-        self.it = iter(iterable)
-        self.tgtkey = self.currkey = self.currvalue = xrange(0)
-
-    def __iter__(self):
-        return self
-
-    def next(self):
-        while self.currkey == self.tgtkey:
-            try:
-                self.currvalue = self.it.next() # Exit on StopIteration
-            except AttributeError:
-                # CPython raises a TypeError when next() is not defined
-                raise TypeError('%s has no next() method' % \
-                                (self.it))            
-            self.currkey = self.keyfunc(self.currvalue)
-        self.tgtkey = self.currkey
-        return (self.currkey, self._grouper(self.tgtkey))
-
-    def _grouper(self, tgtkey):
-        while self.currkey == tgtkey:
-            yield self.currvalue
-            self.currvalue = self.it.next() # Exit on StopIteration
-            self.currkey = self.keyfunc(self.currvalue)
-
-
-
-class _ifilter_base(object):
-    """base class for ifilter and ifilterflase"""
-    def __init__(self, predicate, iterable):
-        # Make sure iterable *IS* iterable
-        self._iter = iter(iterable)
-        if predicate is None:
-            self._predicate = bool
-        else:
-            self._predicate = predicate
-
-    def __iter__(self):
-        return self
-    
-class ifilter(_ifilter_base):
-    """Make an iterator that filters elements from iterable returning
-    only those for which the predicate is True.  If predicate is
-    None, return the items that are true.
-
-    Equivalent to :
-
-    def ifilter(predicate, iterable):
-        if predicate is None:
-            predicate = bool
-        for x in iterable:
-            if predicate(x):
-                yield x
-    """
-    def next(self):
-        try:
-            next_elt = self._iter.next()
-        except AttributeError:
-            # CPython raises a TypeError when next() is not defined
-            raise TypeError('%s has no next() method' % \
-                            (self._iter))
-        while True:
-            if self._predicate(next_elt):
-                return next_elt
-            next_elt = self._iter.next()
-
-class ifilterfalse(_ifilter_base):
-    """Make an iterator that filters elements from iterable returning
-    only those for which the predicate is False.  If predicate is
-    None, return the items that are false.
-
-    Equivalent to :
-    
-    def ifilterfalse(predicate, iterable):
-        if predicate is None:
-            predicate = bool
-        for x in iterable:
-            if not predicate(x):
-                yield x
-    """
-    def next(self):
-        try:
-            next_elt = self._iter.next()
-        except AttributeError:
-            # CPython raises a TypeError when next() is not defined
-            raise TypeError('%s has no next() method' % \
-                            (self._iter))
-        while True:
-            if not self._predicate(next_elt):
-                return next_elt
-            next_elt = self._iter.next()
-             
-
-
-
-class imap(object):
-    """Make an iterator that computes the function using arguments
-    from each of the iterables. If function is set to None, then
-    imap() returns the arguments as a tuple. Like map() but stops
-    when the shortest iterable is exhausted instead of filling in
-    None for shorter iterables. The reason for the difference is that
-    infinite iterator arguments are typically an error for map()
-    (because the output is fully evaluated) but represent a common
-    and useful way of supplying arguments to imap().
-
-    Equivalent to :
-
-    def imap(function, *iterables):
-        iterables = map(iter, iterables)
-        while True:
-            args = [i.next() for i in iterables]
-            if function is None:
-                yield tuple(args)
-            else:
-                yield function(*args)
-    
-    """
-    def __init__(self, function, iterable, *other_iterables):
-        self._func = function
-        self._iters = map(iter, (iterable, ) + other_iterables)
-
-    def __iter__(self):
-        return self
-
-    def next(self):
-        try:
-            args = [it.next() for it in self._iters]
-        except AttributeError:
-            # CPython raises a TypeError when next() is not defined
-            raise TypeError('%s has no next() method' % \
-                            (it))
-        if self._func is None:
-            return tuple(args)
-        else:
-            return self._func(*args)
-
-
-
-class islice(object):
-    """Make an iterator that returns selected elements from the
-    iterable.  If start is non-zero, then elements from the iterable
-    are skipped until start is reached. Afterward, elements are
-    returned consecutively unless step is set higher than one which
-    results in items being skipped. If stop is None, then iteration
-    continues until the iterator is exhausted, if at all; otherwise,
-    it stops at the specified position. Unlike regular slicing,
-    islice() does not support negative values for start, stop, or
-    step. Can be used to extract related fields from data where the
-    internal structure has been flattened (for example, a multi-line
-    report may list a name field on every third line).
-    """ 
-    def __init__(self, iterable, *args):
-        s = slice(*args)
-        self.start, self.stop, self.step = s.start or 0, s.stop, s.step
-        if not isinstance(self.start, (int, long)):
-           raise ValueError("Start argument must be an integer")
-        if self.stop is not None and not isinstance(self.stop, (int,long)):
-           raise ValueError("Stop argument must be an integer or None")
-        if self.step is None:
-            self.step = 1
-        if self.start<0 or (self.stop is not None and self.stop<0
-           ) or self.step<=0:
-            raise ValueError, "indices for islice() must be positive"
-        self.it = iter(iterable)
-        self.donext = None
-        self.cnt = 0
-
-    def __iter__(self):
-        return self
-
-    def next(self): 
-        if self.donext is None:
-            try:
-                self.donext = self.it.next
-            except AttributeError:
-                raise TypeError
-        nextindex = self.start
-        if self.stop is not None and nextindex >= self.stop:
-            raise StopIteration
-        while self.cnt <= nextindex:
-            nextitem = self.donext()
-            self.cnt += 1
-        self.start += self.step 
-        return nextitem
-
-class izip(object):
-    """Make an iterator that aggregates elements from each of the
-    iterables.  Like zip() except that it returns an iterator instead
-    of a list. Used for lock-step iteration over several iterables at
-    a time.
-
-    Equivalent to :
-
-    def izip(*iterables):
-        iterables = map(iter, iterables)
-        while iterables:
-            result = [i.next() for i in iterables]
-            yield tuple(result)
-    """
-    def __init__(self, *iterables):
-        self._iterators = map(iter, iterables)
-        self._result = [None] * len(self._iterators)
-
-    def __iter__(self):
-        return self
-
-    def next(self):
-        if not self._iterators:
-            raise StopIteration()
-        try:
-            return tuple([i.next() for i in self._iterators])
-        except AttributeError:
-            # CPython raises a TypeError when next() is not defined
-            raise TypeError('%s has no next() method' % (i))
-
-
-class product(object):
-
-    def __init__(self, *args, **kw):
-        if len(kw) > 1:
-            raise TypeError("product() takes at most 1 argument (%d given)" %
-                             len(kw))
-        self.repeat = kw.get('repeat', 1)
-        self.gears = [x for x in args] * self.repeat
-        self.num_gears = len(self.gears)
-        # initialization of indicies to loop over
-        self.indicies = [(0, len(self.gears[x]))
-                         for x in range(0, self.num_gears)]
-        self.cont = True
-
-    def roll_gears(self):
-        # Starting from the end of the gear indicies work to the front
-        # incrementing the gear until the limit is reached. When the limit
-        # is reached carry operation to the next gear
-        should_carry = True
-        for n in range(0, self.num_gears):
-            nth_gear = self.num_gears - n - 1
-            if should_carry:
-                count, lim = self.indicies[nth_gear]
-                count += 1
-                if count == lim and nth_gear == 0:
-                    self.cont = False
-                if count == lim:
-                    should_carry = True
-                    count = 0
-                else:
-                    should_carry = False
-                self.indicies[nth_gear] = (count, lim)
-            else:
-                break
-
-    def __iter__(self):
-        return self
-
-    def next(self):
-        if not self.cont:
-            raise StopIteration
-        l = []
-        for x in range(0, self.num_gears):
-            index, limit = self.indicies[x]
-            l.append(self.gears[x][index])
-        self.roll_gears()
-        return tuple(l)
-
-
-class repeat(object):
-    """Make an iterator that returns object over and over again.
-    Runs indefinitely unless the times argument is specified.  Used
-    as argument to imap() for invariant parameters to the called
-    function. Also used with izip() to create an invariant part of a
-    tuple record.
-
-    Equivalent to :
-
-    def repeat(object, times=None):
-        if times is None:
-            while True:
-                yield object
-        else:
-            for i in xrange(times):
-                yield object
-    """
-    def __init__(self, obj, times=None):
-        self._obj = obj
-        if times is not None:
-            xrange(times) # Raise a TypeError
-            if times < 0:
-                times = 0
-        self._times = times
-        
-    def __iter__(self):
-        return self
-
-    def next(self):
-        # next() *need* to decrement self._times when consumed
-        if self._times is not None:
-            if self._times <= 0: 
-                raise StopIteration()
-            self._times -= 1
-        return self._obj
-
-    def __repr__(self):
-        if self._times is not None:
-            return 'repeat(%r, %r)' % (self._obj, self._times)
-        else:
-            return 'repeat(%r)' % (self._obj,)
-
-    def __len__(self):
-        if self._times == -1 or self._times is None:
-            raise TypeError("len() of uniszed object")
-        return self._times
-    
-
-class starmap(object):
-    """Make an iterator that computes the function using arguments
-    tuples obtained from the iterable. Used instead of imap() when
-    argument parameters are already grouped in tuples from a single
-    iterable (the data has been ``pre-zipped''). The difference
-    between imap() and starmap() parallels the distinction between
-    function(a,b) and function(*c).
-
-    Equivalent to :
-
-    def starmap(function, iterable):
-        iterable = iter(iterable)
-        while True:
-            yield function(*iterable.next())    
-    """
-    def __init__(self, function, iterable):
-        self._func = function
-        self._iter = iter(iterable)
-
-    def __iter__(self):
-        return self
-
-    def next(self):
-        # CPython raises a TypeError when the iterator doesn't return a tuple
-        try:
-            t = self._iter.next()
-        except AttributeError:
-            # CPython raises a TypeError when next() is not defined
-            raise TypeError('%s has no next() method' % self._iter)
-        if not isinstance(t, tuple):
-            raise TypeError("iterator must return a tuple")
-        return self._func(*t)
-
-
-
-class takewhile(object):
-    """Make an iterator that returns elements from the iterable as
-    long as the predicate is true.
-
-    Equivalent to :
-    
-    def takewhile(predicate, iterable):
-        for x in iterable:
-            if predicate(x):
-                yield x
-            else:
-                break
-    """
-    def __init__(self, predicate, iterable):
-        self._predicate = predicate
-        self._iter = iter(iterable)
-
-    def __iter__(self):
-        return self
-
-    def next(self):
-        try:
-            value = self._iter.next()
-        except AttributeError:
-            # CPython raises a TypeError when next() is not defined
-            raise TypeError('%s has no next() method' % \
-                            (self._iter))
-        if not self._predicate(value):
-            raise StopIteration()
-        return value
-
-    
-class TeeData(object):
-    """Holds cached values for TeeObjects"""
-    def __init__(self, iterator):
-        self.data = []
-        self._iter = iterator
-
-    def __getitem__(self, i):
-        # iterates until 'i' if not done yet
-        while i>= len(self.data):
-            try:
-                self.data.append( self._iter.next() )
-            except AttributeError:
-                # CPython raises a TypeError when next() is not defined
-                raise TypeError('%s has no next() method' % self._iter)
-        return self.data[i]
-
-
-class TeeObject(object):
-    """Iterables / Iterators as returned by the tee() function"""
-    def __init__(self, iterable=None, tee_data=None):
-        if tee_data:
-            self.tee_data = tee_data
-            self.pos = 0
-        # <=> Copy constructor
-        elif isinstance(iterable, TeeObject):
-            self.tee_data = iterable.tee_data
-            self.pos = iterable.pos
-        else:
-            self.tee_data = TeeData(iter(iterable))
-            self.pos = 0
-            
-    def next(self):
-        data = self.tee_data[self.pos]
-        self.pos += 1
-        return data
-    
-    def __iter__(self):
-        return self
-
-
-@builtinify
-def tee(iterable, n=2):
-    """Return n independent iterators from a single iterable.
-    Note : once tee() has made a split, the original iterable
-    should not be used anywhere else; otherwise, the iterable could get
-    advanced without the tee objects being informed.
-    
-    Note : this member of the toolkit may require significant auxiliary
-    storage (depending on how much temporary data needs to be stored).
-    In general, if one iterator is going to use most or all of the
-    data before the other iterator, it is faster to use list() instead
-    of tee()
-    
-    Equivalent to :
-    
-    def tee(iterable, n=2):
-        def gen(next, data={}, cnt=[0]):
-            for i in count():
-                if i == cnt[0]:
-                    item = data[i] = next()
-                    cnt[0] += 1
-                else:
-                    item = data.pop(i)
-                yield item
-        it = iter(iterable)
-        return tuple([gen(it.next) for i in range(n)])
-    """
-    if isinstance(iterable, TeeObject):
-        # a,b = tee(range(10)) ; c,d = tee(a) ; self.assert_(a is c)
-        return tuple([iterable] +
-        [TeeObject(tee_data=iterable.tee_data) for i in xrange(n-1)])
-    tee_data = TeeData(iter(iterable))
-    return tuple([TeeObject(tee_data=tee_data) for i in xrange(n)])
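The pure-Python fallback above is removed, presumably because the interpreter-level itertools module is always available. A rough sanity sketch (plain CPython 2.7+ itertools, not PyPy-specific) of the two extensions that the removed tests in lib_pypy/pypy_test/test_itertools.py exercised, behaving the same way from the built-in module:

    import itertools

    # compress and product, as covered by the removed tests
    print(list(itertools.compress(['a', 'b', 'c'], [0, 1, 0])))
    # ['b']
    print(list(itertools.product([1, 2], ['a', 'b'])))
    # [(1, 'a'), (1, 'b'), (2, 'a'), (2, 'b')]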

lib_pypy/numpypy/core/numeric.py

 
-from _numpypy import array, ndarray, int_, float_, bool_ #, complex_# , longlong
+from _numpypy import array, ndarray, int_, float_, bool_, flexible #, complex_# , longlong
 from _numpypy import concatenate
 from .fromnumeric import any
 import math
             typename = "'%s'" % typename
 
         lf = ''
-        if 0: # or issubclass(arr.dtype.type, flexible):
+        if issubclass(arr.dtype.type, flexible):
             if arr.dtype.names:
                 typename = "%s" % str(arr.dtype)
             else:
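A minimal check of the dtype predicate the repr code now relies on, using standard NumPy as a stand-in for _numpypy (an assumption; numpypy mirrors the NumPy API here):

    import numpy as np

    # string (and other flexible) dtypes take the extra repr branch;
    # fixed-size numeric dtypes do not
    print(issubclass(np.dtype('S5').type, np.flexible))      # True
    print(issubclass(np.dtype('int32').type, np.flexible))   # False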

lib_pypy/pypy_test/test_itertools.py

-from py.test import raises
-from lib_pypy import itertools
-
-class TestItertools(object):
-
-    def test_compress(self):
-        it = itertools.compress(['a', 'b', 'c'], [0, 1, 0])
-
-        assert list(it) == ['b']
-
-    def test_compress_diff_len(self):
-        it = itertools.compress(['a'], [])
-        raises(StopIteration, it.next)
-
-    def test_product(self):
-        l = [1, 2]
-        m = ['a', 'b']
-
-        prodlist = itertools.product(l, m)
-        assert list(prodlist) == [(1, 'a'), (1, 'b'), (2, 'a'), (2, 'b')]
-
-    def test_product_repeat(self):
-        l = [1, 2]
-        m = ['a', 'b']
-
-        prodlist = itertools.product(l, m, repeat=2)
-        ans = [(1, 'a', 1, 'a'), (1, 'a', 1, 'b'), (1, 'a', 2, 'a'),
-               (1, 'a', 2, 'b'), (1, 'b', 1, 'a'), (1, 'b', 1, 'b'),
-               (1, 'b', 2, 'a'), (1, 'b', 2, 'b'), (2, 'a', 1, 'a'),
-               (2, 'a', 1, 'b'), (2, 'a', 2, 'a'), (2, 'a', 2, 'b'),
-               (2, 'b', 1, 'a'), (2, 'b', 1, 'b'), (2, 'b', 2, 'a'),
-               (2, 'b', 2, 'b')]
-        assert list(prodlist) == ans
-
-    def test_product_diff_sizes(self):
-        l = [1, 2]
-        m = ['a']
-
-        prodlist = itertools.product(l, m)
-        assert list(prodlist) == [(1, 'a'), (2, 'a')]
-
-        l = [1]
-        m = ['a', 'b']
-        prodlist = itertools.product(l, m)
-        assert list(prodlist) == [(1, 'a'), (1, 'b')]
-
-    def test_product_toomany_args(self):
-        l = [1, 2]
-        m = ['a']
-        raises(TypeError, itertools.product, l, m, repeat=1, foo=2)

lib_pypy/pyrepl/readline.py

         try:
             return unicode(line, ENCODING)
         except UnicodeDecodeError:   # bah, silently fall back...
-            return unicode(line, 'utf-8')
+            return unicode(line, 'utf-8', 'replace')
 
     def get_history_length(self):
         return self.saved_history_length
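A small Python 2 sketch of what the added 'replace' argument buys: bytes that are not valid UTF-8 no longer raise, they decode to U+FFFD replacement characters.

    line = '\xff\xfe'                  # not valid UTF-8
    # unicode(line, 'utf-8')           # would raise UnicodeDecodeError
    print(repr(unicode(line, 'utf-8', 'replace')))   # u'\ufffd\ufffd'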

lib_pypy/pyrepl/unix_console.py

             if iscode:
                 self.__tputs(text)
             else:
-                os.write(self.output_fd, text.encode(self.encoding))
+                os.write(self.output_fd, text.encode(self.encoding, 'replace'))
         del self.__buffer[:]
 
     def __tputs(self, fmt, prog=delayprog):
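The same idea in the encoding direction (Python 2 sketch): characters the terminal encoding cannot represent become '?' instead of raising.

    print(u'caf\xe9 \u20ac'.encode('ascii', 'replace'))   # 'caf? ?'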

pypy/annotation/annrpython.py

                         if cell.is_constant():
                             newcell.const = cell.const
                         cell = newcell
-                        cell.knowntypedata = renamed_knowntypedata
+                        cell.set_knowntypedata(renamed_knowntypedata)
 
                     cells.append(cell)
 

pypy/annotation/binaryop.py

         # XXX HACK HACK HACK
         bk = getbookkeeper()
         if bk is not None: # for testing
-            knowntypedata = r.knowntypedata = {}
+            knowntypedata = {}
             fn, block, i = bk.position_key
 
             annotator = bk.annotator
 
             bind(obj2, obj1, 0)
             bind(obj1, obj2, 1)
+            r.set_knowntypedata(knowntypedata)
 
         return r
 
             case = opname in ('gt', 'ge', 'eq')
             add_knowntypedata(knowntypedata, case, [op.args[0]],
                               SomeInteger(nonneg=True, knowntype=tointtype(int1)))
-        if knowntypedata:
-            r.knowntypedata = knowntypedata
+        r.set_knowntypedata(knowntypedata)
         # a special case for 'x < 0' or 'x >= 0',
         # where 0 is a flow graph Constant
         # (in this case we are sure that it cannot become a r_uint later)
         if hasattr(boo1, 'knowntypedata') and \
            hasattr(boo2, 'knowntypedata'):
             ktd = merge_knowntypedata(boo1.knowntypedata, boo2.knowntypedata)
-            if ktd:
-                s.knowntypedata = ktd
+            s.set_knowntypedata(ktd)
         return s 
 
     def and_((boo1, boo2)):

pypy/annotation/builtin.py

             variables = [op.args[1]]
         for variable in variables:
             assert bk.annotator.binding(variable) == s_obj
-        r.knowntypedata = {}
-        
+        knowntypedata = {}
         if not hasattr(typ, '_freeze_') and isinstance(s_type, SomePBC):
-            add_knowntypedata(r.knowntypedata, True, variables, bk.valueoftype(typ))
+            add_knowntypedata(knowntypedata, True, variables, bk.valueoftype(typ))
+        r.set_knowntypedata(knowntypedata)
     return r
 
 # note that this one either needs to be constant, or we will create SomeObject

pypy/annotation/model.py

     unsigned = False
     def __init__(self):
         pass
+    def set_knowntypedata(self, knowntypedata):
+        assert not hasattr(self, 'knowntypedata')
+        if knowntypedata:
+            self.knowntypedata = knowntypedata
 
 class SomeStringOrUnicode(SomeObject):
     immutable = True
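A standalone sketch of the contract the new setter enforces across the annotator changes in this commit: knowntypedata is attached only when non-empty, and never set twice (the class name here is illustrative).

    class _SomeBoolLike(object):
        def set_knowntypedata(self, knowntypedata):
            assert not hasattr(self, 'knowntypedata')
            if knowntypedata:
                self.knowntypedata = knowntypedata

    r = _SomeBoolLike()
    r.set_knowntypedata({})                    # empty: attribute not created
    print(hasattr(r, 'knowntypedata'))         # False
    r.set_knowntypedata({'v0': 'SomeInteger(nonneg=True)'})
    print(hasattr(r, 'knowntypedata'))         # True, and set exactly once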

pypy/annotation/unaryop.py

         s_obj.is_true_behavior(r)
 
         bk = getbookkeeper()
-        knowntypedata = r.knowntypedata = {}
+        knowntypedata = {}
         fn, block, i = bk.position_key
         op = block.operations[i]
         assert op.opname == "is_true" or op.opname == "nonzero"
         if s_obj.can_be_none():
             s_nonnone_obj = s_obj.nonnoneify()
         add_knowntypedata(knowntypedata, True, [arg], s_nonnone_obj)
+        r.set_knowntypedata(knowntypedata)
         return r
-        
 
     def nonzero(obj):
         return obj.is_true()

pypy/config/translationoption.py

                          ("translation.gcrootfinder", DEFL_ROOTFINDER_WITHJIT),
                          ("translation.list_comprehension_operations", True)]),
     ChoiceOption("jit_backend", "choose the backend for the JIT",
-                 ["auto", "x86", "x86-without-sse2", "llvm", 'arm'],
+                 ["auto", "x86", "x86-without-sse2", 'arm'],
                  default="auto", cmdline="--jit-backend"),
     ChoiceOption("jit_profiler", "integrate profiler support into the JIT",
                  ["off", "oprofile"],

pypy/doc/whatsnew-head.rst

 
 .. branch: numpypy-complex2
 Complex dtype support for numpy
+.. branch: numpypy-problems
+Improve dtypes intp, uintp, void, string and record
 .. branch: kill-someobject
 major cleanups including killing some object support
 

pypy/jit/backend/detect_cpu.py

         return "pypy.jit.backend.x86.runner", "CPU_X86_64"
     elif backend_name == 'cli':
         return "pypy.jit.backend.cli.runner", "CliCPU"
-    elif backend_name == 'llvm':
-        return "pypy.jit.backend.llvm.runner", "LLVMCPU"
     elif backend_name == 'arm':
         return "pypy.jit.backend.arm.runner", "CPU_ARM"
     elif backend_name == 'armhf':

pypy/jit/metainterp/optimizeopt/optimizer.py

 from pypy.jit.metainterp import jitprof, resume, compile
 from pypy.jit.metainterp.executor import execute_nonspec
-from pypy.jit.metainterp.history import BoxInt, BoxFloat, Const, ConstInt, REF, INT
+from pypy.jit.metainterp.history import BoxInt, BoxFloat, Const, ConstInt, REF
 from pypy.jit.metainterp.optimizeopt.intutils import IntBound, IntUnbounded, \
                                                      ImmutableIntUnbounded, \
                                                      IntLowerBound, MININT, MAXINT
-from pypy.jit.metainterp.optimizeopt.util import (make_dispatcher_method,
-    args_dict)
+from pypy.jit.metainterp.optimizeopt.util import make_dispatcher_method
 from pypy.jit.metainterp.resoperation import rop, ResOperation, AbstractResOp
 from pypy.jit.metainterp.typesystem import llhelper, oohelper
 from pypy.tool.pairtype import extendabletype
-from pypy.rlib.debug import debug_start, debug_stop, debug_print
+from pypy.rlib.debug import debug_print
 from pypy.rlib.objectmodel import specialize
 
+
 LEVEL_UNKNOWN    = '\x00'
 LEVEL_NONNULL    = '\x01'
 LEVEL_KNOWNCLASS = '\x02'     # might also mean KNOWNARRAYDESCR, for arrays
 MODE_ARRAY   = '\x00'
 MODE_STR     = '\x01'
 MODE_UNICODE = '\x02'
+
+
 class LenBound(object):
     def __init__(self, mode, descr, bound):
         self.mode = mode

pypy/jit/metainterp/optimizeopt/rewrite.py

 from pypy.jit.codewriter.effectinfo import EffectInfo
-from pypy.jit.metainterp.history import ConstInt, make_hashable_int
+from pypy.jit.metainterp import compile
+from pypy.jit.metainterp.history import (Const, ConstInt, BoxInt, BoxFloat,
+    BoxPtr, make_hashable_int)
 from pypy.jit.metainterp.optimize import InvalidLoop
 from pypy.jit.metainterp.optimizeopt.intutils import IntBound
-from pypy.jit.metainterp.optimizeopt.optimizer import *
+from pypy.jit.metainterp.optimizeopt.optimizer import (Optimization, REMOVED,
+    CONST_0, CONST_1)
 from pypy.jit.metainterp.optimizeopt.util import _findall, make_dispatcher_method
 from pypy.jit.metainterp.resoperation import (opboolinvers, opboolreflex, rop,
     ResOperation)
         source_start_box = self.get_constant_box(op.getarg(3))
         dest_start_box = self.get_constant_box(op.getarg(4))
         length = self.get_constant_box(op.getarg(5))
-        if (source_value.is_virtual() and source_start_box and dest_start_box
-            and length and (dest_value.is_virtual() or length.getint() <= 8)):
+        if (source_start_box and dest_start_box
+            and length and (dest_value.is_virtual() or length.getint() <= 8) and
+            (source_value.is_virtual() or length.getint() <= 8)):
             from pypy.jit.metainterp.optimizeopt.virtualize import VArrayValue
-            assert isinstance(source_value, VArrayValue)
             source_start = source_start_box.getint()
             dest_start = dest_start_box.getint()
             for index in range(length.getint()):
-                val = source_value.getitem(index + source_start)
+                # XXX fish fish fish
+                arraydescr = op.getdescr().get_extra_info().write_descrs_arrays[0]
+                if source_value.is_virtual():
+                    assert isinstance(source_value, VArrayValue)
+                    val = source_value.getitem(index + source_start)
+                else:
+                    if arraydescr.is_array_of_pointers():
+                        resbox = BoxPtr()
+                    elif arraydescr.is_array_of_floats():
+                        resbox = BoxFloat()
+                    else:
+                        resbox = BoxInt()
+                    newop = ResOperation(rop.GETARRAYITEM_GC,
+                                      [op.getarg(1),
+                                       ConstInt(index + source_start)], resbox,
+                                       descr=arraydescr)
+                    self.optimizer.propagate_forward(newop)
+                    val = self.getvalue(resbox)
                 if dest_value.is_virtual():
                     dest_value.setitem(index + dest_start, val)
                 else:
                                          [op.getarg(2),
                                           ConstInt(index + dest_start),
                                           val.get_key_box()], None,
-                                         descr=source_value.arraydescr)
+                                         descr=arraydescr)
                     self.emit_operation(newop)
             return True
         if length and length.getint() == 0:
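In plain Python, what the rewritten arraycopy handling does when the source array is not virtual but the copy is short (length <= 8): the copy is unrolled into explicit per-element reads and writes, which the emitted getarrayitem_gc/setarrayitem_gc operations correspond to. A sketch with illustrative names, not the optimizer's API:

    def unrolled_arraycopy(source, dest, source_start, dest_start, length):
        # one getarrayitem_gc per read, one setarrayitem_gc per write
        for index in range(length):
            val = source[source_start + index]
            dest[dest_start + index] = val

    src = [10, 20, 30]
    dst = [0, 0, 0]
    unrolled_arraycopy(src, dst, 0, 0, 3)
    print(dst)    # [10, 20, 30]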

pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py

         '''
         self.optimize_loop(ops, expected)
 
+    def test_arraycopy_not_virtual_2(self):
+        ops = '''
+        [p0]
+        p1 = new_array(3, descr=arraydescr)
+        call(0, p0, p1, 0, 0, 3, descr=arraycopydescr)
+        i0 = getarrayitem_gc(p1, 0, descr=arraydescr)
+        jump(i0)
+        '''
+        expected = '''
+        [p0]
+        i0 = getarrayitem_gc(p0, 0, descr=arraydescr)
+        i1 = getarrayitem_gc(p0, 1, descr=arraydescr) # removed by the backend
+        i2 = getarrayitem_gc(p0, 2, descr=arraydescr) # removed by the backend
+        jump(i0)
+        '''
+        self.optimize_loop(ops, expected)
+
+    def test_arraycopy_not_virtual_3(self):
+        ops = '''
+        [p0, p1]
+        call(0, p0, p1, 0, 0, 3, descr=arraycopydescr)
+        i0 = getarrayitem_gc(p1, 0, descr=arraydescr)
+        jump(i0)
+        '''
+        expected = '''
+        [p0, p1]
+        i0 = getarrayitem_gc(p0, 0, descr=arraydescr)
+        i1 = getarrayitem_gc(p0, 1, descr=arraydescr)
+        i2 = getarrayitem_gc(p0, 2, descr=arraydescr)
+        setarrayitem_gc(p1, 0, i0, descr=arraydescr)
+        setarrayitem_gc(p1, 1, i1, descr=arraydescr)
+        setarrayitem_gc(p1, 2, i2, descr=arraydescr)
+        jump(i0)
+        '''
+        self.optimize_loop(ops, expected)
+
     def test_arraycopy_no_elem(self):
         """ this was actually observed in the wild
         """

pypy/jit/metainterp/optimizeopt/unroll.py

 from pypy.jit.metainterp.history import TreeLoop, TargetToken, JitCellToken
 from pypy.jit.metainterp.jitexc import JitException
 from pypy.jit.metainterp.optimize import InvalidLoop
+from pypy.rlib.debug import debug_print, debug_start, debug_stop
 from pypy.jit.metainterp.optimizeopt.optimizer import *
 from pypy.jit.metainterp.optimizeopt.generalize import KillHugeIntBounds
 from pypy.jit.metainterp.inliner import Inliner

pypy/module/_cffi_backend/ctypefunc.py

 
 import sys
 from pypy.interpreter.error import OperationError, operationerrfmt
-from pypy.interpreter.error import wrap_oserror
 from pypy.rpython.lltypesystem import lltype, llmemory, rffi
-from pypy.rlib import jit, clibffi, jit_libffi, rposix
+from pypy.rlib import jit, clibffi, jit_libffi
 from pypy.rlib.jit_libffi import CIF_DESCRIPTION, CIF_DESCRIPTION_P
 from pypy.rlib.jit_libffi import FFI_TYPE, FFI_TYPE_P, FFI_TYPE_PP
 from pypy.rlib.jit_libffi import SIZE_OF_FFI_ARG
                     if flag == 1:
                         raw_string = rffi.cast(rffi.CCHARPP, data)[0]
                         lltype.free(raw_string, flavor='raw')
-                    elif flag == 2:
-                        file = rffi.cast(rffi.CCHARPP, data)[0]
-                        rffi_fclose(file)
             lltype.free(buffer, flavor='raw')
         return w_res
 
     assert isinstance(abi, int)
     return space.wrap(abi)
 
-rffi_fdopen = rffi.llexternal("fdopen", [rffi.INT, rffi.CCHARP], rffi.CCHARP)
-rffi_fclose = rffi.llexternal("fclose", [rffi.CCHARP], rffi.INT)
-
-def prepare_file_call_argument(fileobj):
-    import os
-    space = fileobj.space
-    fileobj.direct_flush()
-    fd = fileobj.direct_fileno()
-    if fd < 0:
-        raise OperationError(space.w_ValueError,
-                             space.wrap("file has no OS file descriptor"))
-    try:
-        fd2 = os.dup(fd)
-        f = rffi_fdopen(fd2, fileobj.mode)
-        if not f:
-            os.close(fd2)
-            raise OSError(rposix.get_errno(), "fdopen failed")
-    except OSError, e:
-        raise wrap_oserror(space, e)
-    return f
-
 # ____________________________________________________________
 
 

pypy/module/_cffi_backend/ctypeptr.py

 """
 
 from pypy.interpreter.error import OperationError, operationerrfmt
+from pypy.interpreter.error import wrap_oserror
 from pypy.rpython.lltypesystem import lltype, rffi
 from pypy.rlib.objectmodel import keepalive_until_here
 from pypy.rlib.rarithmetic import ovfcheck
+from pypy.rlib import rposix
 
 from pypy.module._cffi_backend.ctypeobj import W_CType
 from pypy.module._cffi_backend import cdataobj, misc, ctypeprim
         p = rffi.ptradd(cdata, i * self.ctitem.size)
         return cdataobj.W_CData(space, p, self)
 
+    def cast(self, w_ob):
+        if self.is_file:
+            value = self.prepare_file(w_ob)
+            if value:
+                return cdataobj.W_CData(self.space, value, self)
+        return W_CTypePtrBase.cast(self, w_ob)
+
+    def prepare_file(self, w_ob):
+        from pypy.module._file.interp_file import W_File
+        from pypy.module._cffi_backend import ctypefunc
+        ob = self.space.interpclass_w(w_ob)
+        if isinstance(ob, W_File):
+            return prepare_file_argument(self.space, ob)
+        else:
+            return lltype.nullptr(rffi.CCHARP.TO)
+
     def _prepare_pointer_call_argument(self, w_init, cdata):
         space = self.space
         if (space.isinstance_w(w_init, space.w_list) or
             # from a string, we add the null terminator
             length = space.int_w(space.len(w_init)) + 1
         elif self.is_file:
-            from pypy.module._file.interp_file import W_File
-            from pypy.module._cffi_backend import ctypefunc
-            ob = space.interpclass_w(w_init)
-            if isinstance(ob, W_File):
-                result = ctypefunc.prepare_file_call_argument(ob)
+            result = self.prepare_file(w_init)
+            if result:
                 rffi.cast(rffi.CCHARPP, cdata)[0] = result
                 return 2
             return 0
         else:
             raise OperationError(space.w_TypeError,
                      space.wrap("expected a 'cdata struct-or-union' object"))
+
+# ____________________________________________________________
+
+
+rffi_fdopen = rffi.llexternal("fdopen", [rffi.INT, rffi.CCHARP], rffi.CCHARP)
+rffi_fclose = rffi.llexternal("fclose", [rffi.CCHARP], rffi.INT)
+
+class CffiFileObj(object):
+    _immutable_ = True
+    def __init__(self, fd, mode):
+        self.llf = rffi_fdopen(fd, mode)
+        if not self.llf:
+            raise OSError(rposix.get_errno(), "fdopen failed")
+    def close(self):
+        rffi_fclose(self.llf)
+
+def prepare_file_argument(space, fileobj):
+    fileobj.direct_flush()
+    if fileobj.cffi_fileobj is None:
+        fd = fileobj.direct_fileno()
+        if fd < 0:
+            raise OperationError(space.w_ValueError,
+                                 space.wrap("file has no OS file descriptor"))
+        try:
+            fileobj.cffi_fileobj = CffiFileObj(fd, fileobj.mode)
+        except OSError, e:
+            raise wrap_oserror(space, e)
+    return fileobj.cffi_fileobj.llf
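A self-contained sketch of the lazy caching that prepare_file_argument() performs: the descriptor is fdopen()ed at most once per file object and the handle is reused. The helper name and the use of os.dup() are illustrative only; in the real code W_File.close() and CffiFileObj.close() coordinate over who releases the FILE*.

    import os
    import sys

    _handles = {}

    def cached_c_handle(f):
        if id(f) not in _handles:
            f.flush()                                   # mirrors direct_flush()
            # dup() only in this sketch, to stay independent of f's lifetime
            _handles[id(f)] = os.fdopen(os.dup(f.fileno()), f.mode)
        return _handles[id(f)]

    h = cached_c_handle(sys.stdout)
    print(h is cached_c_handle(sys.stdout))             # True: created once, reused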

pypy/module/_cffi_backend/test/_backend_test_c.py

     e = py.test.raises(TypeError, fputs, b"hello world\n", fw1)
     assert str(e.value) == ("initializer for ctype 'struct NOT_FILE *' must "
                             "be a cdata pointer, not file")
+
+def test_FILE_object():
+    if sys.platform == "win32":
+        py.test.skip("testing FILE not implemented")
+    #
+    BFILE = new_struct_type("_IO_FILE")
+    BFILEP = new_pointer_type(BFILE)
+    BChar = new_primitive_type("char")
+    BCharP = new_pointer_type(BChar)
+    BInt = new_primitive_type("int")
+    BFunc = new_function_type((BCharP, BFILEP), BInt, False)
+    BFunc2 = new_function_type((BFILEP,), BInt, False)
+    ll = find_and_load_library('c')
+    fputs = ll.load_function(BFunc, "fputs")
+    fileno = ll.load_function(BFunc2, "fileno")
+    #
+    import posix
+    fdr, fdw = posix.pipe()
+    fw1 = posix.fdopen(fdw, 'wb', 256)
+    #
+    fw1p = cast(BFILEP, fw1)
+    fw1.write(b"X")
+    fw1.flush()
+    res = fputs(b"hello\n", fw1p)
+    assert res >= 0
+    res = fileno(fw1p)
+    assert res == fdw
+    fw1.close()
+    #
+    data = posix.read(fdr, 256)
+    assert data == b"Xhello\n"
+    posix.close(fdr)

pypy/module/_cffi_backend/test/test_ztranslation.py

 from pypy.objspace.fake.checkmodule import checkmodule
+from pypy.module._cffi_backend import ctypeptr
+from pypy.rpython.lltypesystem import lltype, rffi
 
 # side-effect: FORMAT_LONGDOUBLE must be built before test_checkmodule()
 from pypy.module._cffi_backend import misc
 
 
 def test_checkmodule():
-    checkmodule('_cffi_backend')
+    # prepare_file_argument() is not working without translating the _file
+    # module too
+    def dummy_prepare_file_argument(space, fileobj):
+        return lltype.nullptr(rffi.CCHARP.TO)
+    old = ctypeptr.prepare_file_argument
+    try:
+        ctypeptr.prepare_file_argument = dummy_prepare_file_argument
+        #
+        checkmodule('_cffi_backend')
+        #
+    finally:
+        ctypeptr.prepare_file_argument = old

pypy/module/_file/interp_file.py

     encoding = None
     errors   = None
     fd       = -1
+    cffi_fileobj = None    # pypy/module/_cffi_backend
 
     newlines = 0     # Updated when the stream is closed
 
                 del openstreams[stream]
             except KeyError:
                 pass
-            stream.close()
+            # close the stream.  If cffi_fileobj is None, we close the
+            # underlying fileno too.  Otherwise, we leave that to
+            # cffi_fileobj.close().
+            cffifo = self.cffi_fileobj
+            self.cffi_fileobj = None
+            stream.close1(cffifo is None)
+            if cffifo is not None:
+                cffifo.close()
 
     def direct_fileno(self):
         self.getstream()    # check if the file is still open

pypy/module/bz2/interp_bz2.py

             read += length
 
     def readall(self):
-        w_result = self.decompressor.decompress(self.stream.readall())
-        if self.decompressor.running:
-            raise OperationError(self.space.w_EOFError,
-                                 self.space.wrap("compressed file ended before the logical end-of-the-stream was detected"))
-        result = self.space.str_w(w_result)
-        self.readlength += len(result)
+        raw = self.stream.readall()
+        if raw:
+            w_result = self.decompressor.decompress(raw)
+            if self.decompressor.running:
+                raise OperationError(self.space.w_EOFError,
+                                     self.space.wrap("compressed file ended before the logical end-of-the-stream was detected"))
+            result = self.space.str_w(w_result)
+            self.readlength += len(result)
+        else:
+            result = ""
         if len(self.buffer) != self.pos:
             pos = self.pos
             assert pos >= 0
         was found after the end of stream, it'll be ignored and saved in
         unused_data attribute."""
 
-        if data == '':
-            return self.space.wrap('')
         if not self.running:
             raise OperationError(self.space.w_EOFError,
                 self.space.wrap("end of stream was already found"))
+        if data == '':
+            return self.space.wrap('')
 
         in_bufsize = len(data)
 

pypy/module/bz2/test/test_bz2_compdecomp.py

         bz2d = BZ2Decompressor()
         bz2d.decompress(self.DATA)
         raises(EOFError, bz2d.decompress, "foo")
+        raises(EOFError, bz2d.decompress, "")
 
     def test_buffer(self):
         from bz2 import BZ2Decompressor

pypy/module/itertools/interp_itertools.py

                                space.newtuple(args_w)])
 
 def check_number(space, w_obj):
-    if (space.lookup(w_obj, '__add__') is None or
-        space.is_true(space.isinstance(w_obj, space.w_str)) or
-        space.is_true(space.isinstance(w_obj, space.w_unicode))):
+    if (space.lookup(w_obj, '__int__') is None and
+        space.lookup(w_obj, '__float__') is None):
         raise OperationError(space.w_TypeError,
                              space.wrap("expected a number"))
 
         next = interp2app(W_Count.next_w),
         __reduce__ = interp2app(W_Count.reduce_w),
         __repr__ = interp2app(W_Count.repr_w),
-        __doc__ = """Make an iterator that returns consecutive integers starting
-    with n.  If not specified n defaults to zero. Does not currently
-    support python long integers. Often used as an argument to imap()
-    to generate consecutive data points.  Also, used with izip() to
-    add sequence numbers.
+        __doc__ = """Make an iterator that returns evenly spaced values starting
+    with n.  If not specified n defaults to zero.  Often used as an
+    argument to imap() to generate consecutive data points.  Also,
+    used with izip() to add sequence numbers.
 
-    Equivalent to :
+    Equivalent to:
 
-    def count(n=0):
-        if not isinstance(n, int):
-            raise TypeError("%s is not a regular integer" % n)
+    def count(start=0, step=1):
+        n = start
         while True:
             yield n
-            n += 1
+            n += step
     """)
 
 

pypy/module/itertools/test/test_itertools.py

 
         raises(TypeError, itertools.count, None)
         raises(TypeError, itertools.count, 'a')
+        raises(TypeError, itertools.count, [])
 
     def test_repeat(self):
         import itertools

pypy/module/micronumpy/__init__.py

         'flexible': 'interp_boxes.W_FlexibleBox',
         'character': 'interp_boxes.W_CharacterBox',
         'str_': 'interp_boxes.W_StringBox',
+        'string_': 'interp_boxes.W_StringBox',
         'unicode_': 'interp_boxes.W_UnicodeBox',
         'void': 'interp_boxes.W_VoidBox',
         'complexfloating': 'interp_boxes.W_ComplexFloatingBox',

pypy/module/micronumpy/arrayimpl/concrete.py

 from pypy.rpython.lltypesystem import rffi, lltype
 from pypy.rlib import jit
 from pypy.rlib.rawstorage import free_raw_storage
+from pypy.rlib.debug import make_sure_not_resized
 
 class ConcreteArrayIterator(base.BaseArrayIterator):
     def __init__(self, array):
         self.skip = array.strides[0]
         self.dtype = array.dtype
         self.index = 0
-        self.size = array.shape[0]
+        self.size = array.get_shape()[0]
 
     def next(self):
         self.offset += self.skip
     parent = None
     
     def get_shape(self):
-        return self.shape
+        shape = self.shape
+        jit.hint(len(shape), promote=True)
+        return shape
 
     def getitem(self, index):
         return self.dtype.getitem(self, index)
         if impl.is_scalar():
             self.fill(impl.get_scalar_value())
             return
-        shape = shape_agreement(space, self.shape, arr)
+        shape = shape_agreement(space, self.get_shape(), arr)
         if impl.storage == self.storage:
             impl = impl.copy()
         loop.setslice(shape, self, impl)
         # Since we got to here, prod(new_shape) == self.size
         new_strides = None
         if self.size > 0:
-            new_strides = calc_new_strides(new_shape, self.shape,
+            new_strides = calc_new_strides(new_shape, self.get_shape(),
                                            self.strides, self.order)
         if new_strides:
             # We can create a view, strides somehow match up.
                 raise IndexError
             idx = int_w(space, w_index)
             if idx < 0:
-                idx = self.shape[i] + idx
-            if idx < 0 or idx >= self.shape[i]:
+                idx = self.get_shape()[i] + idx
+            if idx < 0 or idx >= self.get_shape()[i]:
                 raise operationerrfmt(space.w_IndexError,
-                      "index (%d) out of range (0<=index<%d", i, self.shape[i],
+                      "index (%d) out of range (0<=index<%d", i, self.get_shape()[i],
                 )
             item += idx * self.strides[i]
         return item
     @jit.unroll_safe
     def _lookup_by_unwrapped_index(self, space, lst):
         item = self.start
-        assert len(lst) == len(self.shape)
+        shape = self.get_shape()
+        assert len(lst) == len(shape)
         for i, idx in enumerate(lst):
             if idx < 0:
-                idx = self.shape[i] + idx
-            if idx < 0 or idx >= self.shape[i]:
+                idx = shape[i] + idx
+            if idx < 0 or idx >= shape[i]:
                 raise operationerrfmt(space.w_IndexError,
-                      "index (%d) out of range (0<=index<%d", i, self.shape[i],
+                      "index (%d) out of range (0<=index<%d", i, shape[i],
                 )
             item += idx * self.strides[i]
         return item
             raise IndexError
         if isinstance(w_idx, W_NDimArray):
             raise ArrayArgumentException
-        shape_len = len(self.shape)
+        shape = self.get_shape()
+        shape_len = len(shape)
         if shape_len == 0:
             raise OperationError(space.w_IndexError, space.wrap(
                 "0-d arrays can't be indexed"))
             return RecordChunk(idx)
         if (space.isinstance_w(w_idx, space.w_int) or
             space.isinstance_w(w_idx, space.w_slice)):
-            return Chunks([Chunk(*space.decode_index4(w_idx, self.shape[0]))])
+            return Chunks([Chunk(*space.decode_index4(w_idx, self.get_shape()[0]))])
         elif space.is_w(w_idx, space.w_None):
             return Chunks([NewAxisChunk()])
         result = []
                 result.append(NewAxisChunk())
             else:
                 result.append(Chunk(*space.decode_index4(w_item,
-                                                         self.shape[i])))
+                                                         self.get_shape()[i])))
                 i += 1
         return Chunks(result)
 
             view.implementation.setslice(space, w_value)
 
     def transpose(self):
-        if len(self.shape) < 2:
+        if len(self.get_shape()) < 2:
             return self
         strides = []
         backstrides = []
         shape = []
-        for i in range(len(self.shape) - 1, -1, -1):
+        for i in range(len(self.get_shape()) - 1, -1, -1):
             strides.append(self.strides[i])
             backstrides.append(self.backstrides[i])
-            shape.append(self.shape[i])
+            shape.append(self.get_shape()[i])
         return SliceArray(self.start, strides,
                           backstrides, shape, self)
 
     def copy(self):
-        strides, backstrides = support.calc_strides(self.shape, self.dtype,
+        strides, backstrides = support.calc_strides(self.get_shape(), self.dtype,
                                                     self.order)
-        impl = ConcreteArray(self.shape, self.dtype, self.order, strides,
+        impl = ConcreteArray(self.get_shape(), self.dtype, self.order, strides,
                              backstrides)
-        return loop.setslice(self.shape, impl, self)
+        return loop.setslice(self.get_shape(), impl, self)
 
     def create_axis_iter(self, shape, dim):
         return AxisIterator(self, shape, dim)
         return MultiDimViewIterator(self, self.start, r[0], r[1], shape)
 
     def swapaxes(self, axis1, axis2):
-        shape = self.shape[:]
+        shape = self.get_shape()[:]
         strides = self.strides[:]
         backstrides = self.backstrides[:]
         shape[axis1], shape[axis2] = shape[axis2], shape[axis1]   
 
 class ConcreteArray(BaseConcreteArray):
     def __init__(self, shape, dtype, order, strides, backstrides):
+        make_sure_not_resized(shape)
         self.shape = shape
         self.size = support.product(shape) * dtype.get_size()
         self.storage = dtype.itemtype.malloc(self.size)
         self.backstrides = backstrides
 
     def create_iter(self, shape):
-        if shape == self.shape:
+        if shape == self.get_shape():
             return ConcreteArrayIterator(self)
         r = calculate_broadcast_strides(self.strides, self.backstrides,
-                                        self.shape, shape)
+                                        self.get_shape(), shape)
         return MultiDimViewIterator(self, 0, r[0], r[1], shape)
 
     def fill(self, box):
         loop.fill(self, box.convert_to(self.dtype))
 
     def create_iter(self, shape):
-        if shape != self.shape:
+        if shape != self.get_shape():
             r = calculate_broadcast_strides(self.strides, self.backstrides,
-                                            self.shape, shape)
+                                            self.get_shape(), shape)
             return MultiDimViewIterator(self.parent,
                                         self.start, r[0], r[1], shape)
-        if len(self.shape) == 1:
+        if len(self.get_shape()) == 1:
             return OneDimViewIterator(self)
         return MultiDimViewIterator(self.parent, self.start, self.strides,
-                                    self.backstrides, self.shape)
+                                    self.backstrides, self.get_shape())
 
     def set_shape(self, space, new_shape):
-        if len(self.shape) < 2 or self.size == 0:
+        if len(self.get_shape()) < 2 or self.size == 0:
             # TODO: this code could be refactored into calc_strides
             # but then calc_strides would have to accept a stepping factor
             strides = []
                 new_shape.reverse()
             return SliceArray(self.start, strides, backstrides, new_shape,
                               self)
-        new_strides = calc_new_strides(new_shape, self.shape, self.strides,
+        new_strides = calc_new_strides(new_shape, self.get_shape(), self.strides,
                                        self.order)
         if new_strides is None:
             raise OperationError(space.w_AttributeError, space.wrap(

pypy/module/micronumpy/arrayimpl/voidbox.py

     def __init__(self, size, dtype):
         self.storage = alloc_raw_storage(size)
         self.dtype = dtype
+        self.size = size
 
     def __del__(self):
         free_raw_storage(self.storage)

pypy/module/micronumpy/dot.py

         right_critical_dim = len(right_shape) - 2
         right_critical_dim_size = right_shape[right_critical_dim]
         assert right_critical_dim >= 0
-        out_shape += left_shape[:-1] + \
-                     right_shape[0:right_critical_dim] + \
-                     right_shape[right_critical_dim + 1:]
+        out_shape = out_shape + left_shape[:-1] + \
+                    right_shape[0:right_critical_dim] + \
+                    right_shape[right_critical_dim + 1:]
     elif len(right_shape) > 0:
         #dot does not reduce for scalars
-        out_shape += left_shape[:-1]
+        out_shape = out_shape + left_shape[:-1]
     if my_critical_dim_size != right_critical_dim_size:
         raise OperationError(space.w_ValueError, space.wrap(
                                         "objects are not aligned"))

pypy/module/micronumpy/interp_boxes.py

         except KeyError:
             raise OperationError(space.w_IndexError,
                                  space.wrap("Field %s does not exist" % item))
-        return dtype.itemtype.read(self.arr, self.ofs, ofs, dtype)
+        read_val = dtype.itemtype.read(self.arr, self.ofs, ofs, dtype)
+        if isinstance(read_val, W_StringBox):
+            # StringType returns a str
+            return space.wrap(dtype.itemtype.to_str(read_val))
+        return read_val
 
     @unwrap_spec(item=str)
     def descr_setitem(self, space, item, w_value):
             arr.storage[i] = arg[i]
         return W_StringBox(arr, 0, arr.dtype)
 
+    # The full test suite only passes with this function present;
+    # test_stringarray on its own passes without it.
+    # But with convert_to() defined, test_ztranslation fails, since
+    # W_CharacterBox is not a W_GenericBox.
+    # Why do multiple tests need it?
+    #def convert_to(self, dtype):
+    #    xxx
 
 class W_UnicodeBox(W_CharacterBox):
     def descr__new__unicode_box(space, w_subtype, w_arg):
 
 W_VoidBox.typedef = TypeDef("void", W_FlexibleBox.typedef,
     __module__ = "numpypy",
+    __new__ = interp2app(W_VoidBox.descr__new__.im_func),
     __getitem__ = interp2app(W_VoidBox.descr_getitem),
     __setitem__ = interp2app(W_VoidBox.descr_setitem),
 )

pypy/module/micronumpy/interp_dtype.py

 from pypy.module.micronumpy import types, interp_boxes
 from pypy.rlib.objectmodel import specialize
 from pypy.rlib.rarithmetic import LONG_BIT, r_longlong, r_ulonglong
+from pypy.rpython.lltypesystem import rffi
 
 
 UNSIGNEDLTR = "u"
 VOIDLTR = 'V'
 STRINGLTR = 'S'
 UNICODELTR = 'U'
+INTPLTR = 'p'
+UINTPLTR = 'P'
 
 def decode_w_dtype(space, w_dtype):
     if space.is_none(w_dtype):
     def fill(self, storage, box, start, stop):
         self.itemtype.fill(storage, self.get_size(), box, start, stop, 0)
 
+    def get_name(self):
+        if self.char == 'S':
+            return '|S' + str(self.get_size())
+        return self.name
+
     def descr_str(self, space):
-        return space.wrap(self.name)
+        return space.wrap(self.get_name())
 
     def descr_repr(self, space):
-        return space.wrap("dtype('%s')" % self.name)
+        return space.wrap("dtype('%s')" % self.get_name())
 
     def descr_get_itemsize(self, space):
         return space.wrap(self.itemtype.get_element_size())
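With get_name() in place, string dtypes report their width in the NumPy spelling instead of the bare name. Roughly, assuming the 'S<n>' spec is accepted by the dtype constructor in this branch:

    from _numpypy import dtype

    d = dtype('S5')
    str(d)           # expected: '|S5'  (get_name(): '|S' + item size)
    repr(d)          # expected: "dtype('|S5')"
    str(dtype(int))  # non-string dtypes still report their plain .name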
     def is_record_type(self):
         return self.fields is not None
 
+    def is_flexible_type(self):
+        return (self.num == 18 or self.num == 19 or self.num == 20)
+
     def __repr__(self):
         if self.fields is not None:
             return '<DType %r>' % self.fields
             #alternate_constructors=[space.w_buffer],
             # XXX no buffer in space
         )
+        ptr_size = rffi.sizeof(rffi.CCHARP)
+        if ptr_size == 4:
+            intp_box = interp_boxes.W_Int32Box
+            intp_type = types.Int32()
+            uintp_box = interp_boxes.W_UInt32Box
+            uintp_type = types.UInt32()
+        elif ptr_size == 8:
+            intp_box = interp_boxes.W_Int64Box
+            intp_type = types.Int64()
+            uintp_box = interp_boxes.W_UInt64Box
+            uintp_type = types.UInt64()
+        else:
+            raise ValueError('unknown pointer size %d' % ptr_size)
+        self.w_intpdtype = W_Dtype(
+            intp_type,
+            num=5,
+            kind=INTPLTR,
+            name='intp',
+            char=INTPLTR,
+            w_box_type = space.gettypefor(intp_box),
+        )    
+        self.w_uintpdtype = W_Dtype(
+            uintp_type,
+            num=6,
+            kind=UINTPLTR,
+            name='uintp',
+            char=UINTPLTR,
+            w_box_type = space.gettypefor(uintp_box),
+        )    
         self.builtin_dtypes = [
             self.w_booldtype, self.w_int8dtype, self.w_uint8dtype,
             self.w_int16dtype, self.w_uint16dtype, self.w_int32dtype,
             self.w_float32dtype, self.w_float64dtype, self.w_complex64dtype,
             self.w_complex128dtype,
             self.w_stringdtype, self.w_unicodedtype,
-            self.w_voiddtype,
+            self.w_voiddtype, self.w_intpdtype, self.w_uintpdtype,
         ]
         self.float_dtypes_by_num_bytes = sorted(
             (dtype.itemtype.get_element_size(), dtype)
             #'CDOUBLE',
             #'DATETIME',
             'UINT': self.w_uint32dtype,
-            'INTP': self.w_longdtype,
+            'INTP': self.w_intpdtype,
+            'UINTP': self.w_uintpdtype,
             #'HALF',
             'BYTE': self.w_int8dtype,
             #'CFLOAT': ,
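intp/uintp are sized from rffi.sizeof(rffi.CCHARP) at space-creation time, so they alias the 32-bit boxes on 32-bit builds and the 64-bit boxes on 64-bit builds, and 'INTP'/'UINTP' in the name table now point at the new dtypes instead of w_longdtype. A hedged usage sketch matching the new checks in test_dtypes.py below:

    import _numpypy as numpy

    d = numpy.dtype('intp')
    assert d.num == 5                    # per the new test_dtypes.py assertions
    assert numpy.dtype('uintp').num == 6
    assert d.itemsize in (4, 8)          # one pointer wide: rffi.sizeof(rffi.CCHARP)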

pypy/module/micronumpy/interp_numarray.py

     shape = []
     for w_item in space.fixedview(w_size):
         shape.append(space.int_w(w_item))
-    return shape
+    return shape[:]
 
 class __extend__(W_NDimArray):
     @jit.unroll_safe
 
     def _binop_right_impl(ufunc_name):
         def impl(self, space, w_other, w_out=None):
-            dtype = interp_ufuncs.find_dtype_for_scalar(space, w_other,
-                                                        self.get_dtype())
-            w_other = W_NDimArray.new_scalar(space, dtype, w_other)
+            w_other = convert_to_array(space, w_other)
             return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [w_other, self, w_out])
         return func_with_new_name(impl, "binop_right_%s_impl" % ufunc_name)
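The reflected binary ops previously forced w_other into a scalar of a guessed dtype; routing it through convert_to_array() means the right-hand implementations now accept any array-like and go through the usual broadcasting path. A hedged sketch of the effect:

    import _numpypy as numpy

    a = numpy.array([1, 2, 3])
    b = [10, 20, 30] - a    # list.__sub__ fails, a.__rsub__ converts the list
    # expected: b == array([9, 18, 27]), computed element-wise after conversion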
 

pypy/module/micronumpy/interp_ufuncs.py

                 "supported for binary functions"))
         assert isinstance(self, W_Ufunc2)
         obj = convert_to_array(space, w_obj)
+        if obj.get_dtype().is_flexible_type():
+            raise OperationError(space.w_TypeError, 
+                      space.wrap('cannot perform reduce for flexible type'))
         obj_shape = obj.get_shape()
         if obj.is_scalar():
             return obj.get_scalar_value()
             if space.is_w(out, space.w_None):
                 out = None
         w_obj = convert_to_array(space, w_obj)
+        if w_obj.get_dtype().is_flexible_type():
+            raise OperationError(space.w_TypeError, 
+                      space.wrap('Not implemented for this type'))
         calc_dtype = find_unaryop_result_dtype(space,
                                   w_obj.get_dtype(),
                                   promote_to_float=self.promote_to_float,
             w_out = None
         w_lhs = convert_to_array(space, w_lhs)
         w_rhs = convert_to_array(space, w_rhs)
+        if w_lhs.get_dtype().is_flexible_type() or \
+           w_rhs.get_dtype().is_flexible_type():
+            raise OperationError(space.w_TypeError, 
+                      space.wrap('unsupported operand types'))
         calc_dtype = find_binop_result_dtype(space,
             w_lhs.get_dtype(), w_rhs.get_dtype(),
             int_only=self.int_only,
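All three ufunc entry points (reduce, unary call, binary call) now reject flexible dtypes (string, unicode, void) up front instead of failing later during dtype promotion. In practice:

    import _numpypy as numpy

    a = numpy.array(['abc', 'defg'])
    try:
        a + a               # binary ufunc on a string dtype
    except TypeError:
        pass                # 'unsupported operand types'
    # unary ufuncs raise 'Not implemented for this type',
    # and reduce raises 'cannot perform reduce for flexible type'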
         return interp_dtype.get_dtype_cache(space).builtin_dtypes[dtypenum]
 
 
+@jit.unroll_safe
 def find_unaryop_result_dtype(space, dt, promote_to_float=False,
     promote_bools=False, promote_to_largest=False, allow_complex=True):
     if promote_bools and (dt.kind == interp_dtype.BOOLLTR):
     int64_dtype = interp_dtype.get_dtype_cache(space).w_int64dtype
     complex_type = interp_dtype.get_dtype_cache(space).w_complex128dtype
     float_type = interp_dtype.get_dtype_cache(space).w_float64dtype
+    str_dtype = interp_dtype.get_dtype_cache(space).w_stringdtype
     if isinstance(w_obj, interp_boxes.W_GenericBox):
         dtype = w_obj.get_dtype(space)
         if current_guess is None:
             current_guess is complex_type or current_guess is float_type):
             return complex_type
         return current_guess
+    elif space.isinstance_w(w_obj, space.w_str):
+        if (current_guess is None):
+            return interp_dtype.variable_dtype(space, 
+                                               'S%d' % space.len_w(w_obj))
+        elif current_guess.num == 18:
+            if current_guess.itemtype.get_size() < space.len_w(w_obj):
+                return interp_dtype.variable_dtype(space, 
+                                                   'S%d' % space.len_w(w_obj))
+        return current_guess
     if current_guess is complex_type:
         return complex_type
     return interp_dtype.get_dtype_cache(space).w_float64dtype
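The new string branch widens the guessed dtype as longer strings are seen, so building an array from plain str objects should end up with an 'S<n>' dtype where n is the length of the longest element (a hedged expectation based on the code above, assuming variable_dtype() accepts the 'S%d' spec as written):

    import _numpypy as numpy

    a = numpy.array(['a', 'abc', 'ab'])
    # the guess starts at S1 and widens to S3 once 'abc' is seen;
    # expected: str(a.dtype) == '|S3'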

pypy/module/micronumpy/stdobjspace.py

+
+from pypy.objspace.std import stringobject
+from pypy.module.micronumpy import interp_boxes
+
+def delegate_stringbox2stringobj(space, w_box):
+    return space.wrap(w_box.dtype.itemtype.to_str(w_box))
+
+def register_delegates(typeorder):
+    typeorder[interp_boxes.W_StringBox] = [
+        (stringobject.W_StringObject, delegate_stringbox2stringobj),
+    ]
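The new stdobjspace.py registers a delegation from W_StringBox to the std object space's W_StringObject (the register_delegates() hook still has to be called from the object space setup, which is not part of this hunk). The intent is that a string box can stand in wherever an interp-level str is expected, e.g.:

    import _numpypy as numpy

    a = numpy.array(['abc', 'def'])
    # a[0] is a string box; via the delegation it should behave like an
    # ordinary str in comparisons (a hedged expectation, not a test from this diff)
    assert a[0] == 'abc'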

pypy/module/micronumpy/strides.py

     while True:
         new_batch = []
         if not batch:
-            return shape, []
+            return shape[:], []
         if is_single_elem(space, batch[0], is_rec_type):
             for w_elem in batch:
                 if not is_single_elem(space, w_elem, is_rec_type):
                     raise OperationError(space.w_ValueError, space.wrap(
                         "setting an array element with a sequence"))
-            return shape, batch
+            return shape[:], batch
         size = space.len_w(batch[0])
         for w_elem in batch:
             if (is_single_elem(space, w_elem, is_rec_type) or

pypy/module/micronumpy/test/test_dtypes.py

         from _numpypy import dtype
 
         assert dtype(bool).num == 0
+        assert dtype('intp').num == 5
+        assert dtype('uintp').num == 6
         assert dtype(int).num == 7
         assert dtype(long).num == 9
         assert dtype(float).num == 12
 
     def test_cant_subclass(self):
         from _numpypy import dtype
-
         # You can't subclass dtype
         raises(TypeError, type, "Foo", (dtype,), {})
 
+    def test_can_subclass(self):
+        import _numpypy
+        class xyz(_numpypy.void):
+            pass
+        assert True
+
     def test_aliases(self):
         from _numpypy import dtype
 
 
 
 class AppTestTypes(BaseNumpyAppTest):
+    def setup_class(cls):
+        BaseNumpyAppTest.setup_class.im_func(cls)
+        if option.runappdirect:
+            import platform
+            bits, linkage = platform.architecture()
+            ptr_size = int(bits[:-3]) // 8
+        else:
+            from pypy.rpython.lltypesystem import rffi
+            ptr_size = rffi.sizeof(rffi.CCHARP)
+        cls.w_ptr_size = cls.space.wrap(ptr_size)
+
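setup_class computes the pointer size either from platform.architecture() (when running app-direct against a real interpreter) or from rffi (when running on the untranslated interpreter) and exposes it to app-level as self.ptr_size. A test in this class could then check the new dtypes against it along these lines (hypothetical sketch, not quoted from the diff):

    def test_intp_itemsize(self):
        import _numpypy as numpy
        # the new intp/uintp dtypes should be exactly one pointer wide
        assert numpy.dtype('intp').itemsize == self.ptr_size
        assert numpy.dtype('uintp').itemsize == self.ptr_size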
     def test_abstract_types(self):
         import _numpypy as numpy
         raises(TypeError, numpy.generic, 0)
     def test_int8(self):
         import _numpypy as numpy
 
-        assert numpy.int8.mro() == [numpy.int8, numpy.signedinteger, numpy.integer, numpy.number, numpy.generic, object]
+        assert numpy.int8.mro() == [numpy.int8, numpy.signedinteger,
+                                    numpy.integer, numpy.number, 
+                                    numpy.generic, object]
 
         a = numpy.array([1, 2, 3], numpy.int8)
         assert type(a[1]) is numpy.int8
     def test_uint8(self):
         import _numpypy as numpy
 
-        assert numpy.uint8.mro() == [numpy.uint8, numpy.unsignedinteger, numpy.integer, numpy.number, numpy.generic, object]
+        assert numpy.uint8.mro() == [numpy.uint8, numpy.unsignedinteger, 
+                                     numpy.integer, numpy.number, 
+                                     numpy.generic, object]
 
         a = numpy.array([1, 2, 3], numpy.uint8)
         assert type(a[1]) is numpy.uint8
         import _numpypy as numpy
 
         assert numpy.int_ is numpy.dtype(int).type
-        assert numpy.int_.mro() == [numpy.int_, numpy.signedinteger, numpy.integer, numpy.number, numpy.generic, int, object]
+        assert numpy.int_.mro() == [numpy.int_, numpy.signedinteger, 
+                                    numpy.integer, numpy.number, 
+                                    numpy.generic, int, object]
 
     def test_int64(self):
         import sys
         import _numpypy as numpy
 
         if sys.maxint == 2 ** 63 -1:
-            assert numpy.int64.mro() == [numpy.int64, numpy.signedinteger, numpy.integer, numpy.number, numpy.generic, int, object]
+            assert numpy.int64.mro() == [numpy.int64, numpy.signedinteger, 
+                                         numpy.integer, numpy.number, 
+                                         numpy.generic, int, object]
         else:
-            assert numpy.int64.mro() == [numpy.int64, numpy.signedinteger, numpy.integer, numpy.number, numpy.generic, object]
+            assert numpy.int64.mro() == [numpy.int64, numpy.signedinteger, 
+                                         numpy.integer, numpy.number, 
+                                         numpy.generic, object]
 
         assert numpy.dtype(numpy.int64).type is numpy.int64
         assert numpy.int64(3) == 3
         import sys
         import _numpypy as numpy