Commits

mattip committed ae694c5 Merge

merge default into branch

  • Parent commits 92ac356, 2737da5
  • Branches str-dtype-improvement

Files changed (211)

File _pytest/pdb.py

File contents unchanged.

File lib-python/2/distutils/sysconfig_pypy.py

     if prefix is None:
         prefix = PREFIX
     if standard_lib:
-        return os.path.join(prefix, "lib-python", get_python_version())
+        return os.path.join(prefix, "lib-python", sys.version[0])
     return os.path.join(prefix, 'site-packages')
 
 
     g['SO'] = _get_so_extension() or ".so"
     g['SOABI'] = g['SO'].rsplit('.')[0]
     g['LIBDIR'] = os.path.join(sys.prefix, 'lib')
+    g['CC'] = "gcc -pthread" # -pthread might not be valid on OS/X, check
 
     global _config_vars
     _config_vars = g

File lib-python/2/pickle.py

 import struct
 import re
 
+from __pypy__.builders import StringBuilder
+
 __all__ = ["PickleError", "PicklingError", "UnpicklingError", "Pickler",
            "Unpickler", "dump", "dumps", "load", "loads"]
 
 except ImportError:
     from StringIO import StringIO
 
+
+class StringBuilderFile(object):
+    ''' pickle uses only file.write - provide this method, 
+    use StringBuilder for speed
+    '''
+    def __init__(self):
+        self.builder = StringBuilder()
+        self.write = self.builder.append
+
+    def getvalue(self):
+        return self.builder.build()
+
+
 def dump(obj, file, protocol=None):
     Pickler(file, protocol).dump(obj)
 
 def dumps(obj, protocol=None):
-    file = StringIO()
+    file = StringBuilderFile()
     Pickler(file, protocol).dump(obj)
     return file.getvalue()
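
The new StringBuilderFile above only has to support write() and getvalue(), since write() is all pickle calls on its file argument. A minimal portable sketch of the same write-only collector idea, backed by a plain list instead of __pypy__.builders.StringBuilder (illustrative, not part of the commit):

    class ListBuilderFile(object):
        '''Collect write() chunks in a list and join them once at the end.'''
        def __init__(self):
            self._chunks = []
            self.write = self._chunks.append

        def getvalue(self):
            return ''.join(self._chunks)

    # usage mirrors dumps() above:
    #     f = ListBuilderFile(); Pickler(f, protocol).dump(obj); data = f.getvalue()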
 

File lib-python/2/sysconfig.py

         'data'   : '{base}',
         },
     'pypy': {
-        'stdlib': '{base}/lib-python/{py_version_short}',
-        'platstdlib': '{base}/lib-python/{py_version_short}',
-        'purelib': '{base}/lib-python/{py_version_short}',
-        'platlib': '{base}/lib-python/{py_version_short}',
+        'stdlib': '{base}/lib-python/{py_version}',
+        'platstdlib': '{base}/lib-python/{py_version}',
+        'purelib': '{base}/lib-python/{py_version}',
+        'platlib': '{base}/lib-python/{py_version}',
         'include': '{base}/include',
         'platinclude': '{base}/include',
         'scripts': '{base}/bin',

File lib_pypy/_marshal.py

 # the "sandboxed" process.  It must work for Python2 as well.
 
 import types
-from _codecs import utf_8_decode, utf_8_encode
 
 try:
     intern
 
     def dump_unicode(self, x):
         self._write(TYPE_UNICODE)
-        #s = x.encode('utf8')
-        s, len_s = utf_8_encode(x)
-        self.w_long(len_s)
+        s = x.encode('utf8')
+        self.w_long(len(s))
         self._write(s)
     try:
         unicode
     def load_unicode(self):
         n = self.r_long()
         s = self._read(n)
-        #ret = s.decode('utf8')
-        ret, len_ret = utf_8_decode(s)
+        ret = s.decode('utf8')
         return ret
     dispatch[TYPE_UNICODE] = load_unicode
 

File lib_pypy/_sqlite3.py

         self._in_transaction = False
         self.isolation_level = isolation_level
 
-        self._cursors = []
+        self.__cursors = []
+        self.__cursors_counter = 0
         self.__statements = []
-        self.__statement_counter = 0
+        self.__statements_counter = 0
         self._statement_cache = _StatementCache(self, cached_statements)
 
         self.__func_cache = {}
     def close(self):
         self._check_thread()
 
-        for statement in self.__statements:
-            obj = statement()
-            if obj is not None:
-                obj._finalize()
+        self.__do_all_statements(Statement._finalize, True)
 
         if self._db:
             ret = _lib.sqlite3_close(self._db)
         exc.error_code = error_code
         return exc
 
+    def _remember_cursor(self, cursor):
+        self.__cursors.append(weakref.ref(cursor))
+        self.__cursors_counter += 1
+        if self.__cursors_counter < 200:
+            return
+        self.__cursors_counter = 0
+        self.__cursors = [r for r in self.__cursors if r() is not None]
+
     def _remember_statement(self, statement):
         self.__statements.append(weakref.ref(statement))
-        self.__statement_counter += 1
+        self.__statements_counter += 1
+        if self.__statements_counter < 200:
+            return
+        self.__statements_counter = 0
+        self.__statements = [r for r in self.__statements if r() is not None]
 
-        if self.__statement_counter % 100 == 0:
-            self.__statements = [ref for ref in self.__statements
-                                 if ref() is not None]
+    def __do_all_statements(self, action, reset_cursors):
+        for weakref in self.__statements:
+            statement = weakref()
+            if statement is not None:
+                action(statement)
+
+        if reset_cursors:
+            for weakref in self.__cursors:
+                cursor = weakref()
+                if cursor is not None:
+                    cursor._reset = True
 
     @_check_thread_wrap
     @_check_closed_wrap
         if not self._in_transaction:
             return
 
-        for statement in self.__statements:
-            obj = statement()
-            if obj is not None:
-                obj._reset()
+        self.__do_all_statements(Statement._reset, False)
 
         statement = c_void_p()
         ret = _lib.sqlite3_prepare_v2(self._db, b"COMMIT", -1,
         if not self._in_transaction:
             return
 
-        for statement in self.__statements:
-            obj = statement()
-            if obj is not None:
-                obj._reset()
-
-        for cursor_ref in self._cursors:
-            cursor = cursor_ref()
-            if cursor:
-                cursor._reset = True
+        self.__do_all_statements(Statement._reset, True)
 
         statement = c_void_p()
         ret = _lib.sqlite3_prepare_v2(self._db, b"ROLLBACK", -1,
     __statement = None
 
     def __init__(self, con):
-        self.__initialized = True
-        self.__connection = con
-
         if not isinstance(con, Connection):
             raise TypeError
-        con._check_thread()
-        con._check_closed()
-        con._cursors.append(weakref.ref(self))
+        self.__connection = con
 
         self.arraysize = 1
         self.row_factory = None
         self.__description = None
         self.__rowcount = -1
 
+        con._check_thread()
+        con._remember_cursor(self)
+
+        self.__initialized = True
+
     def __del__(self):
-        try:
-            self.__connection._cursors.remove(weakref.ref(self))
-        except (AttributeError, ValueError):
-            pass
         if self.__statement:
             self.__statement._reset()
 
                     self.__rowcount += _lib.sqlite3_changes(self.__connection._db)
         finally:
             self.__locked = False
-
         return self
 
     @__check_cursor_wrap
             if rc != _lib.SQLITE_DONE:
                 _lib.sqlite3_finalize(statement)
                 if rc == _lib.SQLITE_OK:
-                    return self
+                    break
                 else:
                     raise self.__connection._get_exception(rc)
+
             rc = _lib.sqlite3_finalize(statement)
             if rc != _lib.SQLITE_OK:
                 raise self.__connection._get_exception(rc)
 
     def __init__(self, connection, sql):
         self.__con = connection
+        self.__con._remember_statement(self)
 
         if not isinstance(sql, basestring):
             raise Warning("SQL is of wrong type. Must be string or unicode.")
             ret = _lib.sqlite3_prepare_v2(self.__con._db, sql, -1,
                                           byref(self._statement), byref(sql))
             self._kind = Statement._DQL
-
         if ret != _lib.SQLITE_OK:
             raise self.__con._get_exception(ret)
-        self.__con._remember_statement(self)
+
         sql = sql.value.decode('utf-8')
         if _check_remaining_sql(sql):
             raise Warning("You can only execute one statement at a time.")

File lib_pypy/cPickle.py

 #     closer to the ones produced by cPickle in CPython
 
 from pickle import StringIO
+try:
+    from pickle import StringBuilderFile
+except ImportError:
+    assert '__pypy__' not in sys.builtin_module_names
+    from pickle import StringIO as StringBuilderFile
 
 PythonPickler = Pickler
 class Pickler(PythonPickler):
 
 @builtinify
 def dumps(obj, protocol=None):
-    file = StringIO()
+    file = StringBuilderFile()
     Pickler(file, protocol).dump(obj)
     return file.getvalue()
 

File lib_pypy/numpypy/core/numeric.py

            'array_repr', 'array_str', 'set_string_function',
            'array_equal', 'outer', 'vdot', 'identity', 'little_endian',
            'Inf', 'inf', 'infty', 'Infinity', 'nan', 'NaN', 'False_', 'True_',
+           'seterr',
           ]
 
 import sys

File pypy/doc/architecture.rst

 
  * a compliant, flexible and fast implementation of the Python_ Language 
    which uses the above toolchain to enable new advanced high-level features 
-   without having to encode the low-level details.
+   without having to encode the low-level details.  We call this PyPy.
 
 By separating concerns in this way, our implementation
 of Python - and other dynamic languages - is able to automatically
 High Level Goals
 =============================
 
-PyPy - the Translation Framework 
+RPython - the Translation Toolchain
 -----------------------------------------------
 
 Traditionally, language interpreters are written in a target platform language
 very challenging because of the involved complexity.
 
 
-PyPy - the Python Interpreter 
+PyPy - the Python Interpreter
 --------------------------------------------
 
 Our main motivation for developing the translation framework is to
 of `Extreme Programming`_, the architecture of PyPy has evolved over time
 and continues to evolve.  Nevertheless, the high level architecture is 
 stable. As described above, there are two rather independent basic
-subsystems: the `Python Interpreter`_ and the `Translation Framework`_.
+subsystems: the `PyPy Python Interpreter`_ and the `RPython Translation Toolchain`_.
 
 .. _`translation framework`:
 
-The Translation Framework
+RPython Translation Toolchain
 -------------------------
 
 The job of the RPython toolchain is to translate RPython_ programs
 
 * Optionally, `various transformations`_ can then be applied which, for
   example, perform optimizations such as inlining, add capabilities
-  such as stackless-style concurrency (deprecated), or insert code for the
+  such as stackless-style concurrency, or insert code for the
   `garbage collector`_.
 
 * Then, the graphs are converted to source code for the target platform
 .. _`standard interpreter`: 
 .. _`python interpreter`: 
 
-The Python Interpreter
+PyPy Python Interpreter
 -------------------------------------
 
 PyPy's *Python Interpreter* is written in RPython and implements the

File pypy/doc/faq.rst

 CPython extension and replace it with a pure python version that the
 JIT can see.
 
-We fully support ctypes-based extensions.
+We fully support ctypes-based extensions. But for best performance, we
+recommend that you use the cffi_ module to interface with C code.
 
 For information on which third party extensions work (or do not work) 
 with PyPy see the `compatibility wiki`_.
 
 .. _`extension modules`: cpython_differences.html#extension-modules
 .. _`cpython differences`: cpython_differences.html
-.. _`compatibility wiki`: https://bitbucket.org/pypy/compatibility/wiki/Home
+.. _`compatibility wiki`:
+.. https://bitbucket.org/pypy/compatibility/wiki/Home
+.. _cffi: http://cffi.readthedocs.org/
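
To illustrate the cffi recommendation above, a minimal ABI-level sketch (not part of this commit; loading libm by the short name "m" assumes a Unix-like platform):

    import cffi

    ffi = cffi.FFI()
    ffi.cdef("double sqrt(double x);")   # declare the C function we need
    libm = ffi.dlopen("m")               # find and load libm by name
    print libm.sqrt(2.0)                 # 1.4142135623730951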
 
 ---------------------------------
 On which platforms does PyPy run?
 bootstrap, as cross compilation is not really meant to work yet.
 At the moment you need CPython 2.5 - 2.7
 for the translation process. PyPy's JIT requires an x86 or x86_64 CPU.
+(There has also been good progress on getting the JIT working for ARMv7.)
 
 ------------------------------------------------
 Which Python version (2.x?) does PyPy implement?
 * Second, and perhaps most important: do you have a really good reason
   for writing the module in RPython in the first place?  Nowadays you
   should really look at alternatives, like writing it in pure Python,
-  using ctypes if it needs to call C code.  Other alternatives are being
-  developed too (as of summer 2011), like a Cython binding.
+  using cffi_ if it needs to call C code.
 
 In this context it is not that important to be able to translate
 RPython modules independently of translating the complete interpreter.

File pypy/doc/getting-started-dev.rst

 The translator is a tool based on the PyPy interpreter which can translate
 sufficiently static RPython programs into low-level code (in particular it can
 be used to translate the `full Python interpreter`_). To be able to experiment with it
-you need to:
+you need to download and install the usual (CPython) version of:
 
-  * Download and install Pygame_.
-
-  * Download and install `Dot Graphviz`_ 
+  * Pygame_
+  * `Dot Graphviz`_
 
 To start the interactive translator shell do::
 

File pypy/doc/whatsnew-head.rst

 .. branch: callback-jit
 Callbacks from C are now better JITted
 
+.. branch: fix-jit-logs
+
 .. branch: remove-globals-in-jit
 
 .. branch: length-hint
 .. branch: numpypy-real-as-view
 Convert real, imag from ufuncs to views. This involves the beginning of
 view() functionality
+.. branch: indexing-by-array
+Adds indexing by scalar, adds int conversion from scalar and single-element arrays,
+fixes compress, and fixes indexing by an array with a smaller shape than the indexed object.
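
Roughly what the branch description above means in practice; a small illustrative session (numpypy is PyPy's built-in numpy module, and plain numpy behaves the same way for these cases):

    from numpypy import array

    int(array(3.7))       # 3: int conversion from a 0-d scalar array
    int(array([5]))       # 5: and from a single-element array
    a = array([[1, 2], [3, 4]])
    a[array(1)]           # array([3, 4]): indexing by a scalar array
    a.compress([True, False, True, False])   # array([1, 3]): flattened first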
 
 .. branch: signatures
 Improved RPython typing
 .. branch: numpy-unify-methods
 .. branch: fix-version-tool
 .. branch: popen2-removal
+.. branch: pickle-dumps
+.. branch: scalar_get_set
 
 .. branch: release-2.0-beta1
 
 
 .. branch: vendor-rename
 Remove minor version number from lib-python dirs to simplify stdlib upgrades.
+
+.. branch: jitframe-on-heap
+Moves optimized JIT frames from stack to heap. As a side effect it enables
+stackless to work well with the JIT on PyPy. Also removes a bunch of code from
+the GC which fixes "cannot find gc roots" errors.
+
+.. branch: pycon2013-doc-fixes
+Documentation fixes after going through the docs at PyCon 2013 sprint.

File pypy/goal/getnightly.py

+#!/usr/bin/env python
+
+import sys
+import os
+import py
+
+if sys.platform.startswith('linux'):
+    arch = 'linux'
+else:
+    print 'Cannot determine the platform, please update this script'
+    sys.exit(1)
+
+if sys.maxint == 2**63 - 1:
+    arch += '64'
+
+filename = 'pypy-c-jit-latest-%s.tar.bz2' % arch
+url = 'http://buildbot.pypy.org/nightly/trunk/%s' % filename
+tmp = py.path.local.mkdtemp()
+mydir = tmp.chdir()
+print 'Downloading pypy to', tmp
+if os.system('wget "%s"' % url) != 0:
+    sys.exit(1)
+
+print 'Extracting pypy binary'
+mydir.chdir()
+os.system("tar -x -v --wildcards --strip-components=2 -f %s '*/bin/pypy'" % tmp.join(filename))
+

File pypy/interpreter/error.py

-import os, sys
+import cStringIO
+import os
+import sys
+import traceback
+from errno import EINTR
+
 from rpython.rlib import jit
 from rpython.rlib.objectmodel import we_are_translated
-from errno import EINTR
+
+from pypy.interpreter import debug
+
 
 AUTO_DEBUG = os.getenv('PYPY_DEBUG')
 RECORD_INTERPLEVEL_TRACEBACK = True
         if space is None:
             # this part NOT_RPYTHON
             exc_typename = str(self.w_type)
-            exc_value    = str(w_value)
+            exc_value = str(w_value)
         else:
             w = space.wrap
             if space.is_w(space.type(self.w_type), space.w_str):
 
     def print_application_traceback(self, space, file=None):
         "NOT_RPYTHON: Dump a standard application-level traceback."
-        if file is None: file = sys.stderr
+        if file is None:
+            file = sys.stderr
         self.print_app_tb_only(file)
         print >> file, self.errorstr(space)
 
     def print_detailed_traceback(self, space=None, file=None):
         """NOT_RPYTHON: Dump a nice detailed interpreter- and
         application-level traceback, useful to debug the interpreter."""
-        import traceback, cStringIO
-        if file is None: file = sys.stderr
+        if file is None:
+            file = sys.stderr
         f = cStringIO.StringIO()
         for i in range(len(self.debug_excs)-1, -1, -1):
             print >> f, "Traceback (interpreter-level):"
         self.print_app_tb_only(file)
         print >> file, '(application-level)', self.errorstr(space)
         if AUTO_DEBUG:
-            import debug
             debug.fire(self)
 
     @jit.unroll_safe
         #  ("string", ...)            ("string", ...)              deprecated
         #  (inst, None)               (inst.__class__, inst)          no
         #
-        w_type  = self.w_type
+        w_type = self.w_type
         w_value = self.get_w_value(space)
         while space.is_true(space.isinstance(w_type, space.w_tuple)):
             w_type = space.getitem(w_type, space.wrap(0))
             w_value = w_inst
             w_type = w_instclass
 
-        self.w_type   = w_type
+        self.w_type = w_type
         self._w_value = w_value
 
     def _exception_getclass(self, space, w_inst):
         from rpython.rlib.unroll import unrolling_iterable
         attrs = ['x%d' % i for i in range(len(formats))]
         entries = unrolling_iterable(enumerate(attrs))
-        #
+
         class OpErrFmt(OperationError):
             def __init__(self, w_type, strings, *args):
                 self.setup(w_type)
                 for i, attr in entries:
                     setattr(self, attr, args[i])
                 assert w_type is not None
+
             def _compute_value(self):
                 lst = [None] * (len(formats) + len(formats) + 1)
                 for i, attr in entries:

File pypy/interpreter/nestedscope.py

-from pypy.interpreter.error import OperationError
-from pypy.interpreter import function, pycode, pyframe
-from pypy.interpreter.baseobjspace import Wrappable
-from pypy.interpreter.mixedmodule import MixedModule
-from pypy.interpreter.astcompiler import consts
 from rpython.rlib import jit
 from rpython.tool.uid import uid
 
+from pypy.interpreter import function, pycode, pyframe
+from pypy.interpreter.astcompiler import consts
+from pypy.interpreter.baseobjspace import Wrappable
+from pypy.interpreter.error import OperationError
+from pypy.interpreter.mixedmodule import MixedModule
+
+
 class Cell(Wrappable):
     "A simple container for a wrapped value."
 
 
     def get(self):
         if self.w_value is None:
-            raise ValueError, "get() from an empty cell"
+            raise ValueError("get() from an empty cell")
         return self.w_value
 
     def set(self, w_value):
 
     def delete(self):
         if self.w_value is None:
-            raise ValueError, "delete() on an empty cell"
+            raise ValueError("delete() on an empty cell")
         self.w_value = None
-  
+
     def descr__cmp__(self, space, w_other):
         other = space.interpclass_w(w_other)
         if not isinstance(other, Cell):
         return space.cmp(self.w_value, other.w_value)
 
     def descr__reduce__(self, space):
-        w_mod    = space.getbuiltinmodule('_pickle_support')
-        mod      = space.interp_w(MixedModule, w_mod)
+        w_mod = space.getbuiltinmodule('_pickle_support')
+        mod = space.interp_w(MixedModule, w_mod)
         new_inst = mod.get('cell_new')
-        if self.w_value is None:    #when would this happen?
+        if self.w_value is None:    # when would this happen?
             return space.newtuple([new_inst, space.newtuple([])])
         tup = [self.w_value]
         return space.newtuple([new_inst, space.newtuple([]),
 
     def descr__setstate__(self, space, w_state):
         self.w_value = space.getitem(w_state, space.wrap(0))
-        
+
     def __repr__(self):
         """ representation for debugging purposes """
         if self.w_value is None:
             raise OperationError(space.w_ValueError, space.wrap("Cell is empty"))
 
 
-
 super_initialize_frame_scopes = pyframe.PyFrame.initialize_frame_scopes
-super_fast2locals             = pyframe.PyFrame.fast2locals
-super_locals2fast             = pyframe.PyFrame.locals2fast
+super_fast2locals = pyframe.PyFrame.fast2locals
+super_locals2fast = pyframe.PyFrame.locals2fast
 
 
 class __extend__(pyframe.PyFrame):
     def fast2locals(self):
         super_fast2locals(self)
         # cellvars are values exported to inner scopes
-        # freevars are values coming from outer scopes 
+        # freevars are values coming from outer scopes
         freevarnames = list(self.pycode.co_cellvars)
         if self.pycode.co_flags & consts.CO_OPTIMIZED:
             freevarnames.extend(self.pycode.co_freevars)
         except ValueError:
             varname = self.getfreevarname(varindex)
             if self.iscellvar(varindex):
-                message = "local variable '%s' referenced before assignment"%varname
+                message = "local variable '%s' referenced before assignment" % varname
                 w_exc_type = self.space.w_UnboundLocalError
             else:
                 message = ("free variable '%s' referenced before assignment"
-                           " in enclosing scope"%varname)
+                           " in enclosing scope" % varname)
                 w_exc_type = self.space.w_NameError
             raise OperationError(w_exc_type, self.space.wrap(message))
         else:

File pypy/module/_continuation/interp_continuation.py

         #  - normal:      self.sthread != None, not is_empty_handle(self.h)
         #  - finished:    self.sthread != None, is_empty_handle(self.h)
 
-    def __del__(self):
-        sthread = self.sthread
-        if sthread is not None and not sthread.is_empty_handle(self.h):
-            sthread.destroy(self.h)
-
     def check_sthread(self):
         ec = self.space.getexecutioncontext()
         if ec.stacklet_thread is not self.sthread:
         if self.sthread is not None:
             raise geterror(self.space, "continulet already __init__ialized")
         sthread = build_sthread(self.space)
-        workaround_disable_jit(sthread)
         #
         # hackish: build the frame "by hand", passing it the correct arguments
         space = self.space
                 global_state.clear()
                 raise geterror(self.space, "continulet already finished")
         self.check_sthread()
-        workaround_disable_jit(self.sthread)
-        #
+
         global_state.origin = self
         if to is None:
             # simple switch: going to self.h
         sthread = ec.stacklet_thread = SThread(space, ec)
     return sthread
 
-def workaround_disable_jit(sthread):
-    # A bad workaround to kill the JIT anywhere in this thread.
-    # This forces all the frames.  It's a bad workaround because
-    # it takes O(depth) time, and it will cause some "abort:
-    # vable escape" in the JIT.  The goal is to prevent any frame
-    # from being still virtuals, because the JIT generates code
-    # to un-virtualizable them "on demand" by loading values based
-    # on FORCE_TOKEN, which is an address in the stack.
-    sthread.ec.force_all_frames()
-
 # ____________________________________________________________
 
 def permute(space, args_w):

File pypy/module/_io/interp_iobase.py

     def remove(self, w_iobase):
         holder = w_iobase.streamholder
         if holder is not None:
-            del self.streams[holder]
+            try:
+                del self.streams[holder]
+            except KeyError:
+                # this can happen in daemon threads
+                pass
 
     def flush_all(self, space):
         while self.streams:

File pypy/module/cppyy/capi/reflex_capi.py

 srcpath = pkgpath.join("src")
 incpath = pkgpath.join("include")
 
+import commands
+(config_stat, incdir) = commands.getstatusoutput("root-config --incdir")
+
 if os.environ.get("ROOTSYS"):
-    import commands
-    (stat, incdir) = commands.getstatusoutput("root-config --incdir")
-    if stat != 0:        # presumably Reflex-only
+    if config_stat != 0:     # presumably Reflex-only
         rootincpath = [os.path.join(os.environ["ROOTSYS"], "include")]
         rootlibpath = [os.path.join(os.environ["ROOTSYS"], "lib64"), os.path.join(os.environ["ROOTSYS"], "lib")]
     else:
         rootincpath = [incdir]
         rootlibpath = commands.getoutput("root-config --libdir").split()
 else:
-    rootincpath = []
-    rootlibpath = []
+    if config_stat == 0:
+        rootincpath = [incdir]
+        rootlibpath = commands.getoutput("root-config --libdir").split()
+    else:
+        rootincpath = []
+        rootlibpath = []
 
 def identify():
     return 'Reflex'

File pypy/module/cppyy/test/Makefile

 
 ifeq ($(ROOTSYS),)
   genreflex=genreflex
-  cppflags=
+  cppflags=-I$(shell root-config --incdir) -L$(shell root-config --libdir)
 else
   genreflex=$(ROOTSYS)/bin/genreflex
   ifeq ($(wildcard $(ROOTSYS)/include),)     # standard locations used?

File pypy/module/cppyy/test/test_crossing.py

 import py, os, sys
+from pypy.interpreter.gateway import interp2app, unwrap_spec
+from rpython.translator.tool.cbuild import ExternalCompilationInfo
+from rpython.translator import platform
+from rpython.translator.gensupp import uniquemodulename
+from rpython.tool.udir import udir
+
+from pypy.module.cpyext import api
+from pypy.module.cpyext.state import State
+
 from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase
 
+
 currpath = py.path.local(__file__).dirpath()
 test_dct = str(currpath.join("crossingDict.so"))
 
     if err:
         raise OSError("'make' failed (see stderr)")
 
+# from pypy/module/cpyext/test/test_cpyext.py; modified to accept more external
+# symbols and called directly instead of import_module
+def compile_extension_module(space, modname, symbols, **kwds):
+    """
+    Build an extension module and return the filename of the resulting native
+    code file.
+
+    modname is the name of the module, possibly including dots if it is a module
+    inside a package.
+
+    Any extra keyword arguments are passed on to ExternalCompilationInfo to
+    build the module (so specify your source with one of those).
+    """
+    state = space.fromcache(State)
+    api_library = state.api_lib
+    if sys.platform == 'win32':
+        kwds["libraries"] = [api_library]
+        # '%s' undefined; assuming extern returning int
+        kwds["compile_extra"] = ["/we4013"]
+    elif sys.platform == 'darwin':
+        kwds["link_files"] = [str(api_library + '.dylib')]
+    else:
+        kwds["link_files"] = [str(api_library + '.so')]
+        if sys.platform.startswith('linux'):
+            kwds["compile_extra"]=["-Werror=implicit-function-declaration"]
+
+    modname = modname.split('.')[-1]
+    eci = ExternalCompilationInfo(
+        export_symbols=['init%s' % (modname,)]+symbols,
+        include_dirs=api.include_dirs,
+        **kwds
+        )
+    eci = eci.convert_sources_to_files()
+    dirname = (udir/uniquemodulename('module')).ensure(dir=1)
+    soname = platform.platform.compile(
+        [], eci,
+        outputfilename=str(dirname/modname),
+        standalone=False)
+    from pypy.module.imp.importing import get_so_extension
+    pydname = soname.new(purebasename=modname, ext=get_so_extension(space))
+    soname.rename(pydname)
+    return str(pydname)
 
 class AppTestCrossing(AppTestCpythonExtensionBase):
     spaceconfig = dict(usemodules=['cpyext', 'cppyy', 'thread', '_rawffi', '_ffi',
                                    'array', 'itertools', 'rctime', 'binascii'])
 
     def setup_class(cls):
-        # following from AppTestCpythonExtensionBase, with cppyy added
-        cls.space.getbuiltinmodule("cpyext")
-        from pypy.module.imp.importing import importhook
-        importhook(cls.space, "os") # warm up reference counts
-        from pypy.module.cpyext.pyobject import RefcountState
-        state = cls.space.fromcache(RefcountState)
-        state.non_heaptypes_w[:] = []
-
-        # cppyy specific additions (not that the test_dct is loaded late
+        AppTestCpythonExtensionBase.setup_class.im_func(cls)
+        # cppyy specific additions (note that test_dct is loaded late
         # to allow the generated extension module be loaded first)
         cls.w_test_dct    = cls.space.wrap(test_dct)
         cls.w_pre_imports = cls.space.appexec([], """():
             import cppyy, ctypes""")    # prevents leak-checking complaints on ctypes
-        from pypy.module.imp.importing import get_so_extension
-        cls.w_soext = cls.space.wrap(get_so_extension(cls.space))
+
+    def setup_method(self, func):
+        AppTestCpythonExtensionBase.setup_method.im_func(self, func)
+
+        @unwrap_spec(name=str, init=str, body=str)
+        def load_cdll(space, name, init, body, w_symbols):
+            # the following is loosely from test_cpyext.py import_module; it
+            # is copied here to be able to tweak the call to
+            # compile_extension_module and to get a different return result
+            # than in that function
+            code = """
+            #include <Python.h>
+            %(body)s
+
+            void init%(name)s(void) {
+            %(init)s
+            }
+            """ % dict(name=name, init=init, body=body)
+            kwds = dict(separate_module_sources=[code])
+            symbols = [space.str_w(w_item) for w_item in space.fixedview(w_symbols)]
+            mod = compile_extension_module(space, name, symbols, **kwds)
+
+            # explicitly load the module as a CDLL rather than as a module
+            import ctypes
+            from pypy.module.imp.importing import get_so_extension
+            fullmodname = os.path.join(
+                os.path.dirname(mod), name + get_so_extension(space))
+            return ctypes.CDLL(fullmodname, ctypes.RTLD_GLOBAL)
+
+        self.w_load_cdll = self.space.wrap(interp2app(load_cdll))
 
     def test00_base_class(self):
         """Test from cpyext; only here to see whether the imported class works"""
 
         import os, ctypes
 
+        name = 'bar'
+
         init = """
         if (Py_IsInitialized())
             Py_InitModule("bar", methods);
         """
+
         # note: only the symbols are needed for C, none for python
         body = """
         long bar_unwrap(PyObject* arg)
             { NULL }
         };
         """
+        # explicitly load the module as a CDLL rather than as a module
+#        dirname = space.wrap(os.path.dirname(mod))
 
-        dirname = self.import_module(name='bar', init=init, body=body, load_it=False)
-        fullmodname = os.path.join(dirname, 'bar' + self.soext)
-        self.cmodule = ctypes.CDLL(fullmodname, ctypes.RTLD_GLOBAL)
+#        dirname = self.import_module(name='bar', init=init, body=body, load_it=False)
+#        fullmodname = os.path.join(dirname, name + self.soext)
+        self.cmodule = self.load_cdll(name, init, body, ['bar_unwrap', 'bar_wrap'])#ctypes.CDLL(fullmodname, ctypes.RTLD_GLOBAL)
 
     def test02_crossing_dict(self):
         """Test availability of all needed classes in the dict"""

File pypy/module/cppyy/test/test_pythonify.py

 test_dct = str(currpath.join("example01Dict.so"))
 
 def setup_module(mod):
+    # force removal of ROOTSYS for this one test, which serves as a test itself
+    if os.getenv("ROOTSYS"):
+        os.unsetenv("ROOTSYS")
     if sys.platform == 'win32':
         py.test.skip("win32 not supported so far")
     err = os.system("cd '%s' && make example01Dict.so" % currpath)

File pypy/module/micronumpy/arrayimpl/base.py

     def base(self):
         raise NotImplementedError
 
-    def create_iter(self, shape=None):
+    def create_iter(self, shape=None, backward_broadcast=False):
         raise NotImplementedError
 
 class BaseArrayIterator(object):

File pypy/module/micronumpy/arrayimpl/concrete.py

 
-from pypy.module.micronumpy.arrayimpl import base
+from pypy.module.micronumpy.arrayimpl import base, scalar
 from pypy.module.micronumpy import support, loop, iter
 from pypy.module.micronumpy.base import convert_to_array, W_NDimArray,\
      ArrayArgumentException
     parent = None
 
     # JIT hints that length of all those arrays is a constant
-    
+
     def get_shape(self):
         shape = self.shape
         jit.hint(len(shape), promote=True)
                               new_shape, self, orig_array)
         else:
             return None
-    
+
     def get_real(self, orig_array):
         strides = self.get_strides()
         backstrides = self.get_backstrides()
             dtype =  self.dtype.float_type
             return SliceArray(self.start, strides, backstrides,
                           self.get_shape(), self, orig_array, dtype=dtype)
-        return SliceArray(self.start, strides, backstrides, 
+        return SliceArray(self.start, strides, backstrides,
                           self.get_shape(), self, orig_array)
 
+    def set_real(self, space, orig_array, w_value):    
+        tmp = self.get_real(orig_array)
+        tmp.setslice(space, convert_to_array(space, w_value))
+
     def get_imag(self, orig_array):
         strides = self.get_strides()
         backstrides = self.get_backstrides()
         if self.dtype.is_complex_type():
             dtype =  self.dtype.float_type
-            return SliceArray(self.start + dtype.get_size(), strides, 
+            return SliceArray(self.start + dtype.get_size(), strides,
                     backstrides, self.get_shape(), self, orig_array, dtype=dtype)
         if self.dtype.is_flexible_type():
             # numpy returns self for self.imag
         impl.fill(self.dtype.box(0))
         return impl
 
+    def set_imag(self, space, orig_array, w_value):    
+        tmp = self.get_imag(orig_array)
+        tmp.setslice(space, convert_to_array(space, w_value))
+
     # -------------------- applevel get/setitem -----------------------
 
     @jit.unroll_safe
             space.isinstance_w(w_idx, space.w_slice) or
             space.is_w(w_idx, space.w_None)):
             raise IndexError
-        if isinstance(w_idx, W_NDimArray):
+        if isinstance(w_idx, W_NDimArray) and not isinstance(w_idx.implementation, scalar.Scalar):
             raise ArrayArgumentException
         shape = self.get_shape()
         shape_len = len(shape)
-        if shape_len == 0:
-            raise OperationError(space.w_IndexError, space.wrap(
-                "0-d arrays can't be indexed"))
         view_w = None
-        if (space.isinstance_w(w_idx, space.w_list) or
-            isinstance(w_idx, W_NDimArray)):
+        if space.isinstance_w(w_idx, space.w_list):
             raise ArrayArgumentException
         if space.isinstance_w(w_idx, space.w_tuple):
             view_w = space.fixedview(w_idx)
         shape = self.get_shape()[:]
         strides = self.get_strides()[:]
         backstrides = self.get_backstrides()[:]
-        shape[axis1], shape[axis2] = shape[axis2], shape[axis1]   
+        shape[axis1], shape[axis2] = shape[axis2], shape[axis1]
         strides[axis1], strides[axis2] = strides[axis2], strides[axis1]
-        backstrides[axis1], backstrides[axis2] = backstrides[axis2], backstrides[axis1] 
-        return W_NDimArray.new_slice(self.start, strides, 
+        backstrides[axis1], backstrides[axis2] = backstrides[axis2], backstrides[axis1]
+        return W_NDimArray.new_slice(self.start, strides,
                                      backstrides, shape, self, orig_arr)
 
     def get_storage_as_int(self, space):
         self.backstrides = backstrides
         self.storage = storage
 
-    def create_iter(self, shape=None):
+    def create_iter(self, shape=None, backward_broadcast=False):
         if shape is None or shape == self.get_shape():
             return iter.ConcreteArrayIterator(self)
         r = calculate_broadcast_strides(self.get_strides(),
                                         self.get_backstrides(),
-                                        self.get_shape(), shape)
+                                        self.get_shape(), shape, backward_broadcast)
         return iter.MultiDimViewIterator(self, self.dtype, 0, r[0], r[1], shape)
 
     def fill(self, box):
         free_raw_storage(self.storage, track_allocation=False)
 
 
-        
+
 
 class NonWritableArray(ConcreteArray):
     def descr_setitem(self, space, orig_array, w_index, w_value):
         raise OperationError(space.w_RuntimeError, space.wrap(
             "array is not writable"))
-        
+
 
 class SliceArray(BaseConcreteArray):
     def __init__(self, start, strides, backstrides, shape, parent, orig_arr,
     def fill(self, box):
         loop.fill(self, box.convert_to(self.dtype))
 
-    def create_iter(self, shape=None):
+    def create_iter(self, shape=None, backward_broadcast=False):
         if shape is not None and shape != self.get_shape():
             r = calculate_broadcast_strides(self.get_strides(),
                                             self.get_backstrides(),
-                                            self.get_shape(), shape)
+                                            self.get_shape(), shape,
+                                            backward_broadcast)
             return iter.MultiDimViewIterator(self.parent, self.dtype,
                                              self.start, r[0], r[1], shape)
         if len(self.get_shape()) == 1:
-            return iter.OneDimViewIterator(self.parent, self.dtype, self.start, 
+            return iter.OneDimViewIterator(self.parent, self.dtype, self.start,
                     self.get_strides(), self.get_shape())
         return iter.MultiDimViewIterator(self.parent, self.dtype, self.start,
                                     self.get_strides(),

File pypy/module/micronumpy/arrayimpl/scalar.py

 
 from pypy.module.micronumpy.arrayimpl import base
-from pypy.module.micronumpy.base import W_NDimArray
+from pypy.module.micronumpy.base import W_NDimArray, convert_to_array
 from pypy.module.micronumpy import support
 from pypy.interpreter.error import OperationError
 
     def get_strides(self):
         return []
 
-    def create_iter(self, shape=None):
+    def get_backstrides(self):
+        return []
+
+    def create_iter(self, shape=None, backward_broadcast=False):
         return ScalarIterator(self)
 
     def get_scalar_value(self):
     def transpose(self, _):
         return self
 
+    def get_real(self, orig_array):
+        if self.dtype.is_complex_type():
+            scalar = Scalar(self.dtype.float_type)
+            scalar.value = self.value.convert_real_to(scalar.dtype)
+            return scalar
+        return self
+
+    def set_real(self, space, orig_array, w_val):
+        w_arr = convert_to_array(space, w_val)
+        dtype = self.dtype.float_type or self.dtype
+        if len(w_arr.get_shape()) > 0:
+            raise OperationError(space.w_ValueError, space.wrap(
+                "could not broadcast input array from shape " + 
+                "(%s) into shape ()" % (
+                    ','.join([str(x) for x in w_arr.get_shape()],))))
+        if self.dtype.is_complex_type():
+            #imag = dtype.itemtype.unbox(self.value.convert_imag_to(dtype))
+            #val = dtype.itemtype.unbox(w_arr.get_scalar_value().
+            #                                           convert_to(dtype))
+            #self.value = self.dtype.box_complex(val, imag)
+            self.value = self.dtype.itemtype.composite(w_arr.get_scalar_value().convert_to(dtype),
+                                    self.value.convert_imag_to(dtype))
+        else:
+            self.value = w_arr.get_scalar_value()
+
+    def get_imag(self, orig_array):
+        if self.dtype.is_complex_type():
+            scalar = Scalar(self.dtype.float_type)
+            scalar.value = self.value.convert_imag_to(scalar.dtype)
+            return scalar
+        scalar = Scalar(self.dtype)
+        if self.dtype.is_flexible_type():
+            scalar.value = self.value
+        else:
+            scalar.value = scalar.dtype.itemtype.box(0)
+        return scalar
+
+    def set_imag(self, space, orig_array, w_val):
+        #Only called on complex dtype
+        assert self.dtype.is_complex_type()
+        w_arr = convert_to_array(space, w_val)
+        dtype = self.dtype.float_type
+        if len(w_arr.get_shape()) > 0:
+            raise OperationError(space.w_ValueError, space.wrap(
+                "could not broadcast input array from shape " + 
+                "(%s) into shape ()" % (
+                    ','.join([str(x) for x in w_arr.get_shape()],))))
+        #real = dtype.itemtype.unbox(self.value.convert_real_to(dtype))
+        #val = dtype.itemtype.unbox(w_arr.get_scalar_value().
+        #                                              convert_to(dtype))
+        #self.value = self.dtype.box_complex(real, val)
+        self.value = self.dtype.itemtype.composite(
+                            self.value.convert_real_to(dtype),
+                            w_arr.get_scalar_value(),
+                            )
+
     def descr_getitem(self, space, _, w_idx):
         raise OperationError(space.w_IndexError,
                              space.wrap("scalars cannot be indexed"))
     def descr_setitem(self, space, _, w_idx, w_val):
         raise OperationError(space.w_IndexError,
                              space.wrap("scalars cannot be indexed"))
-        
+
     def setitem_index(self, space, idx, w_val):
         raise OperationError(space.w_IndexError,
                              space.wrap("scalars cannot be indexed"))
 
     def reshape(self, space, orig_array, new_shape):
         return self.set_shape(space, orig_array, new_shape)
-        
+
     def create_axis_iter(self, shape, dim, cum):
         raise Exception("axis iter should not happen on scalar")
 

File pypy/module/micronumpy/interp_dtype.py

 
     def build_and_convert(self, space, box):
         return self.itemtype.build_and_convert(space, self, box)
-
     def coerce(self, space, w_item):
         return self.itemtype.coerce(space, self, w_item)
 

File pypy/module/micronumpy/interp_flatiter.py

     def get_shape(self):
         return self.shape
 
-    def create_iter(self, shape=None):
+    def create_iter(self, shape=None, backward_broadcast=False):
         assert isinstance(self.base(), W_NDimArray)
         return self.base().create_iter()
 

File pypy/module/micronumpy/interp_numarray.py

 from pypy.module.micronumpy import loop
 from pypy.module.micronumpy.dot import match_dot_shapes
 from pypy.module.micronumpy.interp_arrayops import repeat, choose
+from pypy.module.micronumpy.arrayimpl import scalar
 from rpython.tool.sourcetools import func_with_new_name
 from rpython.rlib import jit
 from rpython.rlib.rstring import StringBuilder
             raise OperationError(space.w_ValueError,
                                  space.wrap("index out of range for array"))
         size = loop.count_all_true(arr)
-        res = W_NDimArray.from_shape([size], self.get_dtype())
+        if len(arr.get_shape()) == 1:
+            res_shape = [size] + self.get_shape()[1:]
+        else:
+            res_shape = [size]
+        res = W_NDimArray.from_shape(res_shape, self.get_dtype())
         return loop.getitem_filter(res, self, arr)
 
     def setitem_filter(self, space, idx, val):
         s.append('])')
         return s.build()
 
-    def create_iter(self, shape=None):
+    def create_iter(self, shape=None, backward_broadcast=False):
         assert isinstance(self.implementation, BaseArrayImplementation)
-        return self.implementation.create_iter(shape)
+        return self.implementation.create_iter(shape=shape,
+                                   backward_broadcast=backward_broadcast)
 
     def create_axis_iter(self, shape, dim, cum):
         return self.implementation.create_axis_iter(shape, dim, cum)
 
     def descr_set_real(self, space, w_value):
         # copy (broadcast) values into self
-        tmp = self.implementation.get_real(self)
-        tmp.setslice(space, convert_to_array(space, w_value))
+        self.implementation.set_real(space, self, w_value)
 
     def descr_set_imag(self, space, w_value):
         # if possible, copy (broadcast) values into self
         if not self.get_dtype().is_complex_type():
             raise OperationError(space.w_TypeError,
                     space.wrap('array does not have imaginary part to set'))
-        tmp = self.implementation.get_imag(self)
-        tmp.setslice(space, convert_to_array(space, w_value))
+        self.implementation.set_imag(space, self, w_value)
 
     def descr_reshape(self, space, args_w):
         """reshape(...)
         if not space.is_none(w_axis):
             raise OperationError(space.w_NotImplementedError,
                                  space.wrap("axis unsupported for compress"))
+            arr = self
+        else:
+            arr = self.descr_reshape(space, [space.wrap(-1)])
         index = convert_to_array(space, w_obj)
-        return self.getitem_filter(space, index)
+        return arr.getitem_filter(space, index)
 
     def descr_flatten(self, space, w_order=None):
         if self.is_scalar():
     descr_argmax = _reduce_argmax_argmin_impl("max")
     descr_argmin = _reduce_argmax_argmin_impl("min")
 
+    def descr_int(self, space):
+        shape = self.get_shape()
+        if len(shape) == 0:
+            assert isinstance(self.implementation, scalar.Scalar)
+            return space.int(space.wrap(self.implementation.get_scalar_value()))
+        if shape == [1]:
+            return space.int(self.descr_getitem(space, space.wrap(0)))
+        raise OperationError(space.w_TypeError, space.wrap("only length-1 arrays can be converted to Python scalars"))
+
 
 @unwrap_spec(offset=int)
 def descr_new_array(space, w_subtype, w_shape, w_dtype=None, w_buffer=None,
 
     __repr__ = interp2app(W_NDimArray.descr_repr),
     __str__ = interp2app(W_NDimArray.descr_str),
+    __int__ = interp2app(W_NDimArray.descr_int),
 
     __pos__ = interp2app(W_NDimArray.descr_pos),
     __neg__ = interp2app(W_NDimArray.descr_neg),

File pypy/module/micronumpy/interp_ufuncs.py

     return not dtype.itemtype.bool(val)
 
 class W_Ufunc(Wrappable):
-    _attrs_ = ["name", "promote_to_float", "promote_bools", "identity", 
+    _attrs_ = ["name", "promote_to_float", "promote_bools", "identity",
                "allow_complex", "complex_to_float"]
-    _immutable_fields_ = ["promote_to_float", "promote_bools", "name", 
+    _immutable_fields_ = ["promote_to_float", "promote_bools", "name",
             "allow_complex", "complex_to_float"]
 
     def __init__(self, name, promote_to_float, promote_bools, identity,
         assert isinstance(self, W_Ufunc2)
         obj = convert_to_array(space, w_obj)
         if obj.get_dtype().is_flexible_type():
-            raise OperationError(space.w_TypeError, 
+            raise OperationError(space.w_TypeError,
                       space.wrap('cannot perform reduce for flexible type'))
         obj_shape = obj.get_shape()
         if obj.is_scalar():
             return obj.get_scalar_value()
         shapelen = len(obj_shape)
-        axis = unwrap_axis_arg(space, shapelen, w_axis)    
+        axis = unwrap_axis_arg(space, shapelen, w_axis)
         assert axis >= 0
         size = obj.get_size()
         dtype = interp_dtype.decode_w_dtype(space, dtype)
                 out = None
         w_obj = convert_to_array(space, w_obj)
         if w_obj.get_dtype().is_flexible_type():
-            raise OperationError(space.w_TypeError, 
+            raise OperationError(space.w_TypeError,
                       space.wrap('Not implemented for this type'))
         if self.int_only and not w_obj.get_dtype().is_int_type():
             raise OperationError(space.w_TypeError, space.wrap(
             if self.complex_to_float and calc_dtype.is_complex_type():
                 if calc_dtype.name == 'complex64':
                     res_dtype = interp_dtype.get_dtype_cache(space).w_float32dtype
-                else:    
+                else:
                     res_dtype = interp_dtype.get_dtype_cache(space).w_float64dtype
         if w_obj.is_scalar():
             w_val = self.func(calc_dtype,
     argcount = 2
 
     def __init__(self, func, name, promote_to_float=False, promote_bools=False,
-        identity=None, comparison_func=False, int_only=False, 
+        identity=None, comparison_func=False, int_only=False,
         allow_complex=True, complex_to_float=False):
 
         W_Ufunc.__init__(self, name, promote_to_float, promote_bools, identity,
         return current_guess
     elif space.isinstance_w(w_obj, space.w_str):
         if (current_guess is None):
-            return interp_dtype.variable_dtype(space, 
+            return interp_dtype.variable_dtype(space,
                                                'S%d' % space.len_w(w_obj))
         elif current_guess.num ==18:
             if  current_guess.itemtype.get_size() < space.len_w(w_obj):
-                return interp_dtype.variable_dtype(space, 
+                return interp_dtype.variable_dtype(space,
                                                    'S%d' % space.len_w(w_obj))
         return current_guess
     if current_guess is complex_type:
             ("negative", "neg", 1),
             ("absolute", "abs", 1, {"complex_to_float": True}),
             ("sign", "sign", 1, {"promote_bools": True}),
-            ("signbit", "signbit", 1, {"bool_result": True, 
+            ("signbit", "signbit", 1, {"bool_result": True,
                                        "allow_complex": False}),
             ("reciprocal", "reciprocal", 1),
             ("conjugate", "conj", 1),
                                  "allow_complex": False}),
             ("fmax", "fmax", 2, {"promote_to_float": True}),
             ("fmin", "fmin", 2, {"promote_to_float": True}),
-            ("fmod", "fmod", 2, {"promote_to_float": True, 
+            ("fmod", "fmod", 2, {"promote_to_float": True,
                                  'allow_complex': False}),
             ("floor", "floor", 1, {"promote_to_float": True,
                                    "allow_complex": False}),

File pypy/module/micronumpy/loop.py

 
 def getitem_filter(res, arr, index):
     res_iter = res.create_iter()
-    index_iter = index.create_iter()
+    shapelen = len(arr.get_shape())
+    if shapelen > 1 and len(index.get_shape()) < 2:
+        index_iter = index.create_iter(arr.get_shape(), backward_broadcast=True)
+    else:
+        index_iter = index.create_iter()
     arr_iter = arr.create_iter()
-    shapelen = len(arr.get_shape())
     arr_dtype = arr.get_dtype()
     index_dtype = index.get_dtype()
     # XXX length of shape of index as well?

File pypy/module/micronumpy/strides.py

     rshape += shape[s:]
     return rshape, rstart, rstrides, rbackstrides
 
-def calculate_broadcast_strides(strides, backstrides, orig_shape, res_shape):
+def calculate_broadcast_strides(strides, backstrides, orig_shape, res_shape, backwards=False):
     rstrides = []
     rbackstrides = []
     for i in range(len(orig_shape)):
         else:
             rstrides.append(strides[i])
             rbackstrides.append(backstrides[i])
-    rstrides = [0] * (len(res_shape) - len(orig_shape)) + rstrides
-    rbackstrides = [0] * (len(res_shape) - len(orig_shape)) + rbackstrides
+    if backwards:
+        rstrides = rstrides + [0] * (len(res_shape) - len(orig_shape))  
+        rbackstrides = rbackstrides + [0] * (len(res_shape) - len(orig_shape)) 
+    else:
+        rstrides = [0] * (len(res_shape) - len(orig_shape)) + rstrides
+        rbackstrides = [0] * (len(res_shape) - len(orig_shape)) + rbackstrides
     return rstrides, rbackstrides
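
A standalone sketch of the padding choice above, showing why backwards=True matters when a 1-d index is broadcast against a multi-dimensional array (simplified: the per-dimension size-1 handling from the loop above is omitted):

    def pad_broadcast_strides(rstrides, orig_shape, res_shape, backwards=False):
        # Dimensions missing from orig_shape get stride 0.  Normally they are
        # prepended (leading axes broadcast); with backwards=True they are
        # appended instead (trailing axes broadcast), as getitem_filter needs.
        extra = [0] * (len(res_shape) - len(orig_shape))
        if backwards:
            return rstrides + extra
        return extra + rstrides

    # broadcasting a shape-(3,) index over a shape-(3, 4) array, itemsize 8:
    print pad_broadcast_strides([8], (3,), (3, 4))                  # [0, 8]
    print pad_broadcast_strides([8], (3,), (3, 4), backwards=True)  # [8, 0]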
 
 def is_single_elem(space, w_elem, is_rec_type):

File pypy/module/micronumpy/test/test_complex.py

     def test_fmax(self):
         from numpypy import fmax, array
         nnan, nan, inf, ninf = float('-nan'), float('nan'), float('inf'), float('-inf')
-        a = array((complex(ninf, 10), complex(10, ninf), 
+        a = array((complex(ninf, 10), complex(10, ninf),
                    complex( inf, 10), complex(10,  inf),
                    5+5j, 5-5j, -5+5j, -5-5j,
                    0+5j, 0-5j, 5, -5,
                    complex(nan, 0), complex(0, nan)), dtype = complex)
         b = [ninf]*a.size
-        res = [a[0 ], a[1 ], a[2 ], a[3 ], 
+        res = [a[0 ], a[1 ], a[2 ], a[3 ],
                a[4 ], a[5 ], a[6 ], a[7 ],
                a[8 ], a[9 ], a[10], a[11],
                b[12], b[13]]
         assert (fmax(a, b) == res).all()
         b = [inf]*a.size
-        res = [b[0 ], b[1 ], a[2 ], b[3 ], 
+        res = [b[0 ], b[1 ], a[2 ], b[3 ],
                b[4 ], b[5 ], b[6 ], b[7 ],
                b[8 ], b[9 ], b[10], b[11],
                b[12], b[13]]
         assert (fmax(a, b) == res).all()
         b = [0]*a.size
-        res = [b[0 ], a[1 ], a[2 ], a[3 ], 
+        res = [b[0 ], a[1 ], a[2 ], a[3 ],
                a[4 ], a[5 ], b[6 ], b[7 ],
                a[8 ], b[9 ], a[10], b[11],
                b[12], b[13]]
     def test_fmin(self):
         from numpypy import fmin, array
         nnan, nan, inf, ninf = float('-nan'), float('nan'), float('inf'), float('-inf')
-        a = array((complex(ninf, 10), complex(10, ninf), 
+        a = array((complex(ninf, 10), complex(10, ninf),
                    complex( inf, 10), complex(10,  inf),
                    5+5j, 5-5j, -5+5j, -5-5j,
                    0+5j, 0-5j, 5, -5,
                    complex(nan, 0), complex(0, nan)), dtype = complex)
         b = [inf]*a.size
-        res = [a[0 ], a[1 ], b[2 ], a[3 ], 
+        res = [a[0 ], a[1 ], b[2 ], a[3 ],
                a[4 ], a[5 ], a[6 ], a[7 ],
                a[8 ], a[9 ], a[10], a[11],
                b[12], b[13]]
         assert (fmin(a, b) == res).all()
         b = [ninf]*a.size
-        res = [b[0 ], b[1 ], b[2 ], b[3 ], 
+        res = [b[0 ], b[1 ], b[2 ], b[3 ],
                b[4 ], b[5 ], b[6 ], b[7 ],
                b[8 ], b[9 ], b[10], b[11],
                b[12], b[13]]
         assert (fmin(a, b) == res).all()
         b = [0]*a.size
-        res = [a[0 ], b[1 ], b[2 ], b[3 ], 
+        res = [a[0 ], b[1 ], b[2 ], b[3 ],
                b[4 ], b[5 ], a[6 ], a[7 ],
                b[8 ], a[9 ], b[10], a[11],
                b[12], b[13]]
             pass # no longdouble yet
         inf = float('inf')
         nan = float('nan')
-        #complex    
-        orig = [2.+4.j, -2.+4.j, 2.-4.j, -2.-4.j, 
-                complex(inf, 3), complex(inf, -3), complex(inf, -inf), 
+        #complex
+        orig = [2.+4.j, -2.+4.j, 2.-4.j, -2.-4.j,
+                complex(inf, 3), complex(inf, -3), complex(inf, -inf),
                 complex(nan, 3), 0+0j, 0-0j]
         a2 = 2.**2 + 4.**2
         r = 2. / a2
         i = 4. / a2
         cnan = complex(nan, nan)
-        expected = [complex(r, -i), complex(-r, -i), complex(r, i), 
-                    complex(-r, i), 
-                    -0j, 0j, cnan, 
+        expected = [complex(r, -i), complex(-r, -i), complex(r, i),
+                    complex(-r, i),
+                    -0j, 0j, cnan,
                     cnan, cnan, cnan]
         for c, rel_err in c_and_relerr:
             actual = reciprocal(array([orig], dtype=c))
 
     def test_floorceiltrunc(self):
         from numpypy import array, floor, ceil, trunc
-        a = array([ complex(-1.4, -1.4), complex(-1.5, -1.5)]) 
+        a = array([ complex(-1.4, -1.4), complex(-1.5, -1.5)])
         raises(TypeError, floor, a)
         raises(TypeError, ceil, a)
         raises(TypeError, trunc, a)
                             (c,a[i], b[i], res)
                 # cast untranslated boxed results to float,
                 # does no harm when translated
-                t1 = float(res[0])        
-                t2 = float(b[i].real)        
+                t1 = float(res[0])
+                t2 = float(b[i].real)
                 self.rAlmostEqual(t1, t2, rel_err=rel_err, msg=msg)
-                t1 = float(res[1])        
-                t2 = float(b[i].imag)        
+                t1 = float(res[1])
+                t2 = float(b[i].imag)
                 self.rAlmostEqual(t1, t2, rel_err=rel_err, msg=msg)
 
     def test_expm1(self):
                             (c,a[i], b[i], res)
                 # cast untranslated boxed results to float,
                 # does no harm when translated
-                t1 = float(res.real)        
-                t2 = float(b[i].real)        
+                t1 = float(res.real)
+                t2 = float(b[i].real)
                 self.rAlmostEqual(t1, t2, rel_err=rel_err, msg=msg)
-                t1 = float(res.imag)        
-                t2 = float(b[i].imag)        
+                t1 = float(res.imag)
+                t2 = float(b[i].imag)
                 self.rAlmostEqual(t1, t2, rel_err=rel_err, msg=msg)
 
     def test_not_complex(self):
         raises(TypeError, logaddexp, complex(1, 1), complex(3, 3))
         raises(TypeError, logaddexp2, complex(1, 1), complex(3, 3))
         raises(TypeError, arctan2, complex(1, 1), complex(3, 3))
-        raises (TypeError, fmod, complex(90,90), 3) 
+        raises (TypeError, fmod, complex(90,90), 3)
 
     def test_isnan_isinf(self):
         from numpypy import isnan, isinf, array
-        assert (isnan(array([0.2+2j, complex(float('inf'),0), 
+        assert (isnan(array([0.2+2j, complex(float('inf'),0),
                 complex(0,float('inf')), complex(0,float('nan')),
                 complex(float('nan'), 0)], dtype=complex)) == \
                 [False, False, False, True, True]).all()
 
-        assert (isinf(array([0.2+2j, complex(float('inf'),0), 
+        assert (isinf(array([0.2+2j, complex(float('inf'),0),
                 complex(0,float('inf')), complex(0,float('nan')),
                 complex(float('nan'), 0)], dtype=complex)) == \
                 [False, True, True, False, False]).all()
                 b = power(a, p)
                 for i in range(len(a)):
                     try:
-                        r = self.c_pow((float(a[i].real), float(a[i].imag)), 
+                        r = self.c_pow((float(a[i].real), float(a[i].imag)),
                                 (float(p.real), float(p.imag)))
                     except ZeroDivisionError:
                         r = (nan, nan)
                         r = (nan, nan)
                     msg = 'result of %r(%r)**%r got %r expected %r\n ' % \
                             (c,a[i], p, b[i], r)
-                    t1 = float(r[0])        
-                    t2 = float(b[i].real)        
+                    t1 = float(r[0])
+                    t2 = float(b[i].real)
                     self.rAlmostEqual(t1, t2, rel_err=rel_err, msg=msg)
-                    t1 = float(r[1])        
+                    t1 = float(r[1])
                     t2 = float(b[i].imag)
                     self.rAlmostEqual(t1, t2, rel_err=rel_err, msg=msg)
 
                             (c,a[i], b[i], res)
                 # cast untranslated boxed results to float,
                 # does no harm when translated
-                t1 = float(res.real)        
-                t2 = float(b[i].real)        
+                t1 = float(res.real)
+                t2 = float(b[i].real)
                 self.rAlmostEqual(t1, t2, rel_err=rel_err, msg=msg)
-                t1 = float(res.imag)        
-                t2 = float(b[i].imag)        
+                t1 = float(res.imag)
+                t2 = float(b[i].imag)
                 self.rAlmostEqual(t1, t2, rel_err=rel_err, msg=msg)
         for c,rel_err in ((complex128, 2e-15), (complex64, 1e-7)):
             b = log1p(array(a,dtype=c))
                             (c,a[i], b[i], res)
                 # cast untranslated boxed results to float,
                 # does no harm when translated
-                t1 = float(res.real)        
-                t2 = float(b[i].real)        
+                t1 = float(res.real)
+                t2 = float(b[i].real)
                 self.rAlmostEqual(t1, t2, rel_err=rel_err, msg=msg)
-                t1 = float(res.imag)        
-                t2 = float(b[i].imag)        
+                t1 = float(res.imag)
+                t2 = float(b[i].imag)
                 self.rAlmostEqual(t1, t2, rel_err=rel_err, msg=msg)
 
     def test_logical_ops(self):
         assert imag(0.0) == 0.0
         a = array([complex(3.0, 4.0)])
         b = a.real
+        b[0] = 1024
+        assert a[0].real == 1024
         assert b.dtype == dtype(float)
+        a = array(complex(3.0, 4.0))
+        b = a.real
+        assert b == array(3)
+        a.real = 1024
+        assert a.real == 1024 
+        assert a.imag == array(4)
+        assert b.dtype == dtype(float)
+        a = array(4.0)
+        b = a.imag
+        assert b == 0
+        assert b.dtype == dtype(float)
+        raises(TypeError, 'a.imag = 1024')
+        raises(ValueError, 'a.real = [1, 3]')
+        a = array('abc')