Commits

Maciej Fijalkowski committed 95b54cd

import numpy

Comments (2)

  1. mattip

    No. This repo is abandoned, as it says at the top of the main page: "import of numpy's pure python part. BY NOW THIS IS UNMAINTAINED AND DOESN'T DO ANYTHING USEFUL". If you are interested in adding the pure Python part of numpy to PyPy, please look for the pypy-hack, which can currently be found on github/rlamy or github/mattip, but may move in the future. It builds on the PyPy numpypy builtin module, which supplies the equivalent of numpy's C-level multiarray and dtypes, adding in as much of the rest of numpy as we can deal with right now. Help is welcome; in particular, enabling the C-API FFT should just work, but needs hacking the setup.py to actually build the module.

Files changed (249)

+syntax: glob
+*.py[co]
+*~
+.*.swp

numpypy/__config__.py

+# This file is generated by /home/fijal/src/numpy/setup.py
+# It contains system_info results at the time of building this package.
+__all__ = ["get_info","show"]
+
+blas_info={'libraries': ['blas'], 'library_dirs': ['/usr/lib64'], 'language': 'f77'}
+lapack_info={'libraries': ['lapack'], 'library_dirs': ['/usr/lib64'], 'language': 'f77'}
+atlas_threads_info={}
+blas_opt_info={'libraries': ['blas'], 'library_dirs': ['/usr/lib64'], 'define_macros': [('NO_ATLAS_INFO', 1)], 'language': 'f77'}
+atlas_blas_threads_info={}
+lapack_opt_info={'libraries': ['lapack', 'blas'], 'library_dirs': ['/usr/lib64'], 'define_macros': [('NO_ATLAS_INFO', 1)], 'language': 'f77'}
+atlas_info={}
+lapack_mkl_info={}
+blas_mkl_info={}
+atlas_blas_info={}
+mkl_info={}
+
+def get_info(name):
+    g = globals()
+    return g.get(name, g.get(name + "_info", {}))
+
+def show():
+    for name,info_dict in globals().items():
+        if name[0] == "_" or type(info_dict) is not type({}): continue
+        print(name + ":")
+        if not info_dict:
+            print("  NOT AVAILABLE")
+        for k,v in info_dict.items():
+            v = str(v)
+            if k == "sources" and len(v) > 200:
+                v = v[:60] + " ...\n... " + v[-60:]
+            print("    %s = %s" % (k,v))
+    
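
As a rough sketch of how this generated module is meant to be consumed (assuming the tree is importable as `numpypy`): `get_info` resolves a name to one of the dictionaries above, falling back to the `<name>_info` key, and `show` prints them all.

    >>> from numpypy.__config__ import get_info, show
    >>> info = get_info('blas_opt')      # resolves to the blas_opt_info dict defined above
    >>> info['libraries']
    ['blas']
    >>> show()                           # prints each *_info dict, or "NOT AVAILABLE" when empty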

numpypy/__init__.py

+"""
+NumPy
+=====
+
+Provides
+  1. An array object of arbitrary homogeneous items
+  2. Fast mathematical operations over arrays
+  3. Linear Algebra, Fourier Transforms, Random Number Generation
+
+How to use the documentation
+----------------------------
+Documentation is available in two forms: docstrings provided
+with the code, and a loose standing reference guide, available from
+`the NumPy homepage <http://www.scipy.org>`_.
+
+We recommend exploring the docstrings using
+`IPython <http://ipython.scipy.org>`_, an advanced Python shell with
+TAB-completion and introspection capabilities.  See below for further
+instructions.
+
+The docstring examples assume that `numpy` has been imported as `np`::
+
+  >>> import numpy as np
+
+Code snippets are indicated by three greater-than signs::
+
+  >>> x = 42
+  >>> x = x + 1
+
+Use the built-in ``help`` function to view a function's docstring::
+
+  >>> help(np.sort)
+  ... # doctest: +SKIP
+
+For some objects, ``np.info(obj)`` may provide additional help.  This is
+particularly true if you see the line "Help on ufunc object:" at the top
+of the help() page.  Ufuncs are implemented in C, not Python, for speed.
+The native Python help() does not know how to view their help, but our
+np.info() function does.
+
+To search for documents containing a keyword, do::
+
+  >>> np.lookfor('keyword')
+  ... # doctest: +SKIP
+
+General-purpose documents like a glossary and help on the basic concepts
+of numpy are available under the ``doc`` sub-module::
+
+  >>> from numpy import doc
+  >>> help(doc)
+  ... # doctest: +SKIP
+
+Available subpackages
+---------------------
+doc
+    Topical documentation on broadcasting, indexing, etc.
+lib
+    Basic functions used by several sub-packages.
+random
+    Core Random Tools
+linalg
+    Core Linear Algebra Tools
+fft
+    Core FFT routines
+polynomial
+    Polynomial tools
+testing
+    Numpy testing tools
+f2py
+    Fortran to Python Interface Generator.
+distutils
+    Enhancements to distutils with support for
+    Fortran compilers and more.
+
+Utilities
+---------
+test
+    Run numpy unittests
+show_config
+    Show numpy build configuration
+dual
+    Overwrite certain functions with high-performance Scipy tools
+matlib
+    Make everything matrices.
+__version__
+    Numpy version string
+
+Viewing documentation using IPython
+-----------------------------------
+Start IPython with the NumPy profile (``ipython -p numpy``), which will
+import `numpy` under the alias `np`.  Then, use the ``cpaste`` command to
+paste examples into the shell.  To see which functions are available in
+`numpy`, type ``np.<TAB>`` (where ``<TAB>`` refers to the TAB key), or use
+``np.*cos*?<ENTER>`` (where ``<ENTER>`` refers to the ENTER key) to narrow
+down the list.  To view the docstring for a function, use
+``np.cos?<ENTER>`` (to view the docstring) and ``np.cos??<ENTER>`` (to view
+the source code).
+
+Copies vs. in-place operation
+-----------------------------
+Most of the functions in `numpy` return a copy of the array argument
+(e.g., `np.sort`).  In-place versions of these functions are often
+available as array methods, i.e. ``x = np.array([1,2,3]); x.sort()``.
+Exceptions to this rule are documented.
+
+"""
+
+# We first need to detect if we're being called as part of the numpy setup
+# procedure itself in a reliable manner.
+try:
+    __NUMPY_SETUP__
+except NameError:
+    __NUMPY_SETUP__ = False
+
+
+if __NUMPY_SETUP__:
+    import sys as _sys
+    _sys.stderr.write('Running from numpy source directory.\n')
+    del _sys
+else:
+    try:
+        from numpy.__config__ import show as show_config
+    except ImportError:
+        msg = """Error importing numpy: you should not try to import numpy from
+        its source directory; please exit the numpy source tree, and relaunch
+        your python interpreter from there."""
+        raise ImportError(msg)
+    from version import git_revision as __git_revision__
+    from version import version as __version__
+
+    from _import_tools import PackageLoader
+
+    def pkgload(*packages, **options):
+        loader = PackageLoader(infunc=True)
+        return loader(*packages, **options)
+
+    import add_newdocs
+    __all__ = ['add_newdocs']
+
+    pkgload.__doc__ = PackageLoader.__call__.__doc__
+
+    from testing import Tester
+    test = Tester().test
+    bench = Tester().bench
+
+    import core
+    from core import *
+    import compat
+    import lib
+    from lib import *
+    import linalg
+    import fft
+    import polynomial
+    import random
+    import ctypeslib
+    import ma
+    import matrixlib as _mat
+    from matrixlib import *
+
+    # Make these accessible from numpy name-space
+    #  but not imported in from numpy import *
+    from __builtin__ import bool, int, long, float, complex, \
+         object, unicode, str
+    from core import round, abs, max, min
+
+    __all__.extend(['__version__', 'pkgload', 'PackageLoader',
+               'show_config'])
+    __all__.extend(core.__all__)
+    __all__.extend(_mat.__all__)
+    __all__.extend(lib.__all__)
+    __all__.extend(['linalg', 'fft', 'random', 'ctypeslib', 'ma'])
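
The `__NUMPY_SETUP__` guard at the top only matters while the package itself is being built from source; a minimal sketch of that handshake, assuming a setup.py along these lines (hypothetical, Python 2 to match the code above):

    # hypothetical snippet from a setup.py
    import __builtin__
    __builtin__.__NUMPY_SETUP__ = True   # make the flag visible to numpy/__init__.py
    import numpy                         # only prints 'Running from numpy source directory.'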

numpypy/_import_tools.py

+import os
+import sys
+
+__all__ = ['PackageLoader']
+
+class PackageLoader(object):
+    def __init__(self, verbose=False, infunc=False):
+        """ Manages loading packages.
+        """
+
+        if infunc:
+            _level = 2
+        else:
+            _level = 1
+        self.parent_frame = frame = sys._getframe(_level)
+        self.parent_name = eval('__name__',frame.f_globals,frame.f_locals)
+        parent_path = eval('__path__',frame.f_globals,frame.f_locals)
+        if isinstance(parent_path, str):
+            parent_path = [parent_path]
+        self.parent_path = parent_path
+        if '__all__' not in frame.f_locals:
+            exec('__all__ = []',frame.f_globals,frame.f_locals)
+        self.parent_export_names = eval('__all__',frame.f_globals,frame.f_locals)
+
+        self.info_modules = {}
+        self.imported_packages = []
+        self.verbose = None
+
+    def _get_info_files(self, package_dir, parent_path, parent_package=None):
+        """ Return list of (package name,info.py file) from parent_path subdirectories.
+        """
+        from glob import glob
+        files = glob(os.path.join(parent_path,package_dir,'info.py'))
+        for info_file in glob(os.path.join(parent_path,package_dir,'info.pyc')):
+            if info_file[:-1] not in files:
+                files.append(info_file)
+        info_files = []
+        for info_file in files:
+            package_name = os.path.dirname(info_file[len(parent_path)+1:])\
+                           .replace(os.sep,'.')
+            if parent_package:
+                package_name = parent_package + '.' + package_name
+            info_files.append((package_name,info_file))
+            info_files.extend(self._get_info_files('*',
+                                                   os.path.dirname(info_file),
+                                                   package_name))
+        return info_files
+
+    def _init_info_modules(self, packages=None):
+        """Initialize info_modules = {<package_name>: <package info.py module>}.
+        """
+        import imp
+        info_files = []
+        info_modules = self.info_modules
+
+        if packages is None:
+            for path in self.parent_path:
+                info_files.extend(self._get_info_files('*',path))
+        else:
+            for package_name in packages:
+                package_dir = os.path.join(*package_name.split('.'))
+                for path in self.parent_path:
+                    names_files = self._get_info_files(package_dir, path)
+                    if names_files:
+                        info_files.extend(names_files)
+                        break
+                else:
+                    try:
+                        exec 'import %s.info as info' % (package_name)
+                        info_modules[package_name] = info
+                    except ImportError, msg:
+                        self.warn('No scipy-style subpackage %r found in %s. '\
+                                  'Ignoring: %s'\
+                                  % (package_name,':'.join(self.parent_path), msg))
+
+        for package_name,info_file in info_files:
+            if package_name in info_modules:
+                continue
+            fullname = self.parent_name +'.'+ package_name
+            if info_file[-1]=='c':
+                filedescriptor = ('.pyc','rb',2)
+            else:
+                filedescriptor = ('.py','U',1)
+
+            try:
+                info_module = imp.load_module(fullname+'.info',
+                                              open(info_file,filedescriptor[1]),
+                                              info_file,
+                                              filedescriptor)
+            except Exception,msg:
+                self.error(msg)
+                info_module = None
+
+            if info_module is None or getattr(info_module,'ignore',False):
+                info_modules.pop(package_name,None)
+            else:
+                self._init_info_modules(getattr(info_module,'depends',[]))
+                info_modules[package_name] = info_module
+
+        return
+
+    def _get_sorted_names(self):
+        """ Return package names sorted in the order as they should be
+        imported due to dependence relations between packages.
+        """
+
+        depend_dict = {}
+        for name,info_module in self.info_modules.items():
+            depend_dict[name] = getattr(info_module,'depends',[])
+        package_names = []
+
+        for name in depend_dict.keys():
+            if not depend_dict[name]:
+                package_names.append(name)
+                del depend_dict[name]
+
+        while depend_dict:
+            for name, lst in depend_dict.items():
+                new_lst = [n for n in lst if n in depend_dict]
+                if not new_lst:
+                    package_names.append(name)
+                    del depend_dict[name]
+                else:
+                    depend_dict[name] = new_lst
+
+        return package_names
+
+    def __call__(self,*packages, **options):
+        """Load one or more packages into parent package top-level namespace.
+
+       This function is intended to shorten the need to import many
+       subpackages, say of scipy, constantly with statements such as
+
+         import scipy.linalg, scipy.fftpack, scipy.etc...
+
+       Instead, you can say:
+
+         import scipy
+         scipy.pkgload('linalg','fftpack',...)
+
+       or
+
+         scipy.pkgload()
+
+       to load all of them in one call.
+
+       If a name which doesn't exist in scipy's namespace is
+       given, a warning is shown.
+
+       Parameters
+       ----------
+        *packages : arg-tuple
+             the names (one or more strings) of all the modules one
+             wishes to load into the top-level namespace.
+        verbose= : integer
+             verbosity level [default: -1].
+             verbose=-1 will suspend also warnings.
+        force= : bool
+             when True, force reloading loaded packages [default: False].
+        postpone= : bool
+             when True, don't load packages [default: False]
+
+     """
+        frame = self.parent_frame
+        self.info_modules = {}
+        if options.get('force',False):
+            self.imported_packages = []
+        self.verbose = verbose = options.get('verbose',-1)
+        postpone = options.get('postpone',None)
+        self._init_info_modules(packages or None)
+
+        self.log('Imports to %r namespace\n----------------------------'\
+                 % self.parent_name)
+
+        for package_name in self._get_sorted_names():
+            if package_name in self.imported_packages:
+                continue
+            info_module = self.info_modules[package_name]
+            global_symbols = getattr(info_module,'global_symbols',[])
+            postpone_import = getattr(info_module,'postpone_import',False)
+            if (postpone and not global_symbols) \
+                   or (postpone_import and postpone is not None):
+                continue
+
+            old_object = frame.f_locals.get(package_name,None)
+
+            cmdstr = 'import '+package_name
+            if self._execcmd(cmdstr):
+                continue
+            self.imported_packages.append(package_name)
+
+            if verbose!=-1:
+                new_object = frame.f_locals.get(package_name)
+                if old_object is not None and old_object is not new_object:
+                    self.warn('Overwriting %s=%s (was %s)' \
+                              % (package_name,self._obj2repr(new_object),
+                                 self._obj2repr(old_object)))
+
+            if '.' not in package_name:
+                self.parent_export_names.append(package_name)
+
+            for symbol in global_symbols:
+                if symbol=='*':
+                    symbols = eval('getattr(%s,"__all__",None)'\
+                                   % (package_name),
+                                   frame.f_globals,frame.f_locals)
+                    if symbols is None:
+                        symbols = eval('dir(%s)' % (package_name),
+                                       frame.f_globals,frame.f_locals)
+                        symbols = filter(lambda s:not s.startswith('_'),symbols)
+                else:
+                    symbols = [symbol]
+
+                if verbose!=-1:
+                    old_objects = {}
+                    for s in symbols:
+                        if s in frame.f_locals:
+                            old_objects[s] = frame.f_locals[s]
+
+                cmdstr = 'from '+package_name+' import '+symbol
+                if self._execcmd(cmdstr):
+                    continue
+
+                if verbose!=-1:
+                    for s,old_object in old_objects.items():
+                        new_object = frame.f_locals[s]
+                        if new_object is not old_object:
+                            self.warn('Overwriting %s=%s (was %s)' \
+                                      % (s,self._obj2repr(new_object),
+                                         self._obj2repr(old_object)))
+
+                if symbol=='*':
+                    self.parent_export_names.extend(symbols)
+                else:
+                    self.parent_export_names.append(symbol)
+
+        return
+
+    def _execcmd(self,cmdstr):
+        """ Execute command in parent_frame."""
+        frame = self.parent_frame
+        try:
+            exec (cmdstr, frame.f_globals,frame.f_locals)
+        except Exception,msg:
+            self.error('%s -> failed: %s' % (cmdstr,msg))
+            return True
+        else:
+            self.log('%s -> success' % (cmdstr))
+        return
+
+    def _obj2repr(self,obj):
+        """ Return repr(obj) with"""
+        module = getattr(obj,'__module__',None)
+        file = getattr(obj,'__file__',None)
+        if module is not None:
+            return repr(obj) + ' from ' + module
+        if file is not None:
+            return repr(obj) + ' from ' + file
+        return repr(obj)
+
+    def log(self,mess):
+        if self.verbose>1:
+            print >> sys.stderr, str(mess)
+    def warn(self,mess):
+        if self.verbose>=0:
+            print >> sys.stderr, str(mess)
+    def error(self,mess):
+        if self.verbose!=-1:
+            print >> sys.stderr, str(mess)
+
+    def _get_doc_title(self, info_module):
+        """ Get the title from a package info.py file.
+        """
+        title = getattr(info_module,'__doc_title__',None)
+        if title is not None:
+            return title
+        title = getattr(info_module,'__doc__',None)
+        if title is not None:
+            title = title.lstrip().split('\n',1)[0]
+            return title
+        return '* Not Available *'
+
+    def _format_titles(self,titles,colsep='---'):
+        display_window_width = 70 # How to determine the correct value in runtime??
+        lengths = [len(name)-name.find('.')-1 for (name,title) in titles]+[0]
+        max_length = max(lengths)
+        lines = []
+        for (name,title) in titles:
+            name = name[name.find('.')+1:]
+            w = max_length - len(name)
+            words = title.split()
+            line = '%s%s %s' % (name,w*' ',colsep)
+            tab = len(line) * ' '
+            while words:
+                word = words.pop(0)
+                if len(line)+len(word)>display_window_width:
+                    lines.append(line)
+                    line = tab
+                line += ' ' + word
+            else:
+                lines.append(line)
+        return '\n'.join(lines)
+
+    def get_pkgdocs(self):
+        """ Return documentation summary of subpackages.
+        """
+        import sys
+        self.info_modules = {}
+        self._init_info_modules(None)
+
+        titles = []
+        symbols = []
+        for package_name, info_module in self.info_modules.items():
+            global_symbols = getattr(info_module,'global_symbols',[])
+            fullname = self.parent_name +'.'+ package_name
+            note = ''
+            if fullname not in sys.modules:
+                note = ' [*]'
+            titles.append((fullname,self._get_doc_title(info_module) + note))
+            if global_symbols:
+                symbols.append((package_name,', '.join(global_symbols)))
+
+        retstr = self._format_titles(titles) +\
+               '\n  [*] - using a package requires explicit import (see pkgload)'
+
+
+        if symbols:
+            retstr += """\n\nGlobal symbols from subpackages"""\
+                      """\n-------------------------------\n""" +\
+                      self._format_titles(symbols,'-->')
+
+        return retstr
+
+class PackageLoaderDebug(PackageLoader):
+    def _execcmd(self,cmdstr):
+        """ Execute command in parent_frame."""
+        frame = self.parent_frame
+        print 'Executing',`cmdstr`,'...',
+        sys.stdout.flush()
+        exec (cmdstr, frame.f_globals,frame.f_locals)
+        print 'ok'
+        sys.stdout.flush()
+        return
+
+if int(os.environ.get('NUMPY_IMPORT_DEBUG','0')):
+    PackageLoader = PackageLoaderDebug
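
A rough sketch of how the loader is driven, assuming the tree imports as `numpypy` and following the `pkgload` docstring above (the names are illustrative):

    >>> import numpypy as np
    >>> np.pkgload('linalg', 'fft')   # import the named scipy-style subpackages into np's namespace
    >>> np.pkgload()                  # or load every subpackage that ships an info.py

Setting `NUMPY_IMPORT_DEBUG=1` in the environment swaps in the `PackageLoaderDebug` variant above, which echoes each import command as it runs.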

numpypy/add_newdocs.py

+# This is only meant to add docs to objects defined in C-extension modules.
+# The purpose is to allow easier editing of the docstrings without
+# requiring a re-compile.
+
+# NOTE: Many of the methods of ndarray have corresponding functions.
+#       If you update these docstrings, please keep also the ones in
+#       core/fromnumeric.py, core/defmatrix.py up-to-date.
+
+from numpy.lib import add_newdoc
+
+###############################################################################
+#
+# flatiter
+#
+# flatiter needs a toplevel description
+#
+###############################################################################
+
+add_newdoc('numpy.core', 'flatiter',
+    """
+    Flat iterator object to iterate over arrays.
+
+    A `flatiter` iterator is returned by ``x.flat`` for any array `x`.
+    It allows iterating over the array as if it were a 1-D array,
+    either in a for-loop or by calling its `next` method.
+
+    Iteration is done in C-contiguous style, with the last index varying the
+    fastest. The iterator can also be indexed using basic slicing or
+    advanced indexing.
+
+    See Also
+    --------
+    ndarray.flat : Return a flat iterator over an array.
+    ndarray.flatten : Returns a flattened copy of an array.
+
+    Notes
+    -----
+    A `flatiter` iterator can not be constructed directly from Python code
+    by calling the `flatiter` constructor.
+
+    Examples
+    --------
+    >>> x = np.arange(6).reshape(2, 3)
+    >>> fl = x.flat
+    >>> type(fl)
+    <type 'numpy.flatiter'>
+    >>> for item in fl:
+    ...     print item
+    ...
+    0
+    1
+    2
+    3
+    4
+    5
+
+    >>> fl[2:4]
+    array([2, 3])
+
+    """)
+
+# flatiter attributes
+
+add_newdoc('numpy.core', 'flatiter', ('base',
+    """
+    A reference to the array that is iterated over.
+
+    Examples
+    --------
+    >>> x = np.arange(5)
+    >>> fl = x.flat
+    >>> fl.base is x
+    True
+
+    """))
+
+
+
+add_newdoc('numpy.core', 'flatiter', ('coords',
+    """
+    An N-dimensional tuple of current coordinates.
+
+    Examples
+    --------
+    >>> x = np.arange(6).reshape(2, 3)
+    >>> fl = x.flat
+    >>> fl.coords
+    (0, 0)
+    >>> fl.next()
+    0
+    >>> fl.coords
+    (0, 1)
+
+    """))
+
+
+
+add_newdoc('numpy.core', 'flatiter', ('index',
+    """
+    Current flat index into the array.
+
+    Examples
+    --------
+    >>> x = np.arange(6).reshape(2, 3)
+    >>> fl = x.flat
+    >>> fl.index
+    0
+    >>> fl.next()
+    0
+    >>> fl.index
+    1
+
+    """))
+
+# flatiter functions
+
+add_newdoc('numpy.core', 'flatiter', ('__array__',
+    """__array__(type=None) Get array from iterator
+
+    """))
+
+
+add_newdoc('numpy.core', 'flatiter', ('copy',
+    """
+    copy()
+
+    Get a copy of the iterator as a 1-D array.
+
+    Examples
+    --------
+    >>> x = np.arange(6).reshape(2, 3)
+    >>> x
+    array([[0, 1, 2],
+           [3, 4, 5]])
+    >>> fl = x.flat
+    >>> fl.copy()
+    array([0, 1, 2, 3, 4, 5])
+
+    """))
+
+
+###############################################################################
+#
+# nditer
+#
+###############################################################################
+
+add_newdoc('numpy.core', 'nditer',
+    """
+    Efficient multi-dimensional iterator object to iterate over arrays.
+    To get started using this object, see the
+    :ref:`introductory guide to array iteration <arrays.nditer>`.
+
+    Parameters
+    ----------
+    op : ndarray or sequence of array_like
+        The array(s) to iterate over.
+    flags : sequence of str, optional
+        Flags to control the behavior of the iterator.
+
+          * "buffered" enables buffering when required.
+          * "c_index" causes a C-order index to be tracked.
+          * "f_index" causes a Fortran-order index to be tracked.
+          * "multi_index" causes a multi-index, or a tuple of indices
+            with one per iteration dimension, to be tracked.
+          * "common_dtype" causes all the operands to be converted to
+            a common data type, with copying or buffering as necessary.
+          * "delay_bufalloc" delays allocation of the buffers until
+            a reset() call is made. Allows "allocate" operands to
+            be initialized before their values are copied into the buffers.
+          * "external_loop" causes the `values` given to be
+            one-dimensional arrays with multiple values instead of
+            zero-dimensional arrays.
+          * "grow_inner" allows the `value` array sizes to be made
+            larger than the buffer size when both "buffered" and
+            "external_loop" is used.
+          * "ranged" allows the iterator to be restricted to a sub-range
+            of the iterindex values.
+          * "refs_ok" enables iteration of reference types, such as
+            object arrays.
+          * "reduce_ok" enables iteration of "readwrite" operands
+            which are broadcasted, also known as reduction operands.
+          * "zerosize_ok" allows `itersize` to be zero.
+    op_flags : list of list of str, optional
+        This is a list of flags for each operand. At minimum, one of
+        "readonly", "readwrite", or "writeonly" must be specified.
+
+          * "readonly" indicates the operand will only be read from.
+          * "readwrite" indicates the operand will be read from and written to.
+          * "writeonly" indicates the operand will only be written to.
+          * "no_broadcast" prevents the operand from being broadcasted.
+          * "contig" forces the operand data to be contiguous.
+          * "aligned" forces the operand data to be aligned.
+          * "nbo" forces the operand data to be in native byte order.
+          * "copy" allows a temporary read-only copy if required.
+          * "updateifcopy" allows a temporary read-write copy if required.
+          * "allocate" causes the array to be allocated if it is None
+            in the `op` parameter.
+          * "no_subtype" prevents an "allocate" operand from using a subtype.
+          * "arraymask" indicates that this operand is the mask to use
+            for selecting elements when writing to operands with the
+            'writemasked' flag set. The iterator does not enforce this,
+            but when writing from a buffer back to the array, it only
+            copies those elements indicated by this mask.
+          * 'writemasked' indicates that only elements where the chosen
+            'arraymask' operand is True will be written to.
+          * 'use_maskna' indicates that this operand should be treated
+            like an NA-masked array.
+    op_dtypes : dtype or tuple of dtype(s), optional
+        The required data type(s) of the operands. If copying or buffering
+        is enabled, the data will be converted to/from their original types.
+    order : {'C', 'F', 'A', or 'K'}, optional
+        Controls the iteration order. 'C' means C order, 'F' means
+        Fortran order, 'A' means 'F' order if all the arrays are Fortran
+        contiguous, 'C' order otherwise, and 'K' means as close to the
+        order the array elements appear in memory as possible. This also
+        affects the element memory order of "allocate" operands, as they
+        are allocated to be compatible with iteration order.
+        Default is 'K'.
+    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+        Controls what kind of data casting may occur when making a copy
+        or buffering.  Setting this to 'unsafe' is not recommended,
+        as it can adversely affect accumulations.
+
+          * 'no' means the data types should not be cast at all.
+          * 'equiv' means only byte-order changes are allowed.
+          * 'safe' means only casts which can preserve values are allowed.
+          * 'same_kind' means only safe casts or casts within a kind,
+            like float64 to float32, are allowed.
+          * 'unsafe' means any data conversions may be done.
+    op_axes : list of list of ints, optional
+        If provided, is a list of ints or None for each operand.
+        The list of axes for an operand is a mapping from the dimensions
+        of the iterator to the dimensions of the operand. A value of
+        -1 can be placed for entries, causing that dimension to be
+        treated as "newaxis".
+    itershape : tuple of ints, optional
+        The desired shape of the iterator. This allows "allocate" operands
+        with a dimension mapped by op_axes not corresponding to a dimension
+        of a different operand to get a value not equal to 1 for that
+        dimension.
+    buffersize : int, optional
+        When buffering is enabled, controls the size of the temporary
+        buffers. Set to 0 for the default value.
+
+    Attributes
+    ----------
+    dtypes : tuple of dtype(s)
+        The data types of the values provided in `value`. This may be
+        different from the operand data types if buffering is enabled.
+    finished : bool
+        Whether the iteration over the operands is finished or not.
+    has_delayed_bufalloc : bool
+        If True, the iterator was created with the "delay_bufalloc" flag,
+        and no reset() function was called on it yet.
+    has_index : bool
+        If True, the iterator was created with either the "c_index" or
+        the "f_index" flag, and the property `index` can be used to
+        retrieve it.
+    has_multi_index : bool
+        If True, the iterator was created with the "multi_index" flag,
+        and the property `multi_index` can be used to retrieve it.
+    index :
+        When the "c_index" or "f_index" flag was used, this property
+        provides access to the index. Raises a ValueError if accessed
+        and `has_index` is False.
+    iterationneedsapi : bool
+        Whether iteration requires access to the Python API, for example
+        if one of the operands is an object array.
+    iterindex : int
+        An index which matches the order of iteration.
+    itersize : int
+        Size of the iterator.
+    itviews :
+        Structured view(s) of `operands` in memory, matching the reordered
+        and optimized iterator access pattern.
+    multi_index :
+        When the "multi_index" flag was used, this property
+        provides access to the index. Raises a ValueError if accessed
+        and `has_multi_index` is False.
+    ndim : int
+        The iterator's dimension.
+    nop : int
+        The number of iterator operands.
+    operands : tuple of operand(s)
+        The array(s) to be iterated over.
+    shape : tuple of ints
+        Shape tuple, the shape of the iterator.
+    value :
+        Value of `operands` at current iteration. Normally, this is a
+        tuple of array scalars, but if the flag "external_loop" is used,
+        it is a tuple of one dimensional arrays.
+
+    Notes
+    -----
+    `nditer` supersedes `flatiter`.  The iterator implementation behind
+    `nditer` is also exposed by the Numpy C API.
+
+    The Python exposure supplies two iteration interfaces, one which follows
+    the Python iterator protocol, and another which mirrors the C-style
+    do-while pattern.  The native Python approach is better in most cases, but
+    if you need the iterator's coordinates or index, use the C-style pattern.
+
+    Examples
+    --------
+    Here is how we might write an ``iter_add`` function, using the
+    Python iterator protocol::
+
+        def iter_add_py(x, y, out=None):
+            addop = np.add
+            it = np.nditer([x, y, out], [],
+                        [['readonly'], ['readonly'], ['writeonly','allocate']])
+            for (a, b, c) in it:
+                addop(a, b, out=c)
+            return it.operands[2]
+
+    Here is the same function, but following the C-style pattern::
+
+        def iter_add(x, y, out=None):
+            addop = np.add
+
+            it = np.nditer([x, y, out], [],
+                        [['readonly'], ['readonly'], ['writeonly','allocate']])
+
+            while not it.finished:
+                addop(it[0], it[1], out=it[2])
+                it.iternext()
+
+            return it.operands[2]
+
+    Here is an example outer product function::
+
+        def outer_it(x, y, out=None):
+            mulop = np.multiply
+
+            it = np.nditer([x, y, out], ['external_loop'],
+                    [['readonly'], ['readonly'], ['writeonly', 'allocate']],
+                    op_axes=[range(x.ndim)+[-1]*y.ndim,
+                             [-1]*x.ndim+range(y.ndim),
+                             None])
+
+            for (a, b, c) in it:
+                mulop(a, b, out=c)
+
+            return it.operands[2]
+
+        >>> a = np.arange(2)+1
+        >>> b = np.arange(3)+1
+        >>> outer_it(a,b)
+        array([[1, 2, 3],
+               [2, 4, 6]])
+
+    Here is an example function which operates like a "lambda" ufunc::
+
+        def luf(lamdaexpr, *args, **kwargs):
+            "luf(lambdaexpr, op1, ..., opn, out=None, order='K', casting='safe', buffersize=0)"
+            nargs = len(args)
+            op = (kwargs.get('out',None),) + args
+            it = np.nditer(op, ['buffered','external_loop'],
+                    [['writeonly','allocate','no_broadcast']] +
+                                    [['readonly','nbo','aligned']]*nargs,
+                    order=kwargs.get('order','K'),
+                    casting=kwargs.get('casting','safe'),
+                    buffersize=kwargs.get('buffersize',0))
+            while not it.finished:
+                it[0] = lamdaexpr(*it[1:])
+                it.iternext()
+            return it.operands[0]
+
+        >>> a = np.arange(5)
+        >>> b = np.ones(5)
+        >>> luf(lambda i,j:i*i + j/2, a, b)
+        array([  0.5,   1.5,   4.5,   9.5,  16.5])
+
+    """)
+
+# nditer methods
+
+add_newdoc('numpy.core', 'nditer', ('copy',
+    """
+    copy()
+
+    Get a copy of the iterator in its current state.
+
+    Examples
+    --------
+    >>> x = np.arange(10)
+    >>> y = x + 1
+    >>> it = np.nditer([x, y])
+    >>> it.next()
+    (array(0), array(1))
+    >>> it2 = it.copy()
+    >>> it2.next()
+    (array(1), array(2))
+
+    """))
+
+add_newdoc('numpy.core', 'nditer', ('debug_print',
+    """
+    debug_print()
+
+    Print the current state of the `nditer` instance and debug info to stdout.
+
+    """))
+
+add_newdoc('numpy.core', 'nditer', ('enable_external_loop',
+    """
+    enable_external_loop()
+
+    When the "external_loop" was not used during construction, but
+    is desired, this modifies the iterator to behave as if the flag
+    was specified.
+
+    """))
+
+add_newdoc('numpy.core', 'nditer', ('iternext',
+    """
+    iternext()
+
+    Check whether iterations are left, and perform a single internal iteration
+    without returning the result.  Used in the C-style do-while
+    pattern.  For an example, see `nditer`.
+
+    Returns
+    -------
+    iternext : bool
+        Whether or not there are iterations left.
+
+    """))
+
+add_newdoc('numpy.core', 'nditer', ('remove_axis',
+    """
+    remove_axis(i)
+
+    Removes axis `i` from the iterator. Requires that the flag "multi_index"
+    be enabled.
+
+    """))
+
+add_newdoc('numpy.core', 'nditer', ('remove_multi_index',
+    """
+    remove_multi_index()
+
+    When the "multi_index" flag was specified, this removes it, allowing
+    the internal iteration structure to be optimized further.
+
+    """))
+
+add_newdoc('numpy.core', 'nditer', ('reset',
+    """
+    reset()
+
+    Reset the iterator to its initial state.
+
+    """))
+
+
+
+###############################################################################
+#
+# broadcast
+#
+###############################################################################
+
+add_newdoc('numpy.core', 'broadcast',
+    """
+    Produce an object that mimics broadcasting.
+
+    Parameters
+    ----------
+    in1, in2, ... : array_like
+        Input parameters.
+
+    Returns
+    -------
+    b : broadcast object
+        Broadcast the input parameters against one another, and
+        return an object that encapsulates the result.
+        Amongst others, it has ``shape`` and ``nd`` properties, and
+        may be used as an iterator.
+
+    Examples
+    --------
+    Manually adding two vectors, using broadcasting:
+
+    >>> x = np.array([[1], [2], [3]])
+    >>> y = np.array([4, 5, 6])
+    >>> b = np.broadcast(x, y)
+
+    >>> out = np.empty(b.shape)
+    >>> out.flat = [u+v for (u,v) in b]
+    >>> out
+    array([[ 5.,  6.,  7.],
+           [ 6.,  7.,  8.],
+           [ 7.,  8.,  9.]])
+
+    Compare against built-in broadcasting:
+
+    >>> x + y
+    array([[5, 6, 7],
+           [6, 7, 8],
+           [7, 8, 9]])
+
+    """)
+
+# attributes
+
+add_newdoc('numpy.core', 'broadcast', ('index',
+    """
+    current index in broadcasted result
+
+    Examples
+    --------
+    >>> x = np.array([[1], [2], [3]])
+    >>> y = np.array([4, 5, 6])
+    >>> b = np.broadcast(x, y)
+    >>> b.index
+    0
+    >>> b.next(), b.next(), b.next()
+    ((1, 4), (1, 5), (1, 6))
+    >>> b.index
+    3
+
+    """))
+
+add_newdoc('numpy.core', 'broadcast', ('iters',
+    """
+    tuple of iterators along ``self``'s "components."
+
+    Returns a tuple of `numpy.flatiter` objects, one for each "component"
+    of ``self``.
+
+    See Also
+    --------
+    numpy.flatiter
+
+    Examples
+    --------
+    >>> x = np.array([1, 2, 3])
+    >>> y = np.array([[4], [5], [6]])
+    >>> b = np.broadcast(x, y)
+    >>> row, col = b.iters
+    >>> row.next(), col.next()
+    (1, 4)
+
+    """))
+
+add_newdoc('numpy.core', 'broadcast', ('nd',
+    """
+    Number of dimensions of broadcasted result.
+
+    Examples
+    --------
+    >>> x = np.array([1, 2, 3])
+    >>> y = np.array([[4], [5], [6]])
+    >>> b = np.broadcast(x, y)
+    >>> b.nd
+    2
+
+    """))
+
+add_newdoc('numpy.core', 'broadcast', ('numiter',
+    """
+    Number of iterators possessed by the broadcasted result.
+
+    Examples
+    --------
+    >>> x = np.array([1, 2, 3])
+    >>> y = np.array([[4], [5], [6]])
+    >>> b = np.broadcast(x, y)
+    >>> b.numiter
+    2
+
+    """))
+
+add_newdoc('numpy.core', 'broadcast', ('shape',
+    """
+    Shape of broadcasted result.
+
+    Examples
+    --------
+    >>> x = np.array([1, 2, 3])
+    >>> y = np.array([[4], [5], [6]])
+    >>> b = np.broadcast(x, y)
+    >>> b.shape
+    (3, 3)
+
+    """))
+
+add_newdoc('numpy.core', 'broadcast', ('size',
+    """
+    Total size of broadcasted result.
+
+    Examples
+    --------
+    >>> x = np.array([1, 2, 3])
+    >>> y = np.array([[4], [5], [6]])
+    >>> b = np.broadcast(x, y)
+    >>> b.size
+    9
+
+    """))
+
+add_newdoc('numpy.core', 'broadcast', ('reset',
+    """
+    reset()
+
+    Reset the broadcasted result's iterator(s).
+
+    Parameters
+    ----------
+    None
+
+    Returns
+    -------
+    None
+
+    Examples
+    --------
+    >>> x = np.array([1, 2, 3])
+    >>> y = np.array([[4], [5], [6]])
+    >>> b = np.broadcast(x, y)
+    >>> b.index
+    0
+    >>> b.next(), b.next(), b.next()
+    ((1, 4), (2, 4), (3, 4))
+    >>> b.index
+    3
+    >>> b.reset()
+    >>> b.index
+    0
+
+    """))
+
+###############################################################################
+#
+# numpy functions
+#
+###############################################################################
+
+add_newdoc('numpy.core.multiarray', 'array',
+    """
+    array(object, dtype=None, copy=True, order=None, subok=False, ndmin=0, maskna=None, ownmaskna=False)
+
+    Create an array.
+
+    Parameters
+    ----------
+    object : array_like
+        An array, any object exposing the array interface, an
+        object whose __array__ method returns an array, or any
+        (nested) sequence.
+    dtype : data-type, optional
+        The desired data-type for the array.  If not given, then
+        the type will be determined as the minimum type required
+        to hold the objects in the sequence.  This argument can only
+        be used to 'upcast' the array.  For downcasting, use the
+        .astype(t) method.
+    copy : bool, optional
+        If true (default), then the object is copied.  Otherwise, a copy
+        will only be made if __array__ returns a copy, if obj is a
+        nested sequence, or if a copy is needed to satisfy any of the other
+        requirements (`dtype`, `order`, etc.).
+    order : {'C', 'F', 'A'}, optional
+        Specify the order of the array.  If order is 'C' (default), then the
+        array will be in C-contiguous order (last-index varies the
+        fastest).  If order is 'F', then the returned array
+        will be in Fortran-contiguous order (first-index varies the
+        fastest).  If order is 'A', then the returned array may
+        be in any order (either C-, Fortran-contiguous, or even
+        discontiguous).
+    subok : bool, optional
+        If True, then sub-classes will be passed-through, otherwise
+        the returned array will be forced to be a base-class array (default).
+    ndmin : int, optional
+        Specifies the minimum number of dimensions that the resulting
+        array should have.  Ones will be pre-pended to the shape as
+        needed to meet this requirement.
+    maskna : bool or None, optional
+        If this is set to True, it forces the array to have an NA mask.
+        If the input is an array without a mask, this means a view with
+        an NA mask is created. If the input is an array with a mask, the
+        mask is preserved as-is.
+
+        If this is set to False, it forces the array to not have an NA
+        mask. If the input is an array with a mask, and has no NA values,
+        it will create a copy of the input without an NA mask.
+    ownmaskna : bool, optional
+        If this is set to True, forces the array to have a mask which
+        it owns. It may still return a view of the data from the input,
+        but the result will always own its own mask.
+
+    Returns
+    -------
+    out : ndarray
+        An array object satisfying the specified requirements.
+
+    See Also
+    --------
+    empty, empty_like, zeros, zeros_like, ones, ones_like, fill
+
+    Examples
+    --------
+    >>> np.array([1, 2, 3])
+    array([1, 2, 3])
+
+    Upcasting:
+
+    >>> np.array([1, 2, 3.0])
+    array([ 1.,  2.,  3.])
+
+    More than one dimension:
+
+    >>> np.array([[1, 2], [3, 4]])
+    array([[1, 2],
+           [3, 4]])
+
+    Minimum dimensions 2:
+
+    >>> np.array([1, 2, 3], ndmin=2)
+    array([[1, 2, 3]])
+
+    Type provided:
+
+    >>> np.array([1, 2, 3], dtype=complex)
+    array([ 1.+0.j,  2.+0.j,  3.+0.j])
+
+    Data-type consisting of more than one element:
+
+    >>> x = np.array([(1,2),(3,4)],dtype=[('a','<i4'),('b','<i4')])
+    >>> x['a']
+    array([1, 3])
+
+    Creating an array from sub-classes:
+
+    >>> np.array(np.mat('1 2; 3 4'))
+    array([[1, 2],
+           [3, 4]])
+
+    >>> np.array(np.mat('1 2; 3 4'), subok=True)
+    matrix([[1, 2],
+            [3, 4]])
+
+    """)
+
+add_newdoc('numpy.core.multiarray', 'empty',
+    """
+    empty(shape, dtype=float, order='C')
+
+    Return a new array of given shape and type, without initializing entries.
+
+    Parameters
+    ----------
+    shape : int or tuple of int
+        Shape of the empty array
+    dtype : data-type, optional
+        Desired output data-type.
+    order : {'C', 'F'}, optional
+        Whether to store multi-dimensional data in C (row-major) or
+        Fortran (column-major) order in memory.
+    maskna : boolean
+        If this is true, the returned array will have an NA mask.
+
+    See Also
+    --------
+    empty_like, zeros, ones
+
+    Notes
+    -----
+    `empty`, unlike `zeros`, does not set the array values to zero,
+    and may therefore be marginally faster.  On the other hand, it requires
+    the user to manually set all the values in the array, and should be
+    used with caution.
+
+    Examples
+    --------
+    >>> np.empty([2, 2])
+    array([[ -9.74499359e+001,   6.69583040e-309],
+           [  2.13182611e-314,   3.06959433e-309]])         #random
+
+    >>> np.empty([2, 2], dtype=int)
+    array([[-1073741821, -1067949133],
+           [  496041986,    19249760]])                     #random
+
+    """)
+
+add_newdoc('numpy.core.multiarray', 'empty_like',
+    """
+    empty_like(a, dtype=None, order='K', subok=True)
+
+    Return a new array with the same shape and type as a given array.
+
+    Parameters
+    ----------
+    a : array_like
+        The shape and data-type of `a` define these same attributes of the
+        returned array.
+    dtype : data-type, optional
+        Overrides the data type of the result.
+    order : {'C', 'F', 'A', or 'K'}, optional
+        Overrides the memory layout of the result. 'C' means C-order,
+        'F' means F-order, 'A' means 'F' if ``a`` is Fortran contiguous,
+        'C' otherwise. 'K' means match the layout of ``a`` as closely
+        as possible.
+    subok : bool, optional.
+        If True, then the newly created array will use the sub-class
+        type of 'a', otherwise it will be a base-class array. Defaults
+        to True.
+
+    Returns
+    -------
+    out : ndarray
+        Array of uninitialized (arbitrary) data with the same
+        shape and type as `a`.
+
+    See Also
+    --------
+    ones_like : Return an array of ones with shape and type of input.
+    zeros_like : Return an array of zeros with shape and type of input.
+    empty : Return a new uninitialized array.
+    ones : Return a new array setting values to one.
+    zeros : Return a new array setting values to zero.
+
+    Notes
+    -----
+    This function does *not* initialize the returned array; to do that use
+    `zeros_like` or `ones_like` instead.  It may be marginally faster than
+    the functions that do set the array values.
+
+    Examples
+    --------
+    >>> a = ([1,2,3], [4,5,6])                         # a is array-like
+    >>> np.empty_like(a)
+    array([[-1073741821, -1073741821,           3],    #random
+           [          0,           0, -1073741821]])
+    >>> a = np.array([[1., 2., 3.],[4.,5.,6.]])
+    >>> np.empty_like(a)
+    array([[ -2.00000715e+000,   1.48219694e-323,  -2.00000572e+000],#random
+           [  4.38791518e-305,  -2.00000715e+000,   4.17269252e-309]])
+
+    """)
+
+
+add_newdoc('numpy.core.multiarray', 'scalar',
+    """
+    scalar(dtype, obj)
+
+    Return a new scalar array of the given type initialized with obj.
+
+    This function is meant mainly for pickle support. `dtype` must be a
+    valid data-type descriptor. If `dtype` corresponds to an object
+    descriptor, then `obj` can be any object, otherwise `obj` must be a
+    string. If `obj` is not given, it will be interpreted as None for object
+    type and as zeros for all other types.
+
+    """)
+
+add_newdoc('numpy.core.multiarray', 'zeros',
+    """
+    zeros(shape, dtype=float, order='C')
+
+    Return a new array of given shape and type, filled with zeros.
+
+    Parameters
+    ----------
+    shape : int or sequence of ints
+        Shape of the new array, e.g., ``(2, 3)`` or ``2``.
+    dtype : data-type, optional
+        The desired data-type for the array, e.g., `numpy.int8`.  Default is
+        `numpy.float64`.
+    order : {'C', 'F'}, optional
+        Whether to store multidimensional data in C- or Fortran-contiguous
+        (row- or column-wise) order in memory.
+
+    Returns
+    -------
+    out : ndarray
+        Array of zeros with the given shape, dtype, and order.
+
+    See Also
+    --------
+    zeros_like : Return an array of zeros with shape and type of input.
+    ones_like : Return an array of ones with shape and type of input.
+    empty_like : Return an empty array with shape and type of input.
+    ones : Return a new array setting values to one.
+    empty : Return a new uninitialized array.
+
+    Examples
+    --------
+    >>> np.zeros(5)
+    array([ 0.,  0.,  0.,  0.,  0.])
+
+    >>> np.zeros((5,), dtype=numpy.int)
+    array([0, 0, 0, 0, 0])
+
+    >>> np.zeros((2, 1))
+    array([[ 0.],
+           [ 0.]])
+
+    >>> s = (2,2)
+    >>> np.zeros(s)
+    array([[ 0.,  0.],
+           [ 0.,  0.]])
+
+    >>> np.zeros((2,), dtype=[('x', 'i4'), ('y', 'i4')]) # custom dtype
+    array([(0, 0), (0, 0)],
+          dtype=[('x', '<i4'), ('y', '<i4')])
+
+    """)
+
+add_newdoc('numpy.core.multiarray', 'isna',
+    """
+    isna(a)
+
+    Returns an array with True for each element of *a* that is NA.
+
+    Parameters
+    ----------
+    a : array_like
+        The array for which to check for NA.
+
+    Returns
+    -------
+    result : bool or array of bool
+        True for each element of `a` that is NA, False otherwise.
+
+    Examples
+    --------
+    >>> np.isna(np.NA)
+    True
+    >>> np.isna(1.5)
+    False
+    >>> np.isna(np.nan)
+    False
+    >>> a = np.array([0, np.NA, 3.5, np.NA])
+    >>> np.isna(a)
+    array([False,  True, False,  True], dtype=bool)
+    """)
+
+add_newdoc('numpy.core.multiarray', 'count_nonzero',
+    """
+    count_nonzero(a)
+
+    Counts the number of non-zero values in the array ``a``.
+
+    Parameters
+    ----------
+    a : array_like
+        The array for which to count non-zeros.
+    axis : None or int or tuple of ints, optional
+        Axis or axes along which a reduction is performed.
+        The default (`axis` = None) is to perform a reduction over all
+        the dimensions of the input array.
+    skipna : bool, optional
+        If this is set to True, any NA elements in the array are skipped
+        instead of propagating.
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left
+        in the result as dimensions with size one. With this option,
+        the result will broadcast correctly against the original `arr`.
+
+    Returns
+    -------
+    count : int or array of int
+        Number of non-zero values in the array.
+
+    See Also
+    --------
+    nonzero : Return the coordinates of all the non-zero values.
+
+    Examples
+    --------
+    >>> np.count_nonzero(np.eye(4))
+    4
+    >>> np.count_nonzero([[0,1,7,0,0],[3,0,0,2,19]])
+    5
+    >>> np.count_nonzero([[0,1,7,0,0],[3,0,0,2,19]], axis=1)
+    array([2, 3])
+    >>> np.count_nonzero([[0,1,7,0,0],[3,0,0,2,19]], axis=1, keepdims=True)
+    array([[2],
+           [3]])
+    """)
+
+add_newdoc('numpy.core.multiarray', 'count_reduce_items',
+    """
+    count_reduce_items(arr, axis=None, skipna=False, keepdims=False)
+
+    Counts the number of items a reduction with the same `axis`
+    and `skipna` parameter values would use. The purpose of this
+    function is for the creation of reduction operations
+    which use the item count, such as :func:`mean`.
+
+    When `skipna` is False or `arr` doesn't have an NA mask,
+    the result is simply the product of the reduction axis
+    sizes, returned as a single scalar.
+
+    Parameters
+    ----------
+    arr : array_like
+        The array for which to count the reduce items.
+    axis : None or int or tuple of ints, optional
+        Axis or axes along which a reduction is performed.
+        The default (`axis` = None) is to perform a reduction over all
+        the dimensions of the input array.
+    skipna : bool, optional
+        If this is set to True, any NA elements in the array are not
+        counted. The only time this function does any actual counting
+        instead of a cheap multiply of a few sizes is when `skipna` is
+        true and `arr` has an NA mask.
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left
+        in the result as dimensions with size one. With this option,
+        the result will broadcast correctly against the original `arr`.
+
+    Returns
+    -------
+    count : intp or array of intp
+        Number of items that would be used in a reduction with the
+        same `axis` and `skipna` parameter values.
+
+    Examples
+    --------
+    >>> a = np.array([[1,np.NA,1], [1,1,np.NA]])
+
+    >>> np.count_reduce_items(a)
+    6
+    >>> np.count_reduce_items(a, skipna=True)
+    4
+    >>> np.sum(a, skipna=True)
+    4
+
+    >>> np.count_reduce_items(a, axis=0, skipna=True)
+    array([2, 1, 1])
+    >>> np.sum(a, axis=0, skipna=True)
+    array([2, 1, 1])
+    """)
+
+add_newdoc('numpy.core.multiarray','set_typeDict',
+    """set_typeDict(dict)
+
+    Set the internal dictionary that can look up an array type using a
+    registered code.
+
+    """)
+
+add_newdoc('numpy.core.multiarray', 'fromstring',
+    """
+    fromstring(string, dtype=float, count=-1, sep='')
+
+    A new 1-D array initialized from raw binary or text data in a string.
+
+    Parameters
+    ----------
+    string : str
+        A string containing the data.
+    dtype : data-type, optional
+        The data type of the array; default: float.  For binary input data,
+        the data must be in exactly this format.
+    count : int, optional
+        Read this number of `dtype` elements from the data.  If this is
+        negative (the default), the count will be determined from the
+        length of the data.
+    sep : str, optional
+        If not provided or, equivalently, the empty string, the data will
+        be interpreted as binary data; otherwise, as ASCII text with
+        decimal numbers.  Also in this latter case, this argument is
+        interpreted as the string separating numbers in the data; extra
+        whitespace between elements is also ignored.
+
+    Returns
+    -------
+    arr : ndarray
+        The constructed array.
+
+    Raises
+    ------
+    ValueError
+        If the string is not the correct size to satisfy the requested
+        `dtype` and `count`.
+
+    See Also
+    --------
+    frombuffer, fromfile, fromiter
+
+    Examples
+    --------
+    >>> np.fromstring('\\x01\\x02', dtype=np.uint8)
+    array([1, 2], dtype=uint8)
+    >>> np.fromstring('1 2', dtype=int, sep=' ')
+    array([1, 2])
+    >>> np.fromstring('1, 2', dtype=int, sep=',')
+    array([1, 2])
+    >>> np.fromstring('\\x01\\x02\\x03\\x04\\x05', dtype=np.uint8, count=3)
+    array([1, 2, 3], dtype=uint8)
+
+    """)
+
+add_newdoc('numpy.core.multiarray', 'fromiter',
+    """
+    fromiter(iterable, dtype, count=-1)
+
+    Create a new 1-dimensional array from an iterable object.
+
+    Parameters
+    ----------
+    iterable : iterable object
+        An iterable object providing data for the array.
+    dtype : data-type
+        The data-type of the returned array.
+    count : int, optional
+        The number of items to read from *iterable*.  The default is -1,
+        which means all data is read.
+
+    Returns
+    -------
+    out : ndarray
+        The output array.
+
+    Notes
+    -----
+    Specify `count` to improve performance.  It allows ``fromiter`` to
+    pre-allocate the output array, instead of resizing it on demand.
+
+    Examples
+    --------
+    >>> iterable = (x*x for x in range(5))
+    >>> np.fromiter(iterable, np.float)
+    array([  0.,   1.,   4.,   9.,  16.])
+
+    """)
+
+add_newdoc('numpy.core.multiarray', 'fromfile',
+    """
+    fromfile(file, dtype=float, count=-1, sep='')
+
+    Construct an array from data in a text or binary file.
+
+    A highly efficient way of reading binary data with a known data-type,
+    as well as parsing simply formatted text files.  Data written using the
+    `tofile` method can be read using this function.
+
+    Parameters
+    ----------
+    file : file or str
+        Open file object or filename.
+    dtype : data-type
+        Data type of the returned array.
+        For binary files, it is used to determine the size and byte-order
+        of the items in the file.
+    count : int
+        Number of items to read. ``-1`` means all items (i.e., the complete
+        file).
+    sep : str
+        Separator between items if file is a text file.
+        Empty ("") separator means the file should be treated as binary.
+        Spaces (" ") in the separator match zero or more whitespace characters.
+        A separator consisting only of spaces must match at least one
+        whitespace.
+
+    See also
+    --------
+    load, save
+    ndarray.tofile
+    loadtxt : More flexible way of loading data from a text file.
+
+    Notes
+    -----
+    Do not rely on the combination of `tofile` and `fromfile` for
+    data storage, as the binary files generated are not platform
+    independent.  In particular, no byte-order or data-type information is
+    saved.  Data can be stored in the platform independent ``.npy`` format
+    using `save` and `load` instead.
+
+    Examples
+    --------
+    Construct an ndarray:
+
+    >>> dt = np.dtype([('time', [('min', int), ('sec', int)]),
+    ...                ('temp', float)])
+    >>> x = np.zeros((1,), dtype=dt)
+    >>> x['time']['min'] = 10; x['temp'] = 98.25
+    >>> x
+    array([((10, 0), 98.25)],
+          dtype=[('time', [('min', '<i4'), ('sec', '<i4')]), ('temp', '<f8')])
+
+    Save the raw data to disk:
+
+    >>> import os
+    >>> fname = os.tmpnam()
+    >>> x.tofile(fname)
+
+    Read the raw data from disk:
+
+    >>> np.fromfile(fname, dtype=dt)
+    array([((10, 0), 98.25)],
+          dtype=[('time', [('min', '<i4'), ('sec', '<i4')]), ('temp', '<f8')])
+
+    The recommended way to store and load data:
+
+    >>> np.save(fname, x)
+    >>> np.load(fname + '.npy')
+    array([((10, 0), 98.25)],
+          dtype=[('time', [('min', '<i4'), ('sec', '<i4')]), ('temp', '<f8')])
+
+    """)
+
+add_newdoc('numpy.core.multiarray', 'frombuffer',
+    """
+    frombuffer(buffer, dtype=float, count=-1, offset=0)
+
+    Interpret a buffer as a 1-dimensional array.
+
+    Parameters
+    ----------
+    buffer : buffer_like
+        An object that exposes the buffer interface.
+    dtype : data-type, optional
+        Data-type of the returned array; default: float.
+    count : int, optional
+        Number of items to read. ``-1`` means all data in the buffer.
+    offset : int, optional
+        Start reading the buffer from this offset; default: 0.
+
+    Notes
+    -----
+    If the buffer has data that is not in machine byte-order, this should
+    be specified as part of the data-type, e.g.::
+
+      >>> dt = np.dtype(int)
+      >>> dt = dt.newbyteorder('>')
+      >>> np.frombuffer(buf, dtype=dt)
+
+    The data of the resulting array will not be byteswapped, but will be
+    interpreted correctly.
+
+    Examples
+    --------
+    >>> s = 'hello world'
+    >>> np.frombuffer(s, dtype='S1', count=5, offset=6)
+    array(['w', 'o', 'r', 'l', 'd'],
+          dtype='|S1')
+
+    """)
+
+add_newdoc('numpy.core.multiarray', 'concatenate',
+    """
+    concatenate((a1, a2, ...), axis=0)
+
+    Join a sequence of arrays together.
+
+    Parameters
+    ----------
+    a1, a2, ... : sequence of array_like
+        The arrays must have the same shape, except in the dimension
+        corresponding to `axis` (the first, by default).
+    axis : int, optional
+        The axis along which the arrays will be joined.  Default is 0.
+
+    Returns
+    -------
+    res : ndarray
+        The concatenated array.
+
+    See Also
+    --------
+    ma.concatenate : Concatenate function that preserves input masks.
+    array_split : Split an array into multiple sub-arrays of equal or
+                  near-equal size.
+    split : Split array into a list of multiple sub-arrays of equal size.
+    hsplit : Split array into multiple sub-arrays horizontally (column wise)
+    vsplit : Split array into multiple sub-arrays vertically (row wise)
+    dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).
+    hstack : Stack arrays in sequence horizontally (column wise)
+    vstack : Stack arrays in sequence vertically (row wise)
+    dstack : Stack arrays in sequence depth wise (along third dimension)
+
+    Notes
+    -----
+    When one or more of the arrays to be concatenated is a MaskedArray,
+    this function will return a MaskedArray object instead of an ndarray,
+    but the input masks are *not* preserved. In cases where a MaskedArray
+    is expected as input, use the ma.concatenate function from the masked
+    array module instead.
+
+    Examples
+    --------
+    >>> a = np.array([[1, 2], [3, 4]])
+    >>> b = np.array([[5, 6]])
+    >>> np.concatenate((a, b), axis=0)
+    array([[1, 2],
+           [3, 4],
+           [5, 6]])
+    >>> np.concatenate((a, b.T), axis=1)
+    array([[1, 2, 5],
+           [3, 4, 6]])
+