Commits

Konstantin Lopuhin committed 39cee1a Merge

merge default

  • Parent commits e4e2d36, cc48c91
  • Branches fix-jit-logs

Files changed (35)

File lib-python/2/distutils/sysconfig_pypy.py

     g['SO'] = _get_so_extension() or ".so"
     g['SOABI'] = g['SO'].rsplit('.')[0]
     g['LIBDIR'] = os.path.join(sys.prefix, 'lib')
+    g['CC'] = "gcc -pthread" # -pthread might not be valid on OS/X, check
 
     global _config_vars
     _config_vars = g

File lib_pypy/_sqlite3.py

         self._in_transaction = False
         self.isolation_level = isolation_level
 
-        self._cursors = []
+        self.__cursors = []
+        self.__cursors_counter = 0
         self.__statements = []
-        self.__statement_counter = 0
+        self.__statements_counter = 0
         self._statement_cache = _StatementCache(self, cached_statements)
 
         self.__func_cache = {}
     def close(self):
         self._check_thread()
 
-        for statement in self.__statements:
-            obj = statement()
-            if obj is not None:
-                obj._finalize()
+        self.__do_all_statements(Statement._finalize, True)
 
         if self._db:
             ret = _lib.sqlite3_close(self._db)
         exc.error_code = error_code
         return exc
 
+    def _remember_cursor(self, cursor):
+        self.__cursors.append(weakref.ref(cursor))
+        self.__cursors_counter += 1
+        if self.__cursors_counter < 200:
+            return
+        self.__cursors_counter = 0
+        self.__cursors = [r for r in self.__cursors if r() is not None]
+
     def _remember_statement(self, statement):
         self.__statements.append(weakref.ref(statement))
-        self.__statement_counter += 1
+        self.__statements_counter += 1
+        if self.__statements_counter < 200:
+            return
+        self.__statements_counter = 0
+        self.__statements = [r for r in self.__statements if r() is not None]
 
-        if self.__statement_counter % 100 == 0:
-            self.__statements = [ref for ref in self.__statements
-                                 if ref() is not None]
+    def __do_all_statements(self, action, reset_cursors):
+        for weakref in self.__statements:
+            statement = weakref()
+            if statement is not None:
+                action(statement)
+
+        if reset_cursors:
+            for weakref in self.__cursors:
+                cursor = weakref()
+                if cursor is not None:
+                    cursor._reset = True
 
     @_check_thread_wrap
     @_check_closed_wrap
         if not self._in_transaction:
             return
 
-        for statement in self.__statements:
-            obj = statement()
-            if obj is not None:
-                obj._reset()
+        self.__do_all_statements(Statement._reset, False)
 
         statement = c_void_p()
         ret = _lib.sqlite3_prepare_v2(self._db, b"COMMIT", -1,
         if not self._in_transaction:
             return
 
-        for statement in self.__statements:
-            obj = statement()
-            if obj is not None:
-                obj._reset()
-
-        for cursor_ref in self._cursors:
-            cursor = cursor_ref()
-            if cursor:
-                cursor._reset = True
+        self.__do_all_statements(Statement._reset, True)
 
         statement = c_void_p()
         ret = _lib.sqlite3_prepare_v2(self._db, b"ROLLBACK", -1,
     __statement = None
 
     def __init__(self, con):
-        self.__initialized = True
-        self.__connection = con
-
         if not isinstance(con, Connection):
             raise TypeError
-        con._check_thread()
-        con._check_closed()
-        con._cursors.append(weakref.ref(self))
+        self.__connection = con
 
         self.arraysize = 1
         self.row_factory = None
         self.__description = None
         self.__rowcount = -1
 
+        con._check_thread()
+        con._remember_cursor(self)
+
+        self.__initialized = True
+
     def __del__(self):
-        try:
-            self.__connection._cursors.remove(weakref.ref(self))
-        except (AttributeError, ValueError):
-            pass
         if self.__statement:
             self.__statement._reset()
 
                     self.__rowcount += _lib.sqlite3_changes(self.__connection._db)
         finally:
             self.__locked = False
-
         return self
 
     @__check_cursor_wrap
             if rc != _lib.SQLITE_DONE:
                 _lib.sqlite3_finalize(statement)
                 if rc == _lib.SQLITE_OK:
-                    return self
+                    break
                 else:
                     raise self.__connection._get_exception(rc)
+
             rc = _lib.sqlite3_finalize(statement)
             if rc != _lib.SQLITE_OK:
                 raise self.__connection._get_exception(rc)
 
     def __init__(self, connection, sql):
         self.__con = connection
+        self.__con._remember_statement(self)
 
         if not isinstance(sql, basestring):
             raise Warning("SQL is of wrong type. Must be string or unicode.")
             ret = _lib.sqlite3_prepare_v2(self.__con._db, sql, -1,
                                           byref(self._statement), byref(sql))
             self._kind = Statement._DQL
-
         if ret != _lib.SQLITE_OK:
             raise self.__con._get_exception(ret)
-        self.__con._remember_statement(self)
+
         sql = sql.value.decode('utf-8')
         if _check_remaining_sql(sql):
             raise Warning("You can only execute one statement at a time.")
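
The change above makes the connection track cursors the same way it already tracked statements: a list of weak references that is pruned every 200 registrations, with __do_all_statements centralizing the "apply this to every live statement" loops. A minimal, standalone sketch of that pattern (a hypothetical Tracker class, not the _sqlite3 code itself):

    import weakref

    class Tracker(object):
        # Hypothetical helper, not part of _sqlite3: keep weak references to
        # child objects and prune dead ones every N registrations instead of
        # relying on each child's __del__.
        PRUNE_EVERY = 200   # same threshold as the diff above

        def __init__(self):
            self._refs = []
            self._counter = 0

        def remember(self, obj):
            self._refs.append(weakref.ref(obj))
            self._counter += 1
            if self._counter < self.PRUNE_EVERY:
                return
            self._counter = 0
            self._refs = [r for r in self._refs if r() is not None]

        def do_all(self, action):
            # mirrors __do_all_statements(Statement._reset, ...): call an
            # unbound-method-style callable on every object still alive
            for r in self._refs:
                obj = r()
                if obj is not None:
                    action(obj)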

File pypy/doc/architecture.rst

 
  * a compliant, flexible and fast implementation of the Python_ Language 
    which uses the above toolchain to enable new advanced high-level features 
-   without having to encode the low-level details.
+   without having to encode the low-level details.  We call this PyPy.
 
 By separating concerns in this way, our implementation
 of Python - and other dynamic languages - is able to automatically
 High Level Goals
 =============================
 
-PyPy - the Translation Framework 
+RPython - the Translation Toolchain
 -----------------------------------------------
 
 Traditionally, language interpreters are written in a target platform language
 very challenging because of the involved complexity.
 
 
-PyPy - the Python Interpreter 
+PyPy - the Python Interpreter
 --------------------------------------------
 
 Our main motivation for developing the translation framework is to
 of `Extreme Programming`_, the architecture of PyPy has evolved over time
 and continues to evolve.  Nevertheless, the high level architecture is 
 stable. As described above, there are two rather independent basic
-subsystems: the `Python Interpreter`_ and the `Translation Framework`_.
+subsystems: the `PyPy Python Interpreter`_ and the `RPython Translation Toolchain`_.
 
 .. _`translation framework`:
 
-The Translation Framework
+RPython Translation Toolchain
 -------------------------
 
 The job of the RPython toolchain is to translate RPython_ programs
 
 * Optionally, `various transformations`_ can then be applied which, for
   example, perform optimizations such as inlining, add capabilities
-  such as stackless-style concurrency (deprecated), or insert code for the
+  such as stackless-style concurrency, or insert code for the
   `garbage collector`_.
 
 * Then, the graphs are converted to source code for the target platform
 .. _`standard interpreter`: 
 .. _`python interpreter`: 
 
-The Python Interpreter
+PyPy Python Interpreter
 -------------------------------------
 
 PyPy's *Python Interpreter* is written in RPython and implements the
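
For readers new to the toolchain, what it consumes is an ordinary Python module defining an entry point, roughly along the lines of the standalone targets in the getting-started documentation (a sketch, with the driver hooks reduced to the minimum):

    import os

    def entry_point(argv):
        # RPython is a restricted subset of Python 2; os.write keeps it simple
        os.write(1, "hello from RPython\n")
        return 0

    def target(*args):
        # the translation driver calls target() to obtain the entry point
        return entry_point, None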

File pypy/doc/faq.rst

 CPython extension and replace it with a pure python version that the
 JIT can see.
 
-We fully support ctypes-based extensions.
+We fully support ctypes-based extensions. But for best performance, we
+recommend that you use the cffi_ module to interface with C code.
 
 For information on which third party extensions work (or do not work) 
 with PyPy see the `compatibility wiki`_.
 
 .. _`extension modules`: cpython_differences.html#extension-modules
 .. _`cpython differences`: cpython_differences.html
-.. _`compatibility wiki`: https://bitbucket.org/pypy/compatibility/wiki/Home
+.. _`compatibility wiki`:
+.. https://bitbucket.org/pypy/compatibility/wiki/Home
+.. _cffi: http://cffi.readthedocs.org/
 
 ---------------------------------
 On which platforms does PyPy run?
 bootstrap, as cross compilation is not really meant to work yet.
 At the moment you need CPython 2.5 - 2.7
 for the translation process. PyPy's JIT requires an x86 or x86_64 CPU.
+(There has also been good progress on getting the JIT working for ARMv7.)
 
 ------------------------------------------------
 Which Python version (2.x?) does PyPy implement?
 * Second, and perhaps most important: do you have a really good reason
   for writing the module in RPython in the first place?  Nowadays you
   should really look at alternatives, like writing it in pure Python,
-  using ctypes if it needs to call C code.  Other alternatives are being
-  developed too (as of summer 2011), like a Cython binding.
+  using cffi_ if it needs to call C code.
 
 In this context it is not that important to be able to translate
 RPython modules independently of translating the complete interpreter.
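
The FAQ now points extension authors at cffi instead of ctypes. As a rough idea of what a cffi binding looks like, here is a minimal ABI-mode sketch (not taken from the PyPy docs; it assumes a POSIX system where dlopen(None) exposes the standard C library):

    from cffi import FFI

    ffi = FFI()
    ffi.cdef("int printf(const char *format, ...);")  # declare the C signature
    C = ffi.dlopen(None)                               # load the C library
    C.printf(b"hello from cffi\n")                     # call it like a Python function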

File pypy/doc/getting-started-dev.rst

 The translator is a tool based on the PyPy interpreter which can translate
 sufficiently static RPython programs into low-level code (in particular it can
 be used to translate the `full Python interpreter`_). To be able to experiment with it
-you need to:
+you need to download and install the usual (CPython) version of:
 
-  * Download and install Pygame_.
-
-  * Download and install `Dot Graphviz`_ 
+  * Pygame_
+  * `Dot Graphviz`_
 
 To start the interactive translator shell do::
 

File pypy/doc/whatsnew-head.rst

 .. branch: numpypy-real-as-view
 Convert real, imag from ufuncs to views. This involves the beginning of
 view() functionality
+.. branch: indexing-by-array
+Adds indexing by scalar, adds int conversion from scalar and single element array,
+fixes compress, indexing by an array with a smaller shape and the indexed object.
 
 .. branch: signatures
 Improved RPython typing
 Moves optimized JIT frames from stack to heap. As a side effect it enables
 stackless to work well with the JIT on PyPy. Also removes a bunch of code from
 the GC which fixes cannot find gc roots.
+
+.. branch: pycon2013-doc-fixes
+Documentation fixes after going through the docs at PyCon 2013 sprint.

File pypy/interpreter/error.py

-import os, sys
+import cStringIO
+import os
+import sys
+import traceback
+from errno import EINTR
+
 from rpython.rlib import jit
 from rpython.rlib.objectmodel import we_are_translated
-from errno import EINTR
+
+from pypy.interpreter import debug
+
 
 AUTO_DEBUG = os.getenv('PYPY_DEBUG')
 RECORD_INTERPLEVEL_TRACEBACK = True
         if space is None:
             # this part NOT_RPYTHON
             exc_typename = str(self.w_type)
-            exc_value    = str(w_value)
+            exc_value = str(w_value)
         else:
             w = space.wrap
             if space.is_w(space.type(self.w_type), space.w_str):
 
     def print_application_traceback(self, space, file=None):
         "NOT_RPYTHON: Dump a standard application-level traceback."
-        if file is None: file = sys.stderr
+        if file is None:
+            file = sys.stderr
         self.print_app_tb_only(file)
         print >> file, self.errorstr(space)
 
     def print_detailed_traceback(self, space=None, file=None):
         """NOT_RPYTHON: Dump a nice detailed interpreter- and
         application-level traceback, useful to debug the interpreter."""
-        import traceback, cStringIO
-        if file is None: file = sys.stderr
+        if file is None:
+            file = sys.stderr
         f = cStringIO.StringIO()
         for i in range(len(self.debug_excs)-1, -1, -1):
             print >> f, "Traceback (interpreter-level):"
         self.print_app_tb_only(file)
         print >> file, '(application-level)', self.errorstr(space)
         if AUTO_DEBUG:
-            import debug
             debug.fire(self)
 
     @jit.unroll_safe
         #  ("string", ...)            ("string", ...)              deprecated
         #  (inst, None)               (inst.__class__, inst)          no
         #
-        w_type  = self.w_type
+        w_type = self.w_type
         w_value = self.get_w_value(space)
         while space.is_true(space.isinstance(w_type, space.w_tuple)):
             w_type = space.getitem(w_type, space.wrap(0))
             w_value = w_inst
             w_type = w_instclass
 
-        self.w_type   = w_type
+        self.w_type = w_type
         self._w_value = w_value
 
     def _exception_getclass(self, space, w_inst):
         from rpython.rlib.unroll import unrolling_iterable
         attrs = ['x%d' % i for i in range(len(formats))]
         entries = unrolling_iterable(enumerate(attrs))
-        #
+
         class OpErrFmt(OperationError):
             def __init__(self, w_type, strings, *args):
                 self.setup(w_type)
                 for i, attr in entries:
                     setattr(self, attr, args[i])
                 assert w_type is not None
+
             def _compute_value(self):
                 lst = [None] * (len(formats) + len(formats) + 1)
                 for i, attr in entries:

File pypy/interpreter/nestedscope.py

-from pypy.interpreter.error import OperationError
-from pypy.interpreter import function, pycode, pyframe
-from pypy.interpreter.baseobjspace import Wrappable
-from pypy.interpreter.mixedmodule import MixedModule
-from pypy.interpreter.astcompiler import consts
 from rpython.rlib import jit
 from rpython.tool.uid import uid
 
+from pypy.interpreter import function, pycode, pyframe
+from pypy.interpreter.astcompiler import consts
+from pypy.interpreter.baseobjspace import Wrappable
+from pypy.interpreter.error import OperationError
+from pypy.interpreter.mixedmodule import MixedModule
+
+
 class Cell(Wrappable):
     "A simple container for a wrapped value."
 
 
     def get(self):
         if self.w_value is None:
-            raise ValueError, "get() from an empty cell"
+            raise ValueError("get() from an empty cell")
         return self.w_value
 
     def set(self, w_value):
 
     def delete(self):
         if self.w_value is None:
-            raise ValueError, "delete() on an empty cell"
+            raise ValueError("delete() on an empty cell")
         self.w_value = None
-  
+
     def descr__cmp__(self, space, w_other):
         other = space.interpclass_w(w_other)
         if not isinstance(other, Cell):
         return space.cmp(self.w_value, other.w_value)
 
     def descr__reduce__(self, space):
-        w_mod    = space.getbuiltinmodule('_pickle_support')
-        mod      = space.interp_w(MixedModule, w_mod)
+        w_mod = space.getbuiltinmodule('_pickle_support')
+        mod = space.interp_w(MixedModule, w_mod)
         new_inst = mod.get('cell_new')
-        if self.w_value is None:    #when would this happen?
+        if self.w_value is None:    # when would this happen?
             return space.newtuple([new_inst, space.newtuple([])])
         tup = [self.w_value]
         return space.newtuple([new_inst, space.newtuple([]),
 
     def descr__setstate__(self, space, w_state):
         self.w_value = space.getitem(w_state, space.wrap(0))
-        
+
     def __repr__(self):
         """ representation for debugging purposes """
         if self.w_value is None:
             raise OperationError(space.w_ValueError, space.wrap("Cell is empty"))
 
 
-
 super_initialize_frame_scopes = pyframe.PyFrame.initialize_frame_scopes
-super_fast2locals             = pyframe.PyFrame.fast2locals
-super_locals2fast             = pyframe.PyFrame.locals2fast
+super_fast2locals = pyframe.PyFrame.fast2locals
+super_locals2fast = pyframe.PyFrame.locals2fast
 
 
 class __extend__(pyframe.PyFrame):
     def fast2locals(self):
         super_fast2locals(self)
         # cellvars are values exported to inner scopes
-        # freevars are values coming from outer scopes 
+        # freevars are values coming from outer scopes
         freevarnames = list(self.pycode.co_cellvars)
         if self.pycode.co_flags & consts.CO_OPTIMIZED:
             freevarnames.extend(self.pycode.co_freevars)
         except ValueError:
             varname = self.getfreevarname(varindex)
             if self.iscellvar(varindex):
-                message = "local variable '%s' referenced before assignment"%varname
+                message = "local variable '%s' referenced before assignment" % varname
                 w_exc_type = self.space.w_UnboundLocalError
             else:
                 message = ("free variable '%s' referenced before assignment"
-                           " in enclosing scope"%varname)
+                           " in enclosing scope" % varname)
                 w_exc_type = self.space.w_NameError
             raise OperationError(w_exc_type, self.space.wrap(message))
         else:

File pypy/module/_continuation/interp_continuation.py

         if self.sthread is not None:
             raise geterror(self.space, "continulet already __init__ialized")
         sthread = build_sthread(self.space)
-        #workaround_disable_jit(sthread)
         #
         # hackish: build the frame "by hand", passing it the correct arguments
         space = self.space
                 global_state.clear()
                 raise geterror(self.space, "continulet already finished")
         self.check_sthread()
-        #workaround_disable_jit(self.sthread)
-        #
+
         global_state.origin = self
         if to is None:
             # simple switch: going to self.h
         sthread = ec.stacklet_thread = SThread(space, ec)
     return sthread
 
-def workaround_disable_jit(sthread):
-    # A bad workaround to kill the JIT anywhere in this thread.
-    # This forces all the frames.  It's a bad workaround because
-    # it takes O(depth) time, and it will cause some "abort:
-    # vable escape" in the JIT.  The goal is to prevent any frame
-    # from being still virtuals, because the JIT generates code
-    # to un-virtualizable them "on demand" by loading values based
-    # on FORCE_TOKEN, which is an address in the stack.
-    sthread.ec.force_all_frames()
-
 # ____________________________________________________________
 
 def permute(space, args_w):

File pypy/module/_io/interp_iobase.py

     def remove(self, w_iobase):
         holder = w_iobase.streamholder
         if holder is not None:
-            del self.streams[holder]
+            try:
+                del self.streams[holder]
+            except KeyError:
+                # this can happen in daemon threads
+                pass
 
     def flush_all(self, space):
         while self.streams:

File pypy/module/micronumpy/arrayimpl/base.py

     def base(self):
         raise NotImplementedError
 
-    def create_iter(self, shape=None):
+    def create_iter(self, shape=None, backward_broadcast=False):
         raise NotImplementedError
 
 class BaseArrayIterator(object):

File pypy/module/micronumpy/arrayimpl/concrete.py

 
-from pypy.module.micronumpy.arrayimpl import base
+from pypy.module.micronumpy.arrayimpl import base, scalar
 from pypy.module.micronumpy import support, loop, iter
 from pypy.module.micronumpy.base import convert_to_array, W_NDimArray,\
      ArrayArgumentException
     parent = None
 
     # JIT hints that length of all those arrays is a constant
-    
+
     def get_shape(self):
         shape = self.shape
         jit.hint(len(shape), promote=True)
                               new_shape, self, orig_array)
         else:
             return None
-    
+
     def get_real(self, orig_array):
         strides = self.get_strides()
         backstrides = self.get_backstrides()
             dtype =  self.dtype.float_type
             return SliceArray(self.start, strides, backstrides,
                           self.get_shape(), self, orig_array, dtype=dtype)
-        return SliceArray(self.start, strides, backstrides, 
+        return SliceArray(self.start, strides, backstrides,
                           self.get_shape(), self, orig_array)
 
     def get_imag(self, orig_array):
         backstrides = self.get_backstrides()
         if self.dtype.is_complex_type():
             dtype =  self.dtype.float_type
-            return SliceArray(self.start + dtype.get_size(), strides, 
+            return SliceArray(self.start + dtype.get_size(), strides,
                     backstrides, self.get_shape(), self, orig_array, dtype=dtype)
         if self.dtype.is_flexible_type():
             # numpy returns self for self.imag
             space.isinstance_w(w_idx, space.w_slice) or
             space.is_w(w_idx, space.w_None)):
             raise IndexError
-        if isinstance(w_idx, W_NDimArray):
+        if isinstance(w_idx, W_NDimArray) and not isinstance(w_idx.implementation, scalar.Scalar):
             raise ArrayArgumentException
         shape = self.get_shape()
         shape_len = len(shape)
-        if shape_len == 0:
-            raise OperationError(space.w_IndexError, space.wrap(
-                "0-d arrays can't be indexed"))
         view_w = None
-        if (space.isinstance_w(w_idx, space.w_list) or
-            isinstance(w_idx, W_NDimArray)):
+        if space.isinstance_w(w_idx, space.w_list):
             raise ArrayArgumentException
         if space.isinstance_w(w_idx, space.w_tuple):
             view_w = space.fixedview(w_idx)
         shape = self.get_shape()[:]
         strides = self.get_strides()[:]
         backstrides = self.get_backstrides()[:]
-        shape[axis1], shape[axis2] = shape[axis2], shape[axis1]   
+        shape[axis1], shape[axis2] = shape[axis2], shape[axis1]
         strides[axis1], strides[axis2] = strides[axis2], strides[axis1]
-        backstrides[axis1], backstrides[axis2] = backstrides[axis2], backstrides[axis1] 
-        return W_NDimArray.new_slice(self.start, strides, 
+        backstrides[axis1], backstrides[axis2] = backstrides[axis2], backstrides[axis1]
+        return W_NDimArray.new_slice(self.start, strides,
                                      backstrides, shape, self, orig_arr)
 
     def get_storage_as_int(self, space):
         self.backstrides = backstrides
         self.storage = storage
 
-    def create_iter(self, shape=None):
+    def create_iter(self, shape=None, backward_broadcast=False):
         if shape is None or shape == self.get_shape():
             return iter.ConcreteArrayIterator(self)
         r = calculate_broadcast_strides(self.get_strides(),
                                         self.get_backstrides(),
-                                        self.get_shape(), shape)
+                                        self.get_shape(), shape, backward_broadcast)
         return iter.MultiDimViewIterator(self, self.dtype, 0, r[0], r[1], shape)
 
     def fill(self, box):
         free_raw_storage(self.storage, track_allocation=False)
 
 
-        
+
 
 class NonWritableArray(ConcreteArray):
     def descr_setitem(self, space, orig_array, w_index, w_value):
         raise OperationError(space.w_RuntimeError, space.wrap(
             "array is not writable"))
-        
+
 
 class SliceArray(BaseConcreteArray):
     def __init__(self, start, strides, backstrides, shape, parent, orig_arr,
     def fill(self, box):
         loop.fill(self, box.convert_to(self.dtype))
 
-    def create_iter(self, shape=None):
+    def create_iter(self, shape=None, backward_broadcast=False):
         if shape is not None and shape != self.get_shape():
             r = calculate_broadcast_strides(self.get_strides(),
                                             self.get_backstrides(),
-                                            self.get_shape(), shape)
+                                            self.get_shape(), shape,
+                                            backward_broadcast)
             return iter.MultiDimViewIterator(self.parent, self.dtype,
                                              self.start, r[0], r[1], shape)
         if len(self.get_shape()) == 1:
-            return iter.OneDimViewIterator(self.parent, self.dtype, self.start, 
+            return iter.OneDimViewIterator(self.parent, self.dtype, self.start,
                     self.get_strides(), self.get_shape())
         return iter.MultiDimViewIterator(self.parent, self.dtype, self.start,
                                     self.get_strides(),

File pypy/module/micronumpy/arrayimpl/scalar.py

     def get_strides(self):
         return []
 
-    def create_iter(self, shape=None):
+    def create_iter(self, shape=None, backward_broadcast=False):
         return ScalarIterator(self)
 
     def get_scalar_value(self):
     def descr_setitem(self, space, _, w_idx, w_val):
         raise OperationError(space.w_IndexError,
                              space.wrap("scalars cannot be indexed"))
-        
+
     def setitem_index(self, space, idx, w_val):
         raise OperationError(space.w_IndexError,
                              space.wrap("scalars cannot be indexed"))
 
     def reshape(self, space, orig_array, new_shape):
         return self.set_shape(space, orig_array, new_shape)
-        
+
     def create_axis_iter(self, shape, dim, cum):
         raise Exception("axis iter should not happen on scalar")
 

File pypy/module/micronumpy/interp_flatiter.py

     def get_shape(self):
         return self.shape
 
-    def create_iter(self, shape=None):
+    def create_iter(self, shape=None, backward_broadcast=False):
         assert isinstance(self.base(), W_NDimArray)
         return self.base().create_iter()
 

File pypy/module/micronumpy/interp_numarray.py

 from pypy.module.micronumpy import loop
 from pypy.module.micronumpy.dot import match_dot_shapes
 from pypy.module.micronumpy.interp_arrayops import repeat, choose
+from pypy.module.micronumpy.arrayimpl import scalar
 from rpython.tool.sourcetools import func_with_new_name
 from rpython.rlib import jit
 from rpython.rlib.rstring import StringBuilder
             raise OperationError(space.w_ValueError,
                                  space.wrap("index out of range for array"))
         size = loop.count_all_true(arr)
-        res = W_NDimArray.from_shape([size], self.get_dtype())
+        if len(arr.get_shape()) == 1:
+            res_shape = [size] + self.get_shape()[1:]
+        else:
+            res_shape = [size]
+        res = W_NDimArray.from_shape(res_shape, self.get_dtype())
         return loop.getitem_filter(res, self, arr)
 
     def setitem_filter(self, space, idx, val):
         s.append('])')
         return s.build()
 
-    def create_iter(self, shape=None):
+    def create_iter(self, shape=None, backward_broadcast=False):
         assert isinstance(self.implementation, BaseArrayImplementation)
-        return self.implementation.create_iter(shape)
+        return self.implementation.create_iter(shape=shape,
+                                   backward_broadcast=backward_broadcast)
 
     def create_axis_iter(self, shape, dim, cum):
         return self.implementation.create_axis_iter(shape, dim, cum)
         if not space.is_none(w_axis):
             raise OperationError(space.w_NotImplementedError,
                                  space.wrap("axis unsupported for compress"))
+            arr = self
+        else:
+            arr = self.descr_reshape(space, [space.wrap(-1)])
         index = convert_to_array(space, w_obj)
-        return self.getitem_filter(space, index)
+        return arr.getitem_filter(space, index)
 
     def descr_flatten(self, space, w_order=None):
         if self.is_scalar():
     descr_argmax = _reduce_argmax_argmin_impl("max")
     descr_argmin = _reduce_argmax_argmin_impl("min")
 
+    def descr_int(self, space):
+        shape = self.get_shape()
+        if len(shape) == 0:
+            assert isinstance(self.implementation, scalar.Scalar)
+            return space.int(space.wrap(self.implementation.get_scalar_value()))
+        if shape == [1]:
+            return space.int(self.descr_getitem(space, space.wrap(0)))
+        raise OperationError(space.w_TypeError, space.wrap("only length-1 arrays can be converted to Python scalars"))
+
 
 @unwrap_spec(offset=int)
 def descr_new_array(space, w_subtype, w_shape, w_dtype=None, w_buffer=None,
 
     __repr__ = interp2app(W_NDimArray.descr_repr),
     __str__ = interp2app(W_NDimArray.descr_str),
+    __int__ = interp2app(W_NDimArray.descr_int),
 
     __pos__ = interp2app(W_NDimArray.descr_pos),
     __neg__ = interp2app(W_NDimArray.descr_neg),

File pypy/module/micronumpy/loop.py

 
 def getitem_filter(res, arr, index):
     res_iter = res.create_iter()
-    index_iter = index.create_iter()
+    shapelen = len(arr.get_shape())
+    if shapelen > 1 and len(index.get_shape()) < 2:
+        index_iter = index.create_iter(arr.get_shape(), backward_broadcast=True)
+    else:
+        index_iter = index.create_iter()
     arr_iter = arr.create_iter()
-    shapelen = len(arr.get_shape())
     arr_dtype = arr.get_dtype()
     index_dtype = index.get_dtype()
     # XXX length of shape of index as well?

File pypy/module/micronumpy/strides.py

     rshape += shape[s:]
     return rshape, rstart, rstrides, rbackstrides
 
-def calculate_broadcast_strides(strides, backstrides, orig_shape, res_shape):
+def calculate_broadcast_strides(strides, backstrides, orig_shape, res_shape, backwards=False):
     rstrides = []
     rbackstrides = []
     for i in range(len(orig_shape)):
         else:
             rstrides.append(strides[i])
             rbackstrides.append(backstrides[i])
-    rstrides = [0] * (len(res_shape) - len(orig_shape)) + rstrides
-    rbackstrides = [0] * (len(res_shape) - len(orig_shape)) + rbackstrides
+    if backwards:
+        rstrides = rstrides + [0] * (len(res_shape) - len(orig_shape))  
+        rbackstrides = rbackstrides + [0] * (len(res_shape) - len(orig_shape)) 
+    else:
+        rstrides = [0] * (len(res_shape) - len(orig_shape)) + rstrides
+        rbackstrides = [0] * (len(res_shape) - len(orig_shape)) + rbackstrides
     return rstrides, rbackstrides
 
 def is_single_elem(space, w_elem, is_rec_type):
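
The new backwards flag only changes where the zero strides for the missing dimensions are padded: at the front for ordinary NumPy-style broadcasting, at the back for the boolean-index case used by loop.getitem_filter above. A toy illustration of just that padding step (plain Python, not the RPython code):

    def pad_strides(strides, orig_shape, res_shape, backwards=False):
        # show only the final padding step of calculate_broadcast_strides
        missing = [0] * (len(res_shape) - len(orig_shape))
        return strides + missing if backwards else missing + strides

    # broadcasting a 1-d index of shape (3,) against an array of shape (3, 4):
    pad_strides([8], (3,), (3, 4))                  # -> [0, 8]  (prepended)
    pad_strides([8], (3,), (3, 4), backwards=True)  # -> [8, 0]  (appended)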

File pypy/module/micronumpy/test/test_numarray.py

         assert a[1] == 'xyz'
         assert a.imag[0] == 'abc'
         raises(TypeError, 'a.imag = "qop"')
-        a=array([[1+1j, 2-3j, 4+5j],[-6+7j, 8-9j, -2-1j]]) 
+        a=array([[1+1j, 2-3j, 4+5j],[-6+7j, 8-9j, -2-1j]])
         assert a.real[0,1] == 2
         a.real[0,1] = -20
         assert a[0,1].real == -20
         assert a[1,2].imag == 30
         a.real = 13
         assert a[1,1].real == 13
-        a=array([1+1j, 2-3j, 4+5j, -6+7j, 8-9j, -2-1j]) 
+        a=array([1+1j, 2-3j, 4+5j, -6+7j, 8-9j, -2-1j])
         a.real = 13
         assert a[3].real == 13
         a.imag = -5
         from numpypy import array
         # testcases from numpy docstring
         x = array([[1, 2, 3]])
-        assert (x.swapaxes(0, 1) == array([[1], [2], [3]])).all() 
+        assert (x.swapaxes(0, 1) == array([[1], [2], [3]])).all()
         x = array([[[0,1],[2,3]],[[4,5],[6,7]]]) # shape = (2, 2, 2)
-        assert (x.swapaxes(0, 2) == array([[[0, 4], [2, 6]], 
-                                           [[1, 5], [3, 7]]])).all() 
-        assert (x.swapaxes(0, 1) == array([[[0, 1], [4, 5]], 
+        assert (x.swapaxes(0, 2) == array([[[0, 4], [2, 6]],
+                                           [[1, 5], [3, 7]]])).all()
+        assert (x.swapaxes(0, 1) == array([[[0, 1], [4, 5]],
                                            [[2, 3], [6, 7]]])).all()
-        assert (x.swapaxes(1, 2) == array([[[0, 2], [1, 3]], 
+        assert (x.swapaxes(1, 2) == array([[[0, 2], [1, 3]],
                                            [[4, 6],[5, 7]]])).all()
 
         # more complex shape i.e. (2, 2, 3)
-        x = array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]) 
-        assert (x.swapaxes(0, 1) == array([[[1, 2, 3], [7, 8, 9]], 
-                                           [[4, 5, 6], [10, 11, 12]]])).all() 
-        assert (x.swapaxes(0, 2) == array([[[1, 7], [4, 10]], [[2, 8], [5, 11]], 
-                                           [[3, 9], [6, 12]]])).all() 
-        assert (x.swapaxes(1, 2) == array([[[1, 4], [2, 5], [3, 6]], 
-                                           [[7, 10], [8, 11],[9, 12]]])).all() 
+        x = array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])
+        assert (x.swapaxes(0, 1) == array([[[1, 2, 3], [7, 8, 9]],
+                                           [[4, 5, 6], [10, 11, 12]]])).all()
+        assert (x.swapaxes(0, 2) == array([[[1, 7], [4, 10]], [[2, 8], [5, 11]],
+                                           [[3, 9], [6, 12]]])).all()
+        assert (x.swapaxes(1, 2) == array([[[1, 4], [2, 5], [3, 6]],
+                                           [[7, 10], [8, 11],[9, 12]]])).all()
 
         # test slice
-        assert (x[0:1,0:2].swapaxes(0,2) == array([[[1], [4]], [[2], [5]], 
+        assert (x[0:1,0:2].swapaxes(0,2) == array([[[1], [4]], [[2], [5]],
                                                    [[3], [6]]])).all()
         # test virtual
-        assert ((x + x).swapaxes(0,1) == array([[[ 2,  4,  6], [14, 16, 18]], 
+        assert ((x + x).swapaxes(0,1) == array([[[ 2,  4,  6], [14, 16, 18]],
                                          [[ 8, 10, 12], [20, 22, 24]]])).all()
         assert array(1).swapaxes(10, 12) == 1
 
         assert (zeros(1)[[]] == []).all()
 
     def test_int_array_index_setitem(self):
-        from numpypy import array, arange, zeros
+        from numpypy import arange, zeros, array
         a = arange(10)
         a[[3, 2, 1, 5]] = zeros(4, dtype=int)
         assert (a == [0, 0, 0, 0, 4, 0, 6, 7, 8, 9]).all()
         assert (b[array([True, False, True])] == [0, 2]).all()
         raises(ValueError, "array([1, 2])[array([True, True, True])]")
         raises(ValueError, "b[array([[True, False], [True, False]])]")
+        a = array([[1,2,3],[4,5,6],[7,8,9]],int)
+        c = array([True,False,True],bool)
+        b = a[c]
+        assert (a[c] == [[1, 2, 3], [7, 8, 9]]).all()
 
     def test_bool_array_index_setitem(self):
         from numpypy import arange, array
         b = arange(5)
         b[array([True, False, True])] = [20, 21, 0, 0, 0, 0, 0]
-        assert (b == [20, 1, 21, 3, 4]).all() 
+        assert (b == [20, 1, 21, 3, 4]).all()
         raises(ValueError, "array([1, 2])[array([True, False, True])] = [1, 2, 3]")
 
     def test_weakref(self):
         b = array([1, 2, 3, 4])
         assert (a == b) == False
 
+    def test__int__(self):
+        from numpypy import array
+        assert int(array(1)) == 1
+        assert int(array([1])) == 1
+        assert raises(TypeError, "int(array([1, 2]))")
+        assert int(array([1.5])) == 1
+
 
 class AppTestMultiDim(BaseNumpyAppTest):
     def test_init(self):
         assert isinstance(i['data'][0], int)
 
     def test_array_indexing_one_elem(self):
-        skip("not yet")
         from numpypy import array, arange
         raises(IndexError, 'arange(3)[array([3.5])]')
         a = arange(3)[array([1])]
         a = arange(10)
         assert (a.compress([True, False, True]) == [0, 2]).all()
         assert (a.compress([1, 0, 13]) == [0, 2]).all()
+        assert (a.compress([1, 0, 13]) == [0, 2]).all()
         assert (a.compress([1, 0, 13.5]) == [0, 2]).all()
         assert (a.compress(array([1, 0, 13.5], dtype='>f4')) == [0, 2]).all()
         assert (a.compress(array([1, 0, 13.5], dtype='<f4')) == [0, 2]).all()
         BaseNumpyAppTest.setup_class.im_func(cls)
         cls.w_data = cls.space.wrap(struct.pack('dddd', 1, 2, 3, 4))
         cls.w_fdata = cls.space.wrap(struct.pack('f', 2.3))
-        cls.w_float16val = cls.space.wrap('\x00E') # 5.0 in float16 
+        cls.w_float16val = cls.space.wrap('\x00E') # 5.0 in float16
         cls.w_float32val = cls.space.wrap(struct.pack('f', 5.2))
         cls.w_float64val = cls.space.wrap(struct.pack('d', 300.4))
         cls.w_ulongval = cls.space.wrap(struct.pack('L', 12))
         from numpypy import array, arange
         assert array(2.0).argsort() == 0
         nnp = self.non_native_prefix
-        for dtype in ['int', 'float', 'int16', 'float32', 'uint64', 
+        for dtype in ['int', 'float', 'int16', 'float32', 'uint64',
                         nnp + 'i2', complex]:
             a = array([6, 4, -1, 3, 8, 3, 256+20, 100, 101], dtype=dtype)
             c = a.copy()
             assert (a == c).all() # not modified
             a = arange(100)
             assert (a.argsort() == a).all()
-        raises(NotImplementedError, 'arange(10,dtype="float16").argsort()')    
+        raises(NotImplementedError, 'arange(10,dtype="float16").argsort()')
 
     def test_argsort_nd(self):
         from numpypy import array
 
     def test_argsort_axis(self):
         from numpypy import array
-        a = array([[4, 2], [1, 3]]) 
+        a = array([[4, 2], [1, 3]])
         assert (a.argsort(axis=None) == [2, 1, 3, 0]).all()
         assert (a.argsort(axis=-1) == [[1, 0], [0, 1]]).all()
         assert (a.argsort(axis=0) == [[1, 0], [0, 1]]).all()

File pypy/module/pypyjit/interp_jit.py

                             'lastblock',
                             'is_being_profiled',
                             'w_globals',
+                            'w_f_trace',
                             ]
 
 JUMP_ABSOLUTE = opmap['JUMP_ABSOLUTE']
 def set_jitcell_at(newcell, next_instr, is_being_profiled, bytecode):
     bytecode.jit_cells[next_instr, is_being_profiled] = newcell
 
-def confirm_enter_jit(next_instr, is_being_profiled, bytecode, frame, ec):
-    return (frame.w_f_trace is None and
-            ec.w_tracefunc is None)
 
 def can_never_inline(next_instr, is_being_profiled, bytecode):
     return False
 pypyjitdriver = PyPyJitDriver(get_printable_location = get_printable_location,
                               get_jitcell_at = get_jitcell_at,
                               set_jitcell_at = set_jitcell_at,
-                              confirm_enter_jit = confirm_enter_jit,
                               can_never_inline = can_never_inline,
                               should_unroll_one_iteration =
                               should_unroll_one_iteration,

File pypy/module/test_lib_pypy/test_sqlite3.py

         con.commit()
     except _sqlite3.OperationalError:
         pytest.fail("_sqlite3 knew nothing about the implicit ROLLBACK")
+    con.close()
 
 def test_statement_arg_checking():
     con = _sqlite3.connect(':memory:')
     with pytest.raises(ValueError) as e:
         con.execute('insert into foo(x) values (?)', 2)
     assert str(e.value) == 'parameters are of unsupported type'
+    con.close()

File pypy/objspace/std/dictmultiobject.py

         for w_k, w_v in list_pairs_w:
             w_self.setitem(w_k, w_v)
 
+    def setitem(self, w_key, w_value):
+        self.strategy.setitem(self, w_key, w_value)
+
+    def setitem_str(self, key, w_value):
+        self.strategy.setitem_str(self, key, w_value)
+
+
 def _add_indirections():
-    dict_methods = "setitem setitem_str getitem \
+    dict_methods = "getitem \
                     getitem_str delitem length \
                     clear w_keys values \
                     items iterkeys itervalues iteritems setdefault \
     def w_keys(self, w_dict):
         return self.space.newlist(self.unerase(w_dict.dstorage).keys())
 
+    def setitem_str(self, w_dict, s, w_value):
+        self.setitem(w_dict, self.space.wrap(s), w_value)
+
+    def switch_to_object_strategy(self, w_dict):
+        assert 0, "should be unreachable"
+
 create_iterator_classes(ObjectDictStrategy)
 
 class StringDictStrategy(AbstractTypedStrategy, DictStrategy):
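
For context: most W_DictMultiObject methods are generated forwarders that delegate to the current strategy; setitem and setitem_str are now spelled out by hand (and removed from the generated list) so those hot paths stay explicit. A rough sketch of what such a forwarding-method generator looks like, purely illustrative and not the exact PyPy code:

    def add_indirections(cls, method_names):
        # build one forwarding method per name, each delegating to self.strategy
        def make_method(name):
            def forward(self, *args):
                return getattr(self.strategy, name)(self, *args)
            forward.__name__ = name
            return forward
        for name in method_names.split():
            setattr(cls, name, make_method(name))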

File pypy/objspace/std/kwargsdict.py

         return self.space.newlist([self.space.wrap(key) for key in self.unerase(w_dict.dstorage)[0]])
 
     def setitem(self, w_dict, w_key, w_value):
-        space = self.space
         if self.is_correct_type(w_key):
             self.setitem_str(w_dict, self.unwrap(w_key), w_value)
             return

File rpython/annotator/description.py

             result = schedule(graph, inputcells)
             signature = getattr(self.pyobj, '_signature_', None)
             if signature:
-                result = enforce_signature_return(self, signature[1], result)
-                self.bookkeeper.annotator.addpendingblock(graph, graph.returnblock, [result])
+                sigresult = enforce_signature_return(self, signature[1], result)
+                if sigresult is not None:
+                    self.bookkeeper.annotator.addpendingblock(graph, graph.returnblock, [sigresult])
+                    result = sigresult
         # Some specializations may break the invariant of returning
         # annotations that are always more general than the previous time.
         # We restore it here:

File rpython/annotator/signature.py

         inputcells[:] = args_s
 
 def finish_type(paramtype, bookkeeper, func):
-    from rpython.rlib.types import SelfTypeMarker
+    from rpython.rlib.types import SelfTypeMarker, AnyTypeMarker
     if isinstance(paramtype, SomeObject):
         return paramtype
     elif isinstance(paramtype, SelfTypeMarker):
         raise Exception("%r argument declared as annotation.types.self(); class needs decorator rlib.signature.finishsigs()" % (func,))
+    elif isinstance(paramtype, AnyTypeMarker):
+        return None
     else:
         return paramtype(bookkeeper)
 
     assert len(paramtypes) == len(actualtypes)
     params_s = [finish_type(paramtype, funcdesc.bookkeeper, funcdesc.pyobj) for paramtype in paramtypes]
     for i, (s_param, s_actual) in enumerate(zip(params_s, actualtypes)):
+        if s_param is None: # can be anything
+            continue
         if not s_param.contains(s_actual):
             raise Exception("%r argument %d:\n"
                             "expected %s,\n"
                             "     got %s" % (funcdesc, i+1, s_param, s_actual))
-    actualtypes[:] = params_s
+    for i, s_param in enumerate(params_s):
+        if s_param is None:
+            continue
+        actualtypes[i] = s_param
 
 def enforce_signature_return(funcdesc, sigtype, inferredtype):
     s_sigret = finish_type(sigtype, funcdesc.bookkeeper, funcdesc.pyobj)
-    if not s_sigret.contains(inferredtype):
+    if s_sigret is not None and not s_sigret.contains(inferredtype):
         raise Exception("%r return value:\n"
                         "expected %s,\n"
                         "     got %s" % (funcdesc, s_sigret, inferredtype))

File rpython/jit/backend/arm/assembler.py

 from __future__ import with_statement
+
 import os
+
+from rpython.jit.backend.arm import conditions as c, registers as r
+from rpython.jit.backend.arm.arch import (WORD, DOUBLE_WORD, FUNC_ALIGN,
+    JITFRAME_FIXED_SIZE)
+from rpython.jit.backend.arm.codebuilder import ARMv7Builder, OverwritingBuilder
+from rpython.jit.backend.arm.locations import imm, StackLocation
+from rpython.jit.backend.arm.opassembler import ResOpAssembler
+from rpython.jit.backend.arm.regalloc import (Regalloc,
+    CoreRegisterManager, check_imm_arg, VFPRegisterManager,
+    operations as regalloc_operations,
+    operations_with_guard as regalloc_operations_with_guard)
 from rpython.jit.backend.llsupport import jitframe
-from rpython.jit.backend.arm.helper.assembler import saved_registers
-from rpython.jit.backend.arm import conditions as c
-from rpython.jit.backend.arm import registers as r
-from rpython.jit.backend.arm.arch import WORD, DOUBLE_WORD, FUNC_ALIGN, \
-                                    N_REGISTERS_SAVED_BY_MALLOC, \
-                                    JITFRAME_FIXED_SIZE
-from rpython.jit.backend.arm.codebuilder import ARMv7Builder, OverwritingBuilder
-from rpython.jit.backend.arm.locations import get_fp_offset, imm, StackLocation
-from rpython.jit.backend.arm.regalloc import (Regalloc, ARMFrameManager,
-                    CoreRegisterManager, check_imm_arg,
-                    VFPRegisterManager,
-                    operations as regalloc_operations,
-                    operations_with_guard as regalloc_operations_with_guard)
-from rpython.jit.backend.llsupport.assembler import debug_bridge
+from rpython.jit.backend.llsupport.assembler import DEBUG_COUNTER, debug_bridge
 from rpython.jit.backend.llsupport.asmmemmgr import MachineDataBlockWrapper
 from rpython.jit.backend.model import CompiledLoopToken
-from rpython.jit.codewriter import longlong
 from rpython.jit.codewriter.effectinfo import EffectInfo
-from rpython.jit.metainterp.history import AbstractFailDescr, INT, REF, FLOAT
-from rpython.jit.metainterp.history import BoxInt, ConstInt
-from rpython.jit.metainterp.resoperation import rop, ResOperation
-from rpython.rlib import rgc
-from rpython.rlib.objectmodel import we_are_translated, specialize
+from rpython.jit.metainterp.history import AbstractFailDescr, FLOAT
+from rpython.jit.metainterp.resoperation import rop
+from rpython.rlib.debug import debug_print, debug_start, debug_stop
+from rpython.rlib.jit import AsmInfo
+from rpython.rlib.objectmodel import we_are_translated, specialize, compute_unique_id
+from rpython.rlib.rarithmetic import r_uint
 from rpython.rtyper.annlowlevel import llhelper, cast_instance_to_gcref
-from rpython.rtyper.lltypesystem import lltype, rffi, llmemory
-from rpython.rtyper.lltypesystem.lloperation import llop
-from rpython.jit.backend.arm.opassembler import ResOpAssembler
-from rpython.rlib.debug import (debug_print, debug_start, debug_stop,
-                             have_debug_prints, fatalerror)
-from rpython.rlib.jit import AsmInfo
-from rpython.rlib.objectmodel import compute_unique_id
-from rpython.rlib.rarithmetic import intmask, r_uint
-
-
-DEBUG_COUNTER = lltype.Struct('DEBUG_COUNTER', ('i', lltype.Signed),
-                              ('type', lltype.Char),  # 'b'ridge, 'l'abel or
-                                                      # 'e'ntry point
-                              ('number', lltype.Signed))
+from rpython.rtyper.lltypesystem import lltype, rffi
 
 
 class AssemblerARM(ResOpAssembler):
         self.loop_run_counters.append(struct)
         return struct
 
-    def _append_debugging_code(self, operations, tp, number, token):
-        counter = self._register_counter(tp, number, token)
-        c_adr = ConstInt(rffi.cast(lltype.Signed, counter))
-        box = BoxInt()
-        box2 = BoxInt()
-        ops = [ResOperation(rop.GETFIELD_RAW, [c_adr],
-                            box, descr=self.debug_counter_descr),
-               ResOperation(rop.INT_ADD, [box, ConstInt(1)], box2),
-               ResOperation(rop.SETFIELD_RAW, [c_adr, box2],
-                            None, descr=self.debug_counter_descr)]
-        operations.extend(ops)
-
     @specialize.argtype(1)
     def _inject_debugging_code(self, looptoken, operations, tp, number):
         if self._debug:
         mc.gen_load_int(r.r0.value, self.cpu.pos_exception())
         mc.LDR_ri(r.r0.value, r.r0.value)
         mc.TST_rr(r.r0.value, r.r0.value)
-        # restore registers and return 
+        # restore registers and return
         # We check for c.EQ here, meaning all bits zero in this case
         mc.POP([reg.value for reg in r.argument_regs] + [r.pc.value], cond=c.EQ)
         #
             self.mc.LDR_ri(r.lr.value, r.lr.value)             # ldr lr, *lengh
             # calculate ofs
             self.mc.SUB_rr(r.ip.value, r.ip.value, r.sp.value) # SUB ip, current
-            # if ofs 
+            # if ofs
             self.mc.CMP_rr(r.ip.value, r.lr.value)             # CMP ip, lr
             self.mc.BL(self.stack_check_slowpath, c=c.HI)      # call if ip > lr
             #
         self._check_frame_depth(self.mc, self._regalloc.get_gcmap(),
                                 expected_size=expected_size)
 
-
     def _check_frame_depth(self, mc, gcmap, expected_size=-1):
         """ check if the frame is of enough depth to follow this bridge.
         Otherwise reallocate the frame in a helper.
         effectinfo = op.getdescr().get_extra_info()
         oopspecindex = effectinfo.oopspecindex
         asm_llong_operations[oopspecindex](self, op, arglocs, regalloc, fcond)
-        return fcond 
+        return fcond
 
     def regalloc_emit_math(self, op, arglocs, fcond, regalloc):
         effectinfo = op.getdescr().get_extra_info()
             assert 0, 'unsupported case'
 
     def _mov_stack_to_loc(self, prev_loc, loc, cond=c.AL):
-        pushed = False
         if loc.is_reg():
             assert prev_loc.type != FLOAT, 'trying to load from an \
                 incompatible location into a core register'
         self.store_reg(mc, r.ip, r.fp, ofs)
 
 
-
 def not_implemented(msg):
     os.write(2, '[ARM/asm] %s\n' % msg)
     raise NotImplementedError(msg)

File rpython/jit/backend/arm/test/test_calling_convention.py

 from rpython.rtyper.annlowlevel import llhelper
 from rpython.jit.metainterp.history import JitCellToken
-from rpython.jit.backend.test.calling_convention_test import TestCallingConv, parse
+from rpython.jit.backend.test.calling_convention_test import CallingConvTests, parse
 from rpython.rtyper.lltypesystem import lltype
 from rpython.jit.codewriter.effectinfo import EffectInfo
 
 from rpython.jit.backend.arm.test.support import skip_unless_run_slow_tests
 skip_unless_run_slow_tests()
 
-class TestARMCallingConvention(TestCallingConv):
+class TestARMCallingConvention(CallingConvTests):
     # ../../test/calling_convention_test.py
 
     def test_call_argument_spilling(self):

File rpython/jit/backend/llgraph/runner.py

         self.operations = []
         for op in operations:
             if op.getdescr() is not None:
-                if op.is_guard():
+                if op.is_guard() or op.getopnum() == rop.FINISH:
                     newdescr = op.getdescr()
                 else:
                     newdescr = WeakrefDescr(op.getdescr())

File rpython/jit/backend/llsupport/assembler.py

-
+from rpython.jit.backend.llsupport import jitframe
+from rpython.jit.backend.llsupport.memcpy import memcpy_fn
+from rpython.jit.backend.llsupport.symbolic import WORD
+from rpython.jit.metainterp.history import (INT, REF, FLOAT, JitCellToken,
+    ConstInt, BoxInt)
+from rpython.jit.metainterp.resoperation import ResOperation, rop
 from rpython.rlib import rgc
+from rpython.rlib.debug import (debug_start, debug_stop, have_debug_prints,
+                                debug_print)
 from rpython.rlib.rarithmetic import r_uint
-from rpython.jit.backend.llsupport.symbolic import WORD
-from rpython.jit.backend.llsupport import jitframe
-from rpython.jit.metainterp.history import INT, REF, FLOAT, JitCellToken
 from rpython.rtyper.annlowlevel import cast_instance_to_gcref
 from rpython.rtyper.lltypesystem import rffi, lltype
-from rpython.jit.backend.llsupport.memcpy import memcpy_fn
-from rpython.rlib.debug import (debug_print, debug_start, debug_stop,
-                                have_debug_prints)
+
+
+DEBUG_COUNTER = lltype.Struct('DEBUG_COUNTER',
+    # 'b'ridge, 'l'abel or # 'e'ntry point
+    ('i', lltype.Signed),
+    ('type', lltype.Char),
+    ('number', lltype.Signed)
+)
+
 
 class GuardToken(object):
     def __init__(self, cpu, gcmap, faildescr, failargs, fail_locs, exc,
         #     to incompatibilities in how it's done, we leave it for the
         #     caller to deal with
 
+    def _append_debugging_code(self, operations, tp, number, token):
+        counter = self._register_counter(tp, number, token)
+        c_adr = ConstInt(rffi.cast(lltype.Signed, counter))
+        box = BoxInt()
+        box2 = BoxInt()
+        ops = [ResOperation(rop.GETFIELD_RAW, [c_adr],
+                            box, descr=self.debug_counter_descr),
+               ResOperation(rop.INT_ADD, [box, ConstInt(1)], box2),
+               ResOperation(rop.SETFIELD_RAW, [c_adr, box2],
+                            None, descr=self.debug_counter_descr)]
+        operations.extend(ops)
+
 
 def debug_bridge(descr_number, rawstart, codeendpos):
     debug_start("jit-backend-addr")

File rpython/jit/backend/test/calling_convention_test.py

         raise NotImplementedError
 
     def test_call_aligned_explicit_check(self):
+        if sys.maxint == 2 ** 31 - 1:
+            py.test.skip("libffi on 32bit is broken")
         cpu = self.cpu
         if not cpu.supports_floats:
             py.test.skip('requires floats')

File rpython/jit/backend/x86/assembler.py

+import sys
+import os
 
-import sys, os
 from rpython.jit.backend.llsupport import symbolic, jitframe
 from rpython.jit.backend.llsupport.assembler import (GuardToken, BaseAssembler,
-                                                    debug_bridge)
+                                                DEBUG_COUNTER, debug_bridge)
 from rpython.jit.backend.llsupport.asmmemmgr import MachineDataBlockWrapper
 from rpython.jit.backend.llsupport.gcmap import allocate_gcmap
-from rpython.jit.metainterp.history import Const, Box, BoxInt, ConstInt
+from rpython.jit.metainterp.history import Const, Box
 from rpython.jit.metainterp.history import AbstractFailDescr, INT, REF, FLOAT
 from rpython.rtyper.lltypesystem import lltype, rffi, rstr, llmemory
 from rpython.rtyper.lltypesystem.lloperation import llop
     imm0, imm1, FloatImmedLoc, RawEbpLoc, RawEspLoc)
 from rpython.rlib.objectmodel import we_are_translated, specialize
 from rpython.jit.backend.x86 import rx86, codebuf
-from rpython.jit.metainterp.resoperation import rop, ResOperation
+from rpython.jit.metainterp.resoperation import rop
 from rpython.jit.backend.x86 import support
 from rpython.rlib.debug import debug_print, debug_start, debug_stop
 from rpython.rlib import rgc
 from rpython.rlib.rarithmetic import intmask, r_uint
 from rpython.rlib.objectmodel import compute_unique_id
 
+
 # darwin requires the stack to be 16 bytes aligned on calls. Same for gcc 4.5.0,
 # better safe than sorry
 CALL_ALIGN = 16 // WORD
 
+
 def align_stack_words(words):
     return (words + CALL_ALIGN - 1) & ~(CALL_ALIGN-1)
 
-DEBUG_COUNTER = lltype.Struct('DEBUG_COUNTER', ('i', lltype.Signed),
-                              ('type', lltype.Char), # 'b'ridge, 'l'abel or
-                                                     # 'e'ntry point
-                              ('number', lltype.Signed))
 
 class Assembler386(BaseAssembler):
     _regalloc = None
             targettoken._ll_loop_code += rawstart
         self.target_tokens_currently_compiling = None
 
-    def _append_debugging_code(self, operations, tp, number, token):
-        counter = self._register_counter(tp, number, token)
-        c_adr = ConstInt(rffi.cast(lltype.Signed, counter))
-        box = BoxInt()
-        box2 = BoxInt()
-        ops = [ResOperation(rop.GETFIELD_RAW, [c_adr],
-                            box, descr=self.debug_counter_descr),
-               ResOperation(rop.INT_ADD, [box, ConstInt(1)], box2),
-               ResOperation(rop.SETFIELD_RAW, [c_adr, box2],
-                            None, descr=self.debug_counter_descr)]
-        operations.extend(ops)
-
     @specialize.argtype(1)
     def _inject_debugging_code(self, looptoken, operations, tp, number):
         if self._debug:
 
     def _call_assembler_check_descr(self, value, tmploc):
         ofs = self.cpu.get_ofs_of_frame_field('jf_descr')
-        self.mc.CMP_mi((eax.value, ofs), value)
+        self.mc.CMP(mem(eax, ofs), imm(value))
         # patched later
         self.mc.J_il8(rx86.Conditions['E'], 0) # goto B if we get 'done_with_this_frame'
         return self.mc.get_relative_pos()

File rpython/rlib/rmmap.py

-
 from rpython.rtyper.tool import rffi_platform
-from rpython.rtyper.lltypesystem import rffi, lltype, llmemory
+from rpython.rtyper.lltypesystem import rffi, lltype
 from rpython.rlib import rposix
 from rpython.translator.tool.cbuild import ExternalCompilationInfo
 from rpython.rlib.nonconst import NonConstant
 
 _POSIX = os.name == "posix"
 _MS_WINDOWS = os.name == "nt"
-_LINUX = "linux" in sys.platform
 _64BIT = "64bit" in platform.architecture()[0]
-_ARM = platform.machine().startswith('arm')
-_PPC = platform.machine().startswith('ppc')
 _CYGWIN = "cygwin" == sys.platform
 
 class RValueError(Exception):
 if _POSIX:
     includes += ['unistd.h', 'sys/mman.h']
 elif _MS_WINDOWS:
-    includes += ['winsock2.h','windows.h']
+    includes += ['winsock2.h', 'windows.h']
 
 class CConfig:
     _compilation_info_ = ExternalCompilationInfo(
     from rpython.rlib.rwin32 import NULL_HANDLE, INVALID_HANDLE_VALUE
     from rpython.rlib.rwin32 import DWORD, WORD, DWORD_PTR, LPDWORD
     from rpython.rlib.rwin32 import BOOL, LPVOID, LPCSTR, SIZE_T
-    from rpython.rlib.rwin32 import INT, LONG, PLONG
+    from rpython.rlib.rwin32 import LONG, PLONG
 
 # export the constants inside and outside. see __init__.py
 cConfig = rffi_platform.configure(CConfig)
 if _POSIX:
     has_mremap = cConfig['has_mremap']
     c_mmap, c_mmap_safe = external('mmap', [PTR, size_t, rffi.INT, rffi.INT,
-                               rffi.INT, off_t], PTR, macro=True)
+                                   rffi.INT, off_t], PTR, macro=True)
     # 'mmap' on linux32 is a macro that calls 'mmap64'
     _, c_munmap_safe = external('munmap', [PTR, size_t], rffi.INT)
     c_msync, _ = external('msync', [PTR, size_t, rffi.INT], rffi.INT)
 
     # this one is always safe
     _pagesize = rffi_platform.getintegerfunctionresult('getpagesize',
-                                                includes=includes)
+                                                       includes=includes)
     _get_allocation_granularity = _get_page_size = lambda: _pagesize
 
 elif _MS_WINDOWS:
             'SYSINFO_STRUCT',
                 ("wProcessorArchitecture", WORD),
                 ("wReserved", WORD),
-            )
+        )
 
         SYSINFO_UNION = rffi.CStruct(
             'union SYSINFO_UNION',
                 ("dwOemId", DWORD),
                 ("_struct_", SYSINFO_STRUCT),
-            )
+        )
         # sorry, I can't find a way to insert the above
         # because the union field has no name
         SYSTEM_INFO = rffi_platform.Struct(
     VirtualFree = winexternal('VirtualFree',
                               [rffi.VOIDP, rffi.SIZE_T, DWORD], BOOL)
 
-
     def _get_page_size():
         try:
             si = rffi.make(SYSTEM_INFO)
                 # this is not checked
                 return res
             elif _POSIX:
-##                XXX why is this code here?  There is no equivalent in CPython
-##                if _LINUX:
-##                    # alignment of the address
-##                    value = cast(self.data, c_void_p).value
-##                    aligned_value = value & ~(PAGESIZE - 1)
-##                    # the size should be increased too. otherwise the final
-##                    # part is not "msynced"
-##                    new_size = size + value & (PAGESIZE - 1)
                 res = c_msync(start, size, MS_SYNC)
                 if res == -1:
                     errno = rposix.get_errno()
 
         # check bounds
         if (src < 0 or dest < 0 or count < 0 or
-            src + count > self.size or dest + count > self.size):
+                src + count > self.size or dest + count > self.size):
             raise RValueError("source or destination out of range")
 
         datasrc = self.getptr(src)
             SetEndOfFile(self.file_handle)
             # create another mapping object and remap the file view
             res = CreateFileMapping(self.file_handle, NULL, PAGE_READWRITE,
-                                 newsize_high, newsize_low, self.tagname)
+                                    newsize_high, newsize_low, self.tagname)
             self.map_handle = res
 
-            dwErrCode = 0
             if self.map_handle:
                 data = MapViewOfFile(self.map_handle, FILE_MAP_WRITE,
                                      offset_high, offset_low, newsize)
 
         if len(value) != 1:
             raise RValueError("mmap assignment must be "
-                             "single-character string")
+                              "single-character string")
         if index < 0:
             index += self.size
         self.data[index] = value[0]
 
 if _POSIX:
     def mmap(fileno, length, flags=MAP_SHARED,
-        prot=PROT_WRITE | PROT_READ, access=_ACCESS_DEFAULT, offset=0):
+             prot=PROT_WRITE | PROT_READ, access=_ACCESS_DEFAULT, offset=0):
 
         fd = fileno
 
         # check access is not there when flags and prot are there
-        if access != _ACCESS_DEFAULT and ((flags != MAP_SHARED) or\
+        if access != _ACCESS_DEFAULT and ((flags != MAP_SHARED) or
                                           (prot != (PROT_WRITE | PROT_READ))):
             raise RValueError("mmap can't specify both access and flags, prot.")
 
                 pass     # ignore non-seeking files and errors and trust map_size
             else:
                 if not high and low <= sys.maxint:
-                   size = low
-                else:   
+                    size = low
+                else:
                     # not so sure if the signed/unsigned strictness is a good idea:
                     high = rffi.cast(lltype.Unsigned, high)
                     low = rffi.cast(lltype.Unsigned, low)
         case of a sandboxed process
         """
         null = lltype.nullptr(rffi.VOIDP.TO)
-        res = VirtualAlloc(null, map_size, MEM_COMMIT|MEM_RESERVE,
+        res = VirtualAlloc(null, map_size, MEM_COMMIT | MEM_RESERVE,
                            PAGE_EXECUTE_READWRITE)
         if not res:
             raise MemoryError
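
One of the hunks above only re-wraps the argument check in mmap(); for readers skimming the diff, the constraint it enforces is that the convenience 'access' argument cannot be combined with an explicit flags/prot pair. A tiny self-contained restatement in plain Python, where the constant values are placeholders and not the real ones from rmmap:

    _ACCESS_DEFAULT = 0              # placeholder values, illustration only
    MAP_SHARED = 1
    PROT_READ, PROT_WRITE = 1, 2

    def check_mmap_args(flags=MAP_SHARED, prot=PROT_WRITE | PROT_READ,
                        access=_ACCESS_DEFAULT):
        # 'access' is the convenience layer; it may not be combined with an
        # explicit flags/prot pair that differs from the defaults.
        if access != _ACCESS_DEFAULT and ((flags != MAP_SHARED) or
                                          (prot != (PROT_WRITE | PROT_READ))):
            raise ValueError("mmap can't specify both access and flags, prot.")

    check_mmap_args(access=2)                      # ok: flags/prot left at defaults
    # check_mmap_args(access=2, prot=PROT_READ)    # would raise ValueError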

File rpython/rlib/ropenssl.py

 ssl_external('SSLv23_method', [], SSL_METHOD)
 ssl_external('SSL_CTX_use_PrivateKey_file', [SSL_CTX, rffi.CCHARP, rffi.INT], rffi.INT)
 ssl_external('SSL_CTX_use_certificate_chain_file', [SSL_CTX, rffi.CCHARP], rffi.INT)
-ssl_external('SSL_CTX_get_options', [SSL_CTX], rffi.INT, macro=True)
-ssl_external('SSL_CTX_set_options', [SSL_CTX, rffi.INT], rffi.INT, macro=True)
+ssl_external('SSL_CTX_get_options', [SSL_CTX], rffi.LONG, macro=True)
+ssl_external('SSL_CTX_set_options', [SSL_CTX, rffi.LONG], rffi.LONG, macro=True)
 if HAVE_SSL_CTX_CLEAR_OPTIONS:
-    ssl_external('SSL_CTX_clear_options', [SSL_CTX, rffi.INT], rffi.INT,
+    ssl_external('SSL_CTX_clear_options', [SSL_CTX, rffi.LONG], rffi.LONG,
                  macro=True)
 ssl_external('SSL_CTX_ctrl', [SSL_CTX, rffi.INT, rffi.INT, rffi.VOIDP], rffi.INT)
 ssl_external('SSL_CTX_set_verify', [SSL_CTX, rffi.INT, rffi.VOIDP], lltype.Void)
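
The widening from rffi.INT to rffi.LONG above matches the OpenSSL versions targeted here, where the option getters/setters are macros around SSL_CTX_ctrl() and the option mask is a C long; some option bits sit above what a signed 32-bit int can represent. A quick plain-Python illustration of what a 32-bit slot does to such a bit (0x80000000 is just an example pattern, not a specific SSL_OP_* constant):

    import ctypes

    mask = 0x80000000                  # example high bit, illustration only
    print(ctypes.c_int(mask).value)    # -2147483648: sign-flipped in a 32-bit slot
    print(ctypes.c_long(mask).value)   # 2147483648 on LP64 systems, where long is 64-bit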

File rpython/rlib/test/test_signature.py

     exc = py.test.raises(Exception, annotate_at, C.incomplete_sig_meth).value
     assert 'incomplete_sig_meth' in repr(exc.args)
     assert 'finishsigs' in repr(exc.args)
+
+def test_any_as_argument():
+    @signature(types.any(), types.int(), returns=types.float())
+    def f(x, y):
+        return x + y
+    @signature(types.int(), returns=types.float())
+    def g(x):
+        return f(x, x)
+    sig = getsig(g)
+    assert sig == [model.SomeInteger(), model.SomeFloat()]
+
+    @signature(types.float(), returns=types.float())
+    def g(x):
+        return f(x, 4)
+    sig = getsig(g)
+    assert sig == [model.SomeFloat(), model.SomeFloat()]
+
+    @signature(types.str(), returns=types.int())
+    def cannot_add_string(x):
+        return f(x, 2)
+    exc = py.test.raises(Exception, annotate_at, cannot_add_string).value
+    assert 'Blocked block' in repr(exc.args)
+
+def test_return_any():
+    @signature(types.int(), returns=types.any())
+    def f(x):
+        return x
+    sig = getsig(f)
+    assert sig == [model.SomeInteger(), model.SomeInteger()]
+
+    @signature(types.str(), returns=types.any())
+    def cannot_add_string(x):
+        return f(3) + x
+    exc = py.test.raises(Exception, annotate_at, cannot_add_string).value
+    assert 'Blocked block' in repr(exc.args)
+    assert 'cannot_add_string' in repr(exc.args)

File rpython/rlib/types.py

 
 def self():
     return SelfTypeMarker()
+
+
+class AnyTypeMarker(object):
+    pass
+
+def any():
+    return AnyTypeMarker()
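
The new types.any() returns a plain sentinel object; as the tests in test_signature.py above show, an argument or return annotated with it is left for the annotator to infer from the call sites instead of being forced to a fixed type. A minimal, self-contained sketch of the sentinel-marker pattern in plain Python; this illustrates the idea only and is not the real rpython.rlib.signature machinery:

    class AnyTypeMarker(object):
        pass

    def any():
        return AnyTypeMarker()

    def check_args(markers, args):
        # Enforce each marker, except where the marker says "anything goes".
        for marker, arg in zip(markers, args):
            if isinstance(marker, AnyTypeMarker):
                continue                     # skip checking this argument
            if not isinstance(arg, marker):
                raise TypeError("expected %r, got %r" % (marker, type(arg)))

    check_args([int, any()], [3, "whatever"])    # passes: second slot unchecked
    # check_args([int, any()], ["no", "x"])      # would raise TypeError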

File rpython/rtyper/lltypesystem/ll2ctypes.py

 
 _POSIX = os.name == "posix"
 _MS_WINDOWS = os.name == "nt"
-_LINUX = "linux" in sys.platform
 _64BIT = "64bit" in host_platform.architecture()[0]
 
 
         return ctype()
 
 def do_allocation_in_far_regions():
-    """On 32 bits: this reserves 1.25GB of address space, or 2.5GB on Linux,
+    """On 32 bits: this reserves 1.25GB of address space, or 2.5GB on POSIX,
        which helps test this module for address values that are signed or
        unsigned.
 
         if _64BIT:
             PIECESIZE = 0x80000000
         else:
-            if _LINUX:
+            if _POSIX:
                 PIECESIZE = 0x10000000
             else:
                 PIECESIZE = 0x08000000
         PIECES = 10
         flags = (0,)
-        if _LINUX:
+        if _POSIX:
             flags = (rmmap.MAP_PRIVATE|rmmap.MAP_ANONYMOUS|rmmap.MAP_NORESERVE,
                      rmmap.PROT_READ|rmmap.PROT_WRITE)
-        if _MS_WINDOWS:
+        elif _MS_WINDOWS:
             flags = (rmmap.MEM_RESERVE,)
             # XXX seems not to work
+        else:
+            assert False  # should always generate flags
         m = rmmap.mmap(-1, PIECES * PIECESIZE, *flags)
         m.close = lambda : None    # leak instead of giving a spurious
                                    # error at CPython's shutdown
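
For context on the hunks above: do_allocation_in_far_regions() only reserves widely separated pieces of address space so that tests see pointer values in both the "signed-looking" and "unsigned-looking" halves; the pages are never touched, so almost no memory is committed. A stand-alone CPython sketch of the reservation side of that trick (Unix-only; the real code goes through rpython.rlib.rmmap and, on POSIX, also passes MAP_NORESERVE):

    import mmap

    # An anonymous, private mapping merely reserves address space; physical
    # pages are typically only backed once they are first written to.
    reserved = mmap.mmap(-1, 64 * 1024 * 1024,
                         flags=mmap.MAP_PRIVATE | mmap.MAP_ANONYMOUS)
    print(len(reserved))     # 67108864 bytes reserved, none of them touched yet
    reserved.close()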