Commits

wlav committed c4081e8 Merge

merge default into branch

  • Participants
  • Parent commits 58b4496, 0b33ee5
  • Branches reflex-support

Comments (0)

Files changed (112)

File lib-python/2.7/test/test_csv.py

     """
     def _test_arg_valid(self, ctor, arg):
         self.assertRaises(TypeError, ctor)
-        self.assertRaises(TypeError, ctor, None)
+        # PyPy gets an AttributeError instead of a TypeError
+        self.assertRaises((TypeError, AttributeError), ctor, None)
         self.assertRaises(TypeError, ctor, arg, bad_attr = 0)
         self.assertRaises(TypeError, ctor, arg, delimiter = 0)
         self.assertRaises(TypeError, ctor, arg, delimiter = 'XX')
         self.assertRaises((TypeError, AttributeError), setattr, obj.dialect,
                           'delimiter', ':')
         self.assertRaises(AttributeError, delattr, obj.dialect, 'quoting')
-        self.assertRaises(AttributeError, setattr, obj.dialect,
+        # PyPy gets a TypeError instead of an AttributeError
+        self.assertRaises((AttributeError, TypeError), setattr, obj.dialect,
                           'quoting', None)
 
     def test_reader_attrs(self):
             os.unlink(name)
 
     def test_write_arg_valid(self):
-        self.assertRaises(csv.Error, self._write_test, None, '')
+        # PyPy gets a TypeError instead of a csv.Error for "not a sequence"
+        self.assertRaises((csv.Error, TypeError), self._write_test, None, '')
         self._write_test((), '')
         self._write_test([None], '""')
         self.assertRaises(csv.Error, self._write_test,

File lib-python/conftest.py

     RegrTest('test_cpickle.py', core=True),
     RegrTest('test_cprofile.py'), 
     RegrTest('test_crypt.py', usemodules='crypt', skip=skip_win32),
-    RegrTest('test_csv.py'),
+    RegrTest('test_csv.py', usemodules='_csv'),
 
     RegrTest('test_curses.py', skip="unsupported extension module"),
     RegrTest('test_datetime.py'),

File lib_pypy/_csv.py

                             (self.dialect.delimiter, self.dialect.quotechar))
 
         elif self.state == self.EAT_CRNL:
-            if c in '\r\n':
-                pass
-            else:
+            if c not in '\r\n':
                 raise Error("new-line character seen in unquoted field - "
                             "do you need to open the file "
                             "in universal-newline mode?")

File lib_pypy/_ctypes/pointer.py

         addr = self._buffer[0]
         if addr == 0:
             raise ValueError("NULL pointer access")
-        return self._type_.from_address(addr)
+        instance = self._type_.from_address(addr)
+        instance.__dict__['_base'] = self
+        return instance
 
     def setcontents(self, value):
         if not isinstance(value, self._type_):

File lib_pypy/numpypy/core/_methods.py

 # Array methods which are called by both the C-code for the method
 # and the Python code for the NumPy-namespace function
 
+#from numpy.core import multiarray as mu
+#from numpy.core import umath as um
 import _numpypy as mu
 um = mu
-#from numpypy.core import umath as um
-from numpypy.core.numeric import asanyarray
+from numpy.core.numeric import asanyarray
 
-def _amax(a, axis=None, out=None, skipna=False, keepdims=False):
+def _amax(a, axis=None, out=None, keepdims=False):
     return um.maximum.reduce(a, axis=axis,
-                            out=out, skipna=skipna, keepdims=keepdims)
+                            out=out, keepdims=keepdims)
 
-def _amin(a, axis=None, out=None, skipna=False, keepdims=False):
+def _amin(a, axis=None, out=None, keepdims=False):
     return um.minimum.reduce(a, axis=axis,
-                            out=out, skipna=skipna, keepdims=keepdims)
+                            out=out, keepdims=keepdims)
 
-def _sum(a, axis=None, dtype=None, out=None, skipna=False, keepdims=False):
+def _sum(a, axis=None, dtype=None, out=None, keepdims=False):
     return um.add.reduce(a, axis=axis, dtype=dtype,
-                            out=out, skipna=skipna, keepdims=keepdims)
+                            out=out, keepdims=keepdims)
 
-def _prod(a, axis=None, dtype=None, out=None, skipna=False, keepdims=False):
+def _prod(a, axis=None, dtype=None, out=None, keepdims=False):
     return um.multiply.reduce(a, axis=axis, dtype=dtype,
-                            out=out, skipna=skipna, keepdims=keepdims)
+                            out=out, keepdims=keepdims)
 
-def _mean(a, axis=None, dtype=None, out=None, skipna=False, keepdims=False):
+def _any(a, axis=None, dtype=None, out=None, keepdims=False):
+    return um.logical_or.reduce(a, axis=axis, dtype=dtype, out=out,
+                                keepdims=keepdims)
+
+def _all(a, axis=None, dtype=None, out=None, keepdims=False):
+    return um.logical_and.reduce(a, axis=axis, dtype=dtype, out=out,
+                                 keepdims=keepdims)
+
+def _count_reduce_items(arr, axis):
+    if axis is None:
+        axis = tuple(xrange(arr.ndim))
+    if not isinstance(axis, tuple):
+        axis = (axis,)
+    items = 1
+    for ax in axis:
+        items *= arr.shape[ax]
+    return items
+
+def _mean(a, axis=None, dtype=None, out=None, keepdims=False):
     arr = asanyarray(a)
 
     # Upgrade bool, unsigned int, and int to float64
     if dtype is None and arr.dtype.kind in ['b','u','i']:
         ret = um.add.reduce(arr, axis=axis, dtype='f8',
-                            out=out, skipna=skipna, keepdims=keepdims)
+                            out=out, keepdims=keepdims)
     else:
         ret = um.add.reduce(arr, axis=axis, dtype=dtype,
-                            out=out, skipna=skipna, keepdims=keepdims)
-    rcount = mu.count_reduce_items(arr, axis=axis,
-                            skipna=skipna, keepdims=keepdims)
+                            out=out, keepdims=keepdims)
+    rcount = _count_reduce_items(arr, axis)
     if isinstance(ret, mu.ndarray):
         ret = um.true_divide(ret, rcount,
-                        casting='unsafe', subok=False)
+                        out=ret, casting='unsafe', subok=False)
     else:
         ret = ret / float(rcount)
     return ret
 
 def _var(a, axis=None, dtype=None, out=None, ddof=0,
-                            skipna=False, keepdims=False):
+                            keepdims=False):
     arr = asanyarray(a)
 
     # First compute the mean, saving 'rcount' for reuse later
     if dtype is None and arr.dtype.kind in ['b','u','i']:
-        arrmean = um.add.reduce(arr, axis=axis, dtype='f8',
-                            skipna=skipna, keepdims=True)
+        arrmean = um.add.reduce(arr, axis=axis, dtype='f8', keepdims=True)
     else:
-        arrmean = um.add.reduce(arr, axis=axis, dtype=dtype,
-                            skipna=skipna, keepdims=True)
-    rcount = mu.count_reduce_items(arr, axis=axis,
-                            skipna=skipna, keepdims=True)
+        arrmean = um.add.reduce(arr, axis=axis, dtype=dtype, keepdims=True)
+    rcount = _count_reduce_items(arr, axis)
     if isinstance(arrmean, mu.ndarray):
         arrmean = um.true_divide(arrmean, rcount,
-                                  casting='unsafe', subok=False)
+                            out=arrmean, casting='unsafe', subok=False)
     else:
         arrmean = arrmean / float(rcount)
 
 
     # (arr - arrmean) ** 2
     if arr.dtype.kind == 'c':
-        x = um.multiply(x, um.conjugate(x)).real
+        x = um.multiply(x, um.conjugate(x), out=x).real
     else:
-        x = um.multiply(x, x)
+        x = um.multiply(x, x, out=x)
 
     # add.reduce((arr - arrmean) ** 2, axis)
-    ret = um.add.reduce(x, axis=axis, dtype=dtype, out=out,
-                        skipna=skipna, keepdims=keepdims)
+    ret = um.add.reduce(x, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
 
     # add.reduce((arr - arrmean) ** 2, axis) / (n - ddof)
     if not keepdims and isinstance(rcount, mu.ndarray):
     rcount -= ddof
     if isinstance(ret, mu.ndarray):
         ret = um.true_divide(ret, rcount,
-                        casting='unsafe', subok=False)
+                        out=ret, casting='unsafe', subok=False)
     else:
         ret = ret / float(rcount)
 
     return ret
 
-def _std(a, axis=None, dtype=None, out=None, ddof=0,
-                            skipna=False, keepdims=False):
+def _std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
     ret = _var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
-                                skipna=skipna, keepdims=keepdims)
+               keepdims=keepdims)
 
     if isinstance(ret, mu.ndarray):
-        ret = um.sqrt(ret)
+        ret = um.sqrt(ret, out=ret)
     else:
         ret = um.sqrt(ret)
 

File lib_pypy/numpypy/core/arrayprint.py

 
 import sys
 import _numpypy as _nt
-from _numpypy import maximum, minimum, absolute, not_equal, isinf, isnan, isna
+from _numpypy import maximum, minimum, absolute, not_equal, isnan, isinf
 #from _numpypy import format_longfloat, datetime_as_string, datetime_data
-from .fromnumeric import ravel
+from fromnumeric import ravel
 
 
 def product(x, y): return x*y
 _line_width = 75
 _nan_str = 'nan'
 _inf_str = 'inf'
-_na_str = 'NA'
 _formatter = None  # formatting function for array elements
 
 if sys.version_info[0] >= 3:
 
 def set_printoptions(precision=None, threshold=None, edgeitems=None,
                      linewidth=None, suppress=None,
-                     nanstr=None, infstr=None, nastr=None,
+                     nanstr=None, infstr=None,
                      formatter=None):
     """
     Set printing options.
         String representation of floating point not-a-number (default nan).
     infstr : str, optional
         String representation of floating point infinity (default inf).
-    nastr : str, optional
-        String representation of NA missing value (default NA).
     formatter : dict of callables, optional
         If not None, the keys should indicate the type(s) that the respective
         formatting function applies to.  Callables should return a string.
 
     global _summaryThreshold, _summaryEdgeItems, _float_output_precision, \
            _line_width, _float_output_suppress_small, _nan_str, _inf_str, \
-           _na_str, _formatter
+           _formatter
     if linewidth is not None:
         _line_width = linewidth
     if threshold is not None:
         _nan_str = nanstr
     if infstr is not None:
         _inf_str = infstr
-    if nastr is not None:
-        _na_str = nastr
     _formatter = formatter
 
 def get_printoptions():
              suppress=_float_output_suppress_small,
              nanstr=_nan_str,
              infstr=_inf_str,
-             nastr=_na_str,
              formatter=_formatter)
     return d
 
     return b
 
 def _boolFormatter(x):
-    if isna(x):
-        return str(x).replace('NA', _na_str, 1)
-    elif x:
+    if x:
         return ' True'
     else:
         return 'False'
 
 
 def repr_format(x):
-    if isna(x):
-        return str(x).replace('NA', _na_str, 1)
-    else:
-        return repr(x)
+    return repr(x)
 
 def _array2string(a, max_line_width, precision, suppress_small, separator=' ',
                   prefix="", formatter=None):
                   #'complexfloat' : ComplexFormat(data, precision,
                   #                               suppress_small),
                   #'longcomplexfloat' : LongComplexFormat(precision),
-                  #'datetime' : DatetimeFormat(data),
-                  #'timedelta' : TimedeltaFormat(data),
+                  'datetime' : DatetimeFormat(data),
+                  'timedelta' : TimedeltaFormat(data),
                   'numpystr' : repr_format,
                   'str' : str}
 
             #    format_function = formatdict['longfloat']
             #else:
             format_function = formatdict['float']
-        elif issubclass(dtypeobj, _nt.complexfloating):
-            if issubclass(dtypeobj, _nt.clongfloat):
-                format_function = formatdict['longcomplexfloat']
-            else:
-                format_function = formatdict['complexfloat']
+        #elif issubclass(dtypeobj, _nt.complexfloating):
+        #    if issubclass(dtypeobj, _nt.clongfloat):
+        #        format_function = formatdict['longcomplexfloat']
+        #    else:
+        #        format_function = formatdict['complexfloat']
         elif issubclass(dtypeobj, (_nt.unicode_, _nt.string_)):
             format_function = formatdict['numpystr']
         elif issubclass(dtypeobj, _nt.datetime64):
 
     if a.shape == ():
         x = a.item()
-        if isna(x):
-            lst = str(x).replace('NA', _na_str, 1)
-        else:
-            try:
-                lst = a._format(x)
-                msg = "The `_format` attribute is deprecated in Numpy " \
-                      "2.0 and will be removed in 2.1. Use the " \
-                      "`formatter` kw instead."
-                import warnings
-                warnings.warn(msg, DeprecationWarning)
-            except AttributeError:
-                if isinstance(x, tuple):
-                    x = _convert_arrays(x)
-                lst = style(x)
+        try:
+            lst = a._format(x)
+            msg = "The `_format` attribute is deprecated in Numpy " \
+                  "2.0 and will be removed in 2.1. Use the " \
+                  "`formatter` kw instead."
+            import warnings
+            warnings.warn(msg, DeprecationWarning)
+        except AttributeError:
+            if isinstance(x, tuple):
+                x = _convert_arrays(x)
+            lst = style(x)
     elif reduce(product, a.shape) == 0:
         # treat as a null array if any of shape elements == 0
         lst = "[]"
         self.exp_format = False
         self.large_exponent = False
         self.max_str_len = 0
-        #try:
-        self.fillFormat(data)
-        #except (TypeError, NotImplementedError):
+        try:
+            self.fillFormat(data)
+        except (TypeError, NotImplementedError):
             # if reduce(data) fails, this instance will not be called, just
             # instantiated in formatdict.
-            #pass
+            pass
 
     def fillFormat(self, data):
         import numeric as _nc
-        # XXX pypy unimplemented
-        #errstate = _nc.seterr(all='ignore')
+        errstate = _nc.seterr(all='ignore')
         try:
-            special = isnan(data) | isinf(data) | isna(data)
-            special[isna(data)] = False
+            special = isnan(data) | isinf(data)
             valid = not_equal(data, 0) & ~special
-            valid[isna(data)] = False
             non_zero = absolute(data.compress(valid))
             if len(non_zero) == 0:
                 max_val = 0.
                 min_val = 0.
             else:
-                max_val = maximum.reduce(non_zero, skipna=True)
-                min_val = minimum.reduce(non_zero, skipna=True)
+                max_val = maximum.reduce(non_zero)
+                min_val = minimum.reduce(non_zero)
                 if max_val >= 1.e8:
                     self.exp_format = True
                 if not self.suppress_small and (min_val < 0.0001
                                            or max_val/min_val > 1000.):
                     self.exp_format = True
         finally:
-            pass
-            # XXX pypy unimplemented
-            #_nc.seterr(**errstate)
+            _nc.seterr(**errstate)
 
         if self.exp_format:
             self.large_exponent = 0 < min_val < 1e-99 or max_val >= 1e100
                 precision = 0
             precision = min(self.precision, precision)
             self.max_str_len = len(str(int(max_val))) + precision + 2
-            if special.any():
+            if _nc.any(special):
                 self.max_str_len = max(self.max_str_len,
                                        len(_nan_str),
-                                       len(_inf_str)+1,
-                                       len(_na_str))
+                                       len(_inf_str)+1)
             if self.sign:
                 format = '%#+'
             else:
 
     def __call__(self, x, strip_zeros=True):
         import numeric as _nc
-        #err = _nc.seterr(invalid='ignore')
+        err = _nc.seterr(invalid='ignore')
         try:
-            if isna(x):
-                return self.special_fmt % (str(x).replace('NA', _na_str, 1),)
-            elif isnan(x):
+            if isnan(x):
                 if self.sign:
                     return self.special_fmt % ('+' + _nan_str,)
                 else:
                 else:
                     return self.special_fmt % ('-' + _inf_str,)
         finally:
-            pass
-            #_nc.seterr(**err)
+            _nc.seterr(**err)
 
         s = self.format % x
         if self.large_exponent:
 class IntegerFormat(object):
     def __init__(self, data):
         try:
-            max_str_len = max(len(str(maximum.reduce(data, skipna=True))),
-                              len(str(minimum.reduce(data, skipna=True))))
+            max_str_len = max(len(str(maximum.reduce(data))),
+                              len(str(minimum.reduce(data))))
             self.format = '%' + str(max_str_len) + 'd'
-        except TypeError, NotImplementedError:
+        except (TypeError, NotImplementedError):
             # if reduce(data) fails, this instance will not be called, just
             # instantiated in formatdict.
             pass
             pass
 
     def __call__(self, x):
-        if isna(x):
-            return str(x).replace('NA', _na_str, 1)
-        elif _MININT < x < _MAXINT:
+        if _MININT < x < _MAXINT:
             return self.format % x
         else:
             return "%s" % x
         self.sign = sign
 
     def __call__(self, x):
-        if isna(x):
-            return str(x).replace('NA', _na_str, 1)
-        elif isnan(x):
+        if isnan(x):
             if self.sign:
                 return '+' + _nan_str
             else:
         self.imag_format = LongFloatFormat(precision, sign=True)
 
     def __call__(self, x):
-        if isna(x):
-            return str(x).replace('NA', _na_str, 1)
-        else:
-            r = self.real_format(x.real)
-            i = self.imag_format(x.imag)
-            return r + i + 'j'
+        r = self.real_format(x.real)
+        i = self.imag_format(x.imag)
+        return r + i + 'j'
 
 
 class ComplexFormat(object):
                                        sign=True)
 
     def __call__(self, x):
-        if isna(x):
-            return str(x).replace('NA', _na_str, 1)
+        r = self.real_format(x.real, strip_zeros=False)
+        i = self.imag_format(x.imag, strip_zeros=False)
+        if not self.imag_format.exp_format:
+            z = i.rstrip('0')
+            i = z + 'j' + ' '*(len(i)-len(z))
         else:
-            r = self.real_format(x.real, strip_zeros=False)
-            i = self.imag_format(x.imag, strip_zeros=False)
-            if not self.imag_format.exp_format:
-                z = i.rstrip('0')
-                i = z + 'j' + ' '*(len(i)-len(z))
-            else:
-                i = i + 'j'
-            return r + i
+            i = i + 'j'
+        return r + i
 
 class DatetimeFormat(object):
     def __init__(self, x, unit=None,
         self.casting = casting
 
     def __call__(self, x):
-        if isna(x):
-            return str(x).replace('NA', _na_str, 1)
-        else:
-            return "'%s'" % datetime_as_string(x,
-                                        unit=self.unit,
-                                        timezone=self.timezone,
-                                        casting=self.casting)
+        return "'%s'" % datetime_as_string(x,
+                                    unit=self.unit,
+                                    timezone=self.timezone,
+                                    casting=self.casting)
 
 class TimedeltaFormat(object):
     def __init__(self, data):
             self.format = '%' + str(max_str_len) + 'd'
 
     def __call__(self, x):
-        if isna(x):
-            return str(x).replace('NA', _na_str, 1)
-        else:
-            return self.format % x.astype('i8')
+        return self.format % x.astype('i8')
 

File lib_pypy/numpypy/core/numeric.py

 
 from _numpypy import array, ndarray, int_, float_, bool_ #, complex_# , longlong
 from _numpypy import concatenate
+from .fromnumeric import any
 import math
 import sys
 import _numpypy as multiarray # ARGH
 
 newaxis = None
 
-def asanyarray(a, dtype=None, order=None, maskna=None, ownmaskna=False):
+# XXX this file to be reviewed
+def seterr(**args):
+    return args
+
+def asanyarray(a, dtype=None, order=None):
     """
     Convert the input to an ndarray, but pass ndarray subclasses through.
 
     order : {'C', 'F'}, optional
         Whether to use row-major ('C') or column-major ('F') memory
         representation.  Defaults to 'C'.
-   maskna : bool or None, optional
-        If this is set to True, it forces the array to have an NA mask.
-        If this is set to False, it forces the array to not have an NA
-        mask.
-    ownmaskna : bool, optional
-        If this is set to True, forces the array to have a mask which
-        it owns.
 
     Returns
     -------
     True
 
     """
-    return array(a, dtype, copy=False, order=order, subok=True,
-                                maskna=maskna, ownmaskna=ownmaskna)
+    return array(a, dtype, copy=False, order=order, subok=True)
 
 def base_repr(number, base=2, padding=0):
     """
         return False
     return bool((a1 == a2).all())
 
-def asarray(a, dtype=None, order=None, maskna=None, ownmaskna=False):
+def asarray(a, dtype=None, order=None):
     """
     Convert the input to an array.
 
     order : {'C', 'F'}, optional
         Whether to use row-major ('C') or column-major ('F' for FORTRAN)
         memory representation.  Defaults to 'C'.
-   maskna : bool or None, optional
-        If this is set to True, it forces the array to have an NA mask.
-        If this is set to False, it forces the array to not have an NA
-        mask.
-    ownmaskna : bool, optional
-        If this is set to True, forces the array to have a mask which
-        it owns.
 
     Returns
     -------
     True
 
     """
-    return array(a, dtype, copy=False, order=order,
-                            maskna=maskna, ownmaskna=ownmaskna)
+    return array(a, dtype, copy=False, order=order)
 
 set_string_function(array_str, 0)
 set_string_function(array_repr, 1)

File pypy/config/pypyoption.py

      "thread", "itertools", "pyexpat", "_ssl", "cpyext", "array",
      "_bisect", "binascii", "_multiprocessing", '_warnings',
      "_collections", "_multibytecodec", "micronumpy", "_ffi",
-     "_continuation", "_cffi_backend"]
+     "_continuation", "_cffi_backend", "_csv"]
 ))
 
 translation_modules = default_modules.copy()

File pypy/doc/jit/pyjitpl5.rst

 
 A *virtual* value is an array, struct, or RPython level instance that is created
 during the loop and does not escape from it via calls or longevity past the
-loop.  Since it is only used by the JIT, it be "optimized out"; the value
+loop.  Since it is only used by the JIT, it can be "optimized out"; the value
 doesn't have to be allocated at all and its fields can be stored as first class
 values instead of dereferencing them in memory.  Virtuals allow temporary objects
 in the interpreter to be unwrapped.  For example, a W_IntObject in the PyPy can

File pypy/doc/project-ideas.rst

 -------------------------
 
 PyPy's implementation of the Python ``long`` type is slower than CPython's.
-Find out why and optimize them.
 +Find out why and optimize it.  **UPDATE:** this was done (thanks stian).
 
 Make bytearray type fast
 ------------------------
 
 * A concurrent garbage collector (a lot of work)
 
-STM, a.k.a. "remove the GIL"
-----------------------------
+STM (Software Transactional Memory)
+-----------------------------------
 
-Removing the GIL --- or more precisely, a GIL-less thread-less solution ---
-is `now work in progress.`__  Contributions welcome.
+This is work in progress.  Besides the main development path, whose goal is
+to make a (relatively fast) version of pypy which includes STM, there are
+independent topics that can already be experimented with on the existing,
+JIT-less pypy-stm version:
+  
+* What kind of conflicts do we get in real use cases?  And, sometimes,
+  which data structures would be more appropriate?  For example, a dict
+  implemented as a hash table will suffer "stm collisions" in all threads
+  whenever one thread writes anything to it; but there could be other
+  implementations.
 
-.. __: http://pypy.org/tmdonate.html
+* More generally, there is the idea that we would need some kind of
+  "debugger"-like tool to "debug" things that are not bugs, but stm
 +  conflicts.  What would this tool look like to the end Python
 +  programmer?  Like a profiler?  Or like a debugger with breakpoints
+  on aborted transactions?
+
+* Find good ways to have libraries using internally threads and atomics,
+  but not exposing threads to the user.  Right now there is a rough draft
+  in ``lib_pypy/transaction.py``, but much better is possible.  For example
+  we could probably have an iterator-like concept that allows each loop
+  iteration to run in parallel.
+
 
 Introduce new benchmarks
 ------------------------

File pypy/doc/whatsnew-head.rst

 
 .. branch: iterator-in-rpython
 .. branch: numpypy_count_nonzero
+.. branch: numpy-refactor
+Remove numpy lazy evaluation and simplify everything
+.. branch: numpy-fancy-indexing
+Support for array[array-of-ints] in numpy
 .. branch: even-more-jit-hooks
 Implement better JIT hooks
 .. branch: virtual-arguments

File pypy/interpreter/astcompiler/assemble.py

         self.marked = False
         self.have_return = False
 
-    def _post_order(self, blocks):
-        if self.marked:
-            return
-        self.marked = True
-        if self.next_block is not None:
-            self.next_block._post_order(blocks)
-        for instr in self.instructions:
-            if instr.has_jump:
-                instr.jump[0]._post_order(blocks)
-        blocks.append(self)
-        self.marked = True
+    def _post_order_see(self, stack, nextblock):
+        if nextblock.marked == 0:
+            nextblock.marked = 1
+            stack.append(nextblock)
 
     def post_order(self):
-        """Return this block and its children in post order."""
-        blocks = []
-        self._post_order(blocks)
-        blocks.reverse()
-        return blocks
+        """Return this block and its children in post order.
+        This means that the graph of blocks is first cleaned up to
+        ignore back-edges, thus turning it into a DAG.  Then the DAG
+        is linearized.  For example:
+
+                   A --> B -\           =>     [A, D, B, C]
+                     \-> D ---> C
+        """
+        resultblocks = []
+        stack = [self]
+        self.marked = 1
+        while stack:
+            current = stack[-1]
+            if current.marked == 1:
+                current.marked = 2
+                if current.next_block is not None:
+                    self._post_order_see(stack, current.next_block)
+            else:
+                i = current.marked - 2
+                assert i >= 0
+                while i < len(current.instructions):
+                    instr = current.instructions[i]
+                    i += 1
+                    if instr.has_jump:
+                        current.marked = i + 2
+                        self._post_order_see(stack, instr.jump[0])
+                        break
+                else:
+                    resultblocks.append(current)
+                    stack.pop()
+        resultblocks.reverse()
+        return resultblocks
 
     def code_size(self):
         """Return the encoded size of all the instructions in this block."""
     def _stacksize(self, blocks):
         """Compute co_stacksize."""
         for block in blocks:
-            block.marked = False
-            block.initial_depth = -1000
-        return self._recursive_stack_depth_walk(blocks[0], 0, 0)
+            block.initial_depth = 0
+        # Assumes that it is sufficient to walk the blocks in 'post-order'.
+        # This means we ignore all back-edges, but apart from that, we only
+        # look into a block when all the previous blocks have been done.
+        self._max_depth = 0
+        for block in blocks:
+            self._do_stack_depth_walk(block)
+        return self._max_depth
 
-    def _recursive_stack_depth_walk(self, block, depth, max_depth):
-        if block.marked or block.initial_depth >= depth:
-            return max_depth
-        block.marked = True
-        block.initial_depth = depth
+    def _next_stack_depth_walk(self, nextblock, depth):
+        if depth > nextblock.initial_depth:
+            nextblock.initial_depth = depth
+
+    def _do_stack_depth_walk(self, block):
+        depth = block.initial_depth
         done = False
         for instr in block.instructions:
             depth += _opcode_stack_effect(instr.opcode, instr.arg)
-            if depth >= max_depth:
-                max_depth = depth
+            if depth >= self._max_depth:
+                self._max_depth = depth
             if instr.has_jump:
                 target_depth = depth
                 jump_op = instr.opcode
                       jump_op == ops.SETUP_EXCEPT or
                       jump_op == ops.SETUP_WITH):
                     target_depth += 3
-                    if target_depth > max_depth:
-                        max_depth = target_depth
-                max_depth = self._recursive_stack_depth_walk(instr.jump[0],
-                                                             target_depth,
-                                                             max_depth)
+                    if target_depth > self._max_depth:
+                        self._max_depth = target_depth
+                self._next_stack_depth_walk(instr.jump[0], target_depth)
                 if jump_op == ops.JUMP_ABSOLUTE or jump_op == ops.JUMP_FORWARD:
                     # Nothing more can occur.
                     done = True
                     break
         if block.next_block and not done:
-            max_depth = self._recursive_stack_depth_walk(block.next_block,
-                                                         depth, max_depth)
-        block.marked = False
-        return max_depth
+            max_depth = self._next_stack_depth_walk(block.next_block, depth)
 
     def _build_lnotab(self, blocks):
         """Build the line number table for tracebacks and tracing."""

File pypy/interpreter/astcompiler/test/test_compiler.py

             raise AssertionError("attribute not removed")"""
         yield self.st, test, "X.__name__", "X"
 
+    def test_lots_of_loops(self):
+        source = "for x in y: pass\n" * 1000
+        compile_with_astcompiler(source, 'exec', self.space)
+
 
 class AppTestCompiler:
 

File pypy/interpreter/baseobjspace.py

                 w_exc = self.getitem(w_dic, w_name)
                 exc_types_w[name] = w_exc
                 setattr(self, "w_" + excname, w_exc)
-        # Make a prebuilt recursion error
-        w_msg = self.wrap("maximum recursion depth exceeded")
-        self.prebuilt_recursion_error = OperationError(self.w_RuntimeError,
-                                                       w_msg)
         return exc_types_w
 
     def install_mixedmodule(self, mixedname, installed_builtin_modules):
         return isinstance(obj, RequiredClass)
 
     def unpackiterable(self, w_iterable, expected_length=-1):
-        """Unpack an iterable object into a real (interpreter-level) list.
+        """Unpack an iterable into a real (interpreter-level) list.
+
         Raise an OperationError(w_ValueError) if the length is wrong."""
         w_iterator = self.iter(w_iterable)
         if expected_length == -1:
     def iteriterable(self, w_iterable):
         return W_InterpIterable(self, w_iterable)
 
-    @jit.dont_look_inside
     def _unpackiterable_unknown_length(self, w_iterator, w_iterable):
-        # Unpack a variable-size list of unknown length.
-        # The JIT does not look inside this function because it
-        # contains a loop (made explicit with the decorator above).
-        #
+        """Unpack an iterable of unknown length into an interp-level
+        list.
+        """
         # If we can guess the expected length we can preallocate.
         try:
             lgt_estimate = self.len_w(w_iterable)

File pypy/interpreter/error.py

     _application_traceback = None
 
     def __init__(self, w_type, w_value, tb=None):
-        if not we_are_translated() and w_type is None:
-            from pypy.tool.error import FlowingError
-            raise FlowingError(w_value)
+        assert w_type is not None
         self.setup(w_type)
         self._w_value = w_value
         self._application_traceback = tb
                 self.xstrings = strings
                 for i, attr in entries:
                     setattr(self, attr, args[i])
-                if not we_are_translated() and w_type is None:
-                    from pypy.tool.error import FlowingError
-                    raise FlowingError(self._compute_value())
+                assert w_type is not None
             def _compute_value(self):
                 lst = [None] * (len(formats) + len(formats) + 1)
                 for i, attr in entries:
         return OperationError(exc, w_error)
 
 def wrap_oserror2(space, e, w_filename=None, exception_name='w_OSError',
-                  w_exception_class=None): 
+                  w_exception_class=None):
     assert isinstance(e, OSError)
 
     if _WINDOWS and isinstance(e, WindowsError):

File pypy/interpreter/executioncontext.py

             self._trace(frame, 'exception', None, operationerr)
         #operationerr.print_detailed_traceback(self.space)
 
-    def _convert_exc(self, operr):
-        # Only for the flow object space
-        return operr
-
     def sys_exc_info(self): # attn: the result is not the wrapped sys.exc_info() !!!
         """Implements sys.exc_info().
         Return an OperationError instance or None."""
         frame = self.gettopframe_nohidden()
         while frame:
             if frame.last_exception is not None:
-                return self._convert_exc(frame.last_exception)
+                return frame.last_exception
             frame = self.getnextframe_nohidden(frame)
         return None
 

File pypy/interpreter/gateway.py

             raise OperationError(space.w_MemoryError, space.w_None)
         except rstackovf.StackOverflow, e:
             rstackovf.check_stack_overflow()
-            raise space.prebuilt_recursion_error
+            raise OperationError(space.w_RuntimeError,
+                                space.wrap("maximum recursion depth exceeded"))
         except RuntimeError:   # not on top of py.py
             raise OperationError(space.w_RuntimeError, space.w_None)
 

File pypy/interpreter/pyopcode.py

             next_instr = self.dispatch_bytecode(co_code, next_instr, ec)
         except OperationError, operr:
             next_instr = self.handle_operation_error(ec, operr)
-        except Reraise:
-            operr = self.last_exception
-            next_instr = self.handle_operation_error(ec, operr,
-                                                     attach_tb=False)
         except RaiseWithExplicitTraceback, e:
             next_instr = self.handle_operation_error(ec, e.operr,
                                                      attach_tb=False)
             next_instr = self.handle_asynchronous_error(ec,
                 self.space.w_MemoryError)
         except rstackovf.StackOverflow, e:
+            # Note that this case catches AttributeError!
             rstackovf.check_stack_overflow()
-            w_err = self.space.prebuilt_recursion_error
-            next_instr = self.handle_operation_error(ec, w_err)
+            next_instr = self.handle_asynchronous_error(ec,
+                self.space.w_RuntimeError,
+                self.space.wrap("maximum recursion depth exceeded"))
         return next_instr
 
     def handle_asynchronous_error(self, ec, w_type, w_value=None):
             ec = self.space.getexecutioncontext()
             while frame:
                 if frame.last_exception is not None:
-                    operror = ec._convert_exc(frame.last_exception)
+                    operror = frame.last_exception
                     break
                 frame = frame.f_backref()
             else:
                     space.wrap("raise: no active exception to re-raise"))
             # re-raise, no new traceback obj will be attached
             self.last_exception = operror
-            raise Reraise
+            raise RaiseWithExplicitTraceback(operror)
 
         w_value = w_traceback = space.w_None
         if nbargs >= 3:
                       isinstance(unroller, SApplicationException))
         if is_app_exc:
             operr = unroller.operr
+            self.last_exception = operr
             w_traceback = self.space.wrap(operr.get_traceback())
             w_suppress = self.call_contextmanager_exit_function(
                 w_exitfunc,
 class Yield(ExitFrame):
     """Raised when exiting a frame via a 'yield' statement."""
 
-class Reraise(Exception):
-    """Raised at interp-level by a bare 'raise' statement."""
 class RaiseWithExplicitTraceback(Exception):
-    """Raised at interp-level by a 3-arguments 'raise' statement."""
+    """Raised at interp-level by a 0- or 3-arguments 'raise' statement."""
     def __init__(self, operr):
         self.operr = operr
 
     def nomoreblocks(self):
         raise RaiseWithExplicitTraceback(self.operr)
 
-    def state_unpack_variables(self, space):
-        return [self.operr.w_type, self.operr.get_w_value(space)]
-    def state_pack_variables(space, w_type, w_value):
-        return SApplicationException(OperationError(w_type, w_value))
-    state_pack_variables = staticmethod(state_pack_variables)
-
 class SBreakLoop(SuspendedUnroller):
     """Signals a 'break' statement."""
     _immutable_ = True

File pypy/interpreter/test/test_syntax.py

+from __future__ import with_statement
 import py
 from pypy.conftest import gettestobjspace
 
         import types
         assert isinstance(acontextfact.exit_params[2], types.TracebackType)
 
+    def test_with_reraise_exception(self):
+        class Context:
+            def __enter__(self):
+                self.calls = []
+            def __exit__(self, exc_type, exc_value, exc_tb):
+                self.calls.append('exit')
+                raise
+
+        c = Context()
+        try:
+            with c:
+                1 / 0
+        except ZeroDivisionError:
+            pass
+        else:
+            raise AssertionError('Should have reraised initial exception')
+        assert c.calls == ['exit']
+
     def test_with_break(self):
 
         s = """

File pypy/jit/backend/test/runner_test.py

                              ('p', lltype.Ptr(TP)))
         a_box, A = self.alloc_array_of(ITEM, 15)
         s_box, S = self.alloc_instance(TP)
+        vsdescr = self.cpu.interiorfielddescrof(A, 'vs')
         kdescr = self.cpu.interiorfielddescrof(A, 'k')
         pdescr = self.cpu.interiorfielddescrof(A, 'p')
         self.execute_operation(rop.SETINTERIORFIELD_GC, [a_box, BoxInt(3),
         r = self.execute_operation(rop.GETINTERIORFIELD_GC, [a_box, BoxInt(3)],
                                    'ref', descr=pdescr)
         assert r.getref_base() == s_box.getref_base()
+        #
+        # test a corner case that used to fail on x86
+        i4 = BoxInt(4)
+        self.execute_operation(rop.SETINTERIORFIELD_GC, [a_box, i4, i4],
+                               'void', descr=vsdescr)
+        r = self.cpu.bh_getinteriorfield_gc_i(a_box.getref_base(), 4, vsdescr)
+        assert r == 4
 
     def test_string_basic(self):
         s_box = self.alloc_string("hello\xfe")

File pypy/jit/backend/test/test_random.py

 
 # ____________________________________________________________
 
+def do_assert(condition, error_message):
+    if condition:
+        return
+    seed = pytest.config.option.randomseed
+    message = "%s\nPython: %s\nRandom seed: %r" % (
+        error_message,
+        sys.executable,
+        seed)
+    raise AssertionError(message)
+
 def Random():
     import random
     seed = pytest.config.option.randomseed
         self.startvars = startvars
         self.prebuilt_ptr_consts = []
         self.r = r
+        self.subloops = []
         self.build_random_loop(cpu, builder_factory, r, startvars, allow_delay)
 
     def build_random_loop(self, cpu, builder_factory, r, startvars, allow_delay):
 
         arguments = [box.value for box in self.loop.inputargs]
         fail = cpu.execute_token(self.runjitcelltoken(), *arguments)
-        assert fail is self.should_fail_by.getdescr()
+        do_assert(fail is self.should_fail_by.getdescr(),
+                  "Got %r, expected %r" % (fail,
+                                           self.should_fail_by.getdescr()))
         for i, v in enumerate(self.get_fail_args()):
             if isinstance(v, (BoxFloat, ConstFloat)):
                 value = cpu.get_latest_value_float(i)
             else:
                 value = cpu.get_latest_value_int(i)
-            assert value == self.expected[v], (
+            do_assert(value == self.expected[v],
                 "Got %r, expected %r for value #%d" % (value,
                                                        self.expected[v],
                                                        i)
         if (self.guard_op is not None and
             self.guard_op.is_guard_exception()):
             if self.guard_op.getopnum() == rop.GUARD_NO_EXCEPTION:
-                assert exc
+                do_assert(exc,
+                          "grab_exc_value() should not be %r" % (exc,))
         else:
-            assert not exc
+            do_assert(not exc,
+                      "unexpected grab_exc_value(): %r" % (exc,))
 
     def build_bridge(self):
         def exc_handling(guard_op):
             return False
         # generate the branch: a sequence of operations that ends in a FINISH
         subloop = DummyLoop([])
+        self.subloops.append(subloop)   # keep around for debugging
         if guard_op.is_guard_exception():
             subloop.operations.append(exc_handling(guard_op))
         bridge_builder = self.builder.fork(self.builder.cpu, subloop,
             args = [x.clonebox() for x in subset]
             rl = RandomLoop(self.builder.cpu, self.builder.fork,
                                      r, args)
-            dump(rl.loop)
-            self.cpu.compile_loop(rl.loop.inputargs, rl.loop.operations,
-                                  rl.loop._jitcelltoken)
             # done
             self.should_fail_by = rl.should_fail_by
             self.expected = rl.expected

File pypy/jit/backend/x86/assembler.py

File contents unchanged.

File pypy/jit/backend/x86/regalloc.py

         # If 'index_loc' is not an immediate, then we need a 'temp_loc' that
         # is a register whose value will be destroyed.  It's fine to destroy
         # the same register as 'index_loc', but not the other ones.
-        self.rm.possibly_free_var(box_index)
         if not isinstance(index_loc, ImmedLoc):
+            # ...that is, except in a corner case where 'index_loc' would be
+            # in the same register as 'value_loc'...
+            if index_loc is not value_loc:
+                self.rm.possibly_free_var(box_index)
             tempvar = TempBox()
             temp_loc = self.rm.force_allocate_reg(tempvar, [box_base,
                                                             box_value])
             self.rm.possibly_free_var(tempvar)
         else:
             temp_loc = None
+        self.rm.possibly_free_var(box_index)
         self.rm.possibly_free_var(box_base)
         self.possibly_free_var(box_value)
         self.PerformDiscard(op, [base_loc, ofs, itemsize, fieldsize,

File pypy/jit/backend/x86/rx86.py

     J_il8 = insn(immediate(1, 'o'), '\x70', immediate(2, 'b'))
     J_il = insn('\x0F', immediate(1,'o'), '\x80', relative(2))
 
-    SET_ir = insn(rex_w, '\x0F', immediate(1,'o'),'\x90', byte_register(2), '\xC0')
+    SET_ir = insn(rex_fw, '\x0F', immediate(1,'o'),'\x90', byte_register(2), '\xC0')
 
     # The 64-bit version of this, CQO, is defined in X86_64_CodeBuilder
     CDQ = insn(rex_nw, '\x99')

File pypy/jit/metainterp/compile.py

 
 def compile_loop(metainterp, greenkey, start,
                  inputargs, jumpargs,
-                 resume_at_jump_descr, full_preamble_needed=True):
+                 resume_at_jump_descr, full_preamble_needed=True,
+                 try_disabling_unroll=False):
     """Try to compile a new procedure by closing the current history back
     to the first operation.
     """
     jitdriver_sd = metainterp.jitdriver_sd
     history = metainterp.history
 
+    enable_opts = jitdriver_sd.warmstate.enable_opts
+    if try_disabling_unroll:
+        if 'unroll' not in enable_opts:
+            return None
+        enable_opts = enable_opts.copy()
+        del enable_opts['unroll']
+
     jitcell_token = make_jitcell_token(jitdriver_sd)
     part = create_empty_loop(metainterp)
     part.inputargs = inputargs[:]
                       [ResOperation(rop.LABEL, jumpargs, None, descr=jitcell_token)]
 
     try:
-        optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts)
+        optimize_trace(metainterp_sd, part, enable_opts)
     except InvalidLoop:
         return None
     target_token = part.operations[0].getdescr()
         jumpargs = part.operations[-1].getarglist()
 
         try:
-            optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts)
+            optimize_trace(metainterp_sd, part, enable_opts)
         except InvalidLoop:
             return None
             

File pypy/jit/metainterp/pyjitpl.py

                     memmgr = self.staticdata.warmrunnerdesc.memory_manager
                     if memmgr:
                         if self.cancel_count > memmgr.max_unroll_loops:
-                            self.staticdata.log('cancelled too many times!')
-                            raise SwitchToBlackhole(Counters.ABORT_BAD_LOOP)
+                            self.compile_loop_or_abort(original_boxes,
+                                                       live_arg_boxes,
+                                                       start, resumedescr)
                 self.staticdata.log('cancelled, tracing more...')
 
         # Otherwise, no loop found so far, so continue tracing.
                 return None
         return token
 
-    def compile_loop(self, original_boxes, live_arg_boxes, start, resume_at_jump_descr):
+    def compile_loop(self, original_boxes, live_arg_boxes, start,
+                     resume_at_jump_descr, try_disabling_unroll=False):
         num_green_args = self.jitdriver_sd.num_green_args
         greenkey = original_boxes[:num_green_args]
         if not self.partial_trace:
             target_token = compile.compile_loop(self, greenkey, start,
                                                 original_boxes[num_green_args:],
                                                 live_arg_boxes[num_green_args:],
-                                                resume_at_jump_descr)
+                                                resume_at_jump_descr,
+                                     try_disabling_unroll=try_disabling_unroll)
             if target_token is not None:
                 assert isinstance(target_token, TargetToken)
                 self.jitdriver_sd.warmstate.attach_procedure_to_interp(greenkey, target_token.targeting_jitcell_token)
             jitcell_token = target_token.targeting_jitcell_token
             self.raise_continue_running_normally(live_arg_boxes, jitcell_token)
 
+    def compile_loop_or_abort(self, original_boxes, live_arg_boxes,
+                              start, resume_at_jump_descr):
+        """Called after we aborted more than 'max_unroll_loops' times.
+        As a last attempt, try to compile the loop with unrolling disabled.
+        """
+        if not self.partial_trace:
+            self.compile_loop(original_boxes, live_arg_boxes, start,
+                              resume_at_jump_descr, try_disabling_unroll=True)
+        #
+        self.staticdata.log('cancelled too many times!')
+        raise SwitchToBlackhole(Counters.ABORT_BAD_LOOP)
+
     def compile_trace(self, live_arg_boxes, resume_at_jump_descr):
         num_green_args = self.jitdriver_sd.num_green_args
         greenkey = live_arg_boxes[:num_green_args]

File pypy/jit/metainterp/test/test_ajit.py

         finally:
             optimizeopt.optimize_trace = old_optimize_trace
 
+    def test_max_unroll_loops_retry_without_unroll(self):
+        from pypy.jit.metainterp.optimize import InvalidLoop
+        from pypy.jit.metainterp import optimizeopt
+        myjitdriver = JitDriver(greens = [], reds = ['n', 'i'])
+        #
+        def f(n, limit):
+            set_param(myjitdriver, 'threshold', 5)
+            set_param(myjitdriver, 'max_unroll_loops', limit)
+            i = 0
+            while i < n:
+                myjitdriver.jit_merge_point(n=n, i=i)
+                print i
+                i += 1
+            return i
+        #
+        seen = []
+        def my_optimize_trace(metainterp_sd, loop, enable_opts, *args, **kwds):
+            seen.append('unroll' in enable_opts)
+            raise InvalidLoop
+        old_optimize_trace = optimizeopt.optimize_trace
+        optimizeopt.optimize_trace = my_optimize_trace
+        try:
+            res = self.meta_interp(f, [23, 4])
+            assert res == 23
+            assert False in seen
+            assert True in seen
+        finally:
+            optimizeopt.optimize_trace = old_optimize_trace
+
     def test_retrace_limit_with_extra_guards(self):
         myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa', 'a',
                                                      'node'])

File pypy/module/_cffi_backend/__init__.py

 from pypy.interpreter.mixedmodule import MixedModule
+from pypy.rlib import rdynload
+
 
 class Module(MixedModule):
 
     appleveldefs = {
         }
     interpleveldefs = {
-        '__version__': 'space.wrap("0.3")',
+        '__version__': 'space.wrap("0.4")',
 
         'nonstandard_integer_types': 'misc.nonstandard_integer_types',
 
         'alignof': 'func.alignof',
         'sizeof': 'func.sizeof',
         'typeof': 'func.typeof',
-        'offsetof': 'func.offsetof',
+        'typeoffsetof': 'func.typeoffsetof',
+        'rawaddressof': 'func.rawaddressof',
         '_getfields': 'func._getfields',
         'getcname': 'func.getcname',
         '_get_types': 'func._get_types',
         'FFI_DEFAULT_ABI': 'ctypefunc._get_abi(space, "FFI_DEFAULT_ABI")',
         'FFI_CDECL': 'ctypefunc._get_abi(space,"FFI_DEFAULT_ABI")',#win32 name
         }
+
+for _name in ["RTLD_LAZY", "RTLD_NOW", "RTLD_GLOBAL", "RTLD_LOCAL",
+              "RTLD_NODELETE", "RTLD_NOLOAD", "RTLD_DEEPBIND"]:
+    if getattr(rdynload.cConfig, _name) is not None:
+        Module.interpleveldefs[_name] = 'space.wrap(%d)' % (
+            getattr(rdynload.cConfig, _name),)
+
+for _name in ["RTLD_LAZY", "RTLD_NOW", "RTLD_GLOBAL", "RTLD_LOCAL"]:
+    Module.interpleveldefs.setdefault(_name, 'space.wrap(0)')

File pypy/module/_cffi_backend/ctypeobj.py

                               "ctype '%s' is of unknown alignment",
                               self.name)
 
-    def offsetof(self, fieldname):
+    def typeoffsetof(self, fieldname):
         space = self.space
-        raise OperationError(space.w_TypeError,
-                             space.wrap("not a struct or union ctype"))
+        if fieldname is None:
+            msg = "expected a struct or union ctype"
+        else:
+            msg = "expected a struct or union ctype, or a pointer to one"
+        raise OperationError(space.w_TypeError, space.wrap(msg))
 
     def _getfields(self):
         return None
 
+    def rawaddressof(self, cdata, offset):
+        space = self.space
+        raise OperationError(space.w_TypeError,
+                             space.wrap("expected a pointer ctype"))
+
     def call(self, funcaddr, args_w):
         space = self.space
         raise operationerrfmt(space.w_TypeError,

File pypy/module/_cffi_backend/ctypeprim.py

         if (isinstance(ob, cdataobj.W_CData) and
                isinstance(ob.ctype, ctypeptr.W_CTypePtrOrArray)):
             value = rffi.cast(lltype.Signed, ob._cdata)
-            value = r_ulonglong(value)
+            value = self._cast_result(value)
         elif space.isinstance_w(w_ob, space.w_str):
             value = self.cast_str(w_ob)
-            value = r_ulonglong(value)
+            value = self._cast_result(value)
         elif space.isinstance_w(w_ob, space.w_unicode):
             value = self.cast_unicode(w_ob)
-            value = r_ulonglong(value)
+            value = self._cast_result(value)
         else:
-            value = misc.as_unsigned_long_long(space, w_ob, strict=False)
+            value = self._cast_generic(w_ob)
         w_cdata = cdataobj.W_CDataMem(space, self.size, self)
         w_cdata.write_raw_integer_data(value)
         return w_cdata
 
+    def _cast_result(self, intvalue):
+        return r_ulonglong(intvalue)
+
+    def _cast_generic(self, w_ob):
+        return misc.as_unsigned_long_long(self.space, w_ob, strict=False)
+
     def _overflow(self, w_ob):
         space = self.space
         s = space.str_w(space.str(w_ob))
             self.vrangemax = (r_ulonglong(1) << sh) - 1
 
     def int(self, cdata):
-        if self.value_fits_long:
-            # this case is to handle enums, but also serves as a slight
-            # performance improvement for some other primitive types
-            value = misc.read_raw_long_data(cdata, self.size)
-            return self.space.wrap(value)
-        else:
-            return self.convert_to_object(cdata)
+        # enums: really call convert_to_object() just below,
+        # and not the one overridden in W_CTypeEnum.
+        return W_CTypePrimitiveSigned.convert_to_object(self, cdata)
 
     def convert_to_object(self, cdata):
         if self.value_fits_long:
         W_CTypePrimitive.__init__(self, *args)
         self.value_fits_long = self.size < rffi.sizeof(lltype.Signed)
         if self.size < rffi.sizeof(lltype.SignedLongLong):
-            sh = self.size * 8
-            self.vrangemax = (r_ulonglong(1) << sh) - 1
+            self.vrangemax = self._compute_vrange_max()
+
+    def _compute_vrange_max(self):
+        sh = self.size * 8
+        return (r_ulonglong(1) << sh) - 1
 
     def int(self, cdata):
         return self.convert_to_object(cdata)
         return self
 
 
+class W_CTypePrimitiveBool(W_CTypePrimitiveUnsigned):
+    _attrs_ = []
+
+    def _compute_vrange_max(self):
+        return r_ulonglong(1)
+
+    def _cast_result(self, intvalue):
+        return r_ulonglong(intvalue != 0)
+
+    def _cast_generic(self, w_ob):
+        return misc.object_as_bool(self.space, w_ob)
+
+    def string(self, cdataobj, maxlen):
+        # bypass the method 'string' implemented in W_CTypePrimitive
+        return W_CType.string(self, cdataobj, maxlen)
+
+
 class W_CTypePrimitiveFloat(W_CTypePrimitive):
     _attrs_ = []
 

File pypy/module/_cffi_backend/ctypeptr.py

             for i in range(len(lst_w)):
                 ctitem.convert_from_object(cdata, lst_w[i])
                 cdata = rffi.ptradd(cdata, ctitem.size)
-        elif isinstance(self.ctitem, ctypeprim.W_CTypePrimitiveChar):
+        elif (self.ctitem.is_primitive_integer and
+              self.ctitem.size == rffi.sizeof(lltype.Char)):
             try:
                 s = space.str_w(w_ob)
             except OperationError, e:
             return True
         else:
             set_mustfree_flag(cdata, False)
-            try:
-                self.convert_from_object(cdata, w_ob)
-            except OperationError:
-                if (self.is_struct_ptr and isinstance(ob, cdataobj.W_CData)
-                    and ob.ctype is self.ctitem):
-                    # special case to make the life of verifier.py easier:
-                    # if the formal argument type is 'struct foo *' but
-                    # we pass a 'struct foo', then get a pointer to it
-                    rffi.cast(rffi.CCHARPP, cdata)[0] = ob._cdata
-                else:
-                    raise
+            self.convert_from_object(cdata, w_ob)
             return False
 
     def getcfield(self, attr):
         return self.ctitem.getcfield(attr)
+
+    def typeoffsetof(self, fieldname):
+        if fieldname is None:
+            return W_CTypePtrBase.typeoffsetof(self, fieldname)
+        else:
+            return self.ctitem.typeoffsetof(fieldname)
+
+    def rawaddressof(self, cdata, offset):
+        from pypy.module._cffi_backend.ctypestruct import W_CTypeStructOrUnion
+        space = self.space
+        ctype2 = cdata.ctype
+        if (isinstance(ctype2, W_CTypeStructOrUnion) or
+            (isinstance(ctype2, W_CTypePtrOrArray) and ctype2.is_struct_ptr)):
+            ptrdata = rffi.ptradd(cdata._cdata, offset)
+            return cdataobj.W_CData(space, ptrdata, self)
+        else:
+            raise OperationError(space.w_TypeError,
+                     space.wrap("expected a 'cdata struct-or-union' object"))

File pypy/module/_cffi_backend/ctypestruct.py

         keepalive_until_here(ob)
         return ob
 
-    def offsetof(self, fieldname):
+    def typeoffsetof(self, fieldname):
+        if fieldname is None:
+            return (self, 0)
         self.check_complete()
+        space = self.space
         try:
             cfield = self.fields_dict[fieldname]
         except KeyError:
-            space = self.space
             raise OperationError(space.w_KeyError, space.wrap(fieldname))
-        return cfield.offset
+        if cfield.bitshift >= 0:
+            raise OperationError(space.w_TypeError,
+                                 space.wrap("not supported for bitfields"))
+        return (cfield.ctype, cfield.offset)
 
     def _copy_from_same(self, cdata, w_ob):
         space = self.space

File pypy/module/_cffi_backend/func.py

     align = ctype.alignof()
     return space.wrap(align)
 
-@unwrap_spec(ctype=ctypeobj.W_CType, fieldname=str)
-def offsetof(space, ctype, fieldname):
-    ofs = ctype.offsetof(fieldname)
-    return space.wrap(ofs)
+@unwrap_spec(ctype=ctypeobj.W_CType, fieldname="str_or_None")
+def typeoffsetof(space, ctype, fieldname):
+    ctype, offset = ctype.typeoffsetof(fieldname)
+    return space.newtuple([space.wrap(ctype), space.wrap(offset)])
 
 @unwrap_spec(ctype=ctypeobj.W_CType)
 def _getfields(space, ctype):
     return ctype._getfields()
 
+@unwrap_spec(ctype=ctypeobj.W_CType, cdata=cdataobj.W_CData, offset=int)
+def rawaddressof(space, ctype, cdata, offset=0):
+    return ctype.rawaddressof(cdata, offset)
+
 # ____________________________________________________________
 
 @unwrap_spec(ctype=ctypeobj.W_CType, replace_with=str)

File pypy/module/_cffi_backend/libraryobj.py

 from pypy.interpreter.typedef import TypeDef
 from pypy.rpython.lltypesystem import lltype, rffi
 from pypy.rlib.rdynload import DLLHANDLE, dlopen, dlsym, dlclose, DLOpenError
-from pypy.rlib.rdynload import RTLD_GLOBAL
 
 from pypy.module._cffi_backend.cdataobj import W_CData
 from pypy.module._cffi_backend.ctypeobj import W_CType
     _immutable_ = True
     handle = rffi.cast(DLLHANDLE, 0)
 
-    def __init__(self, space, filename, is_global):
+    def __init__(self, space, filename, flags):
         self.space = space
-        if is_global and RTLD_GLOBAL is not None:
-            mode = RTLD_GLOBAL
-        else:
-            mode = -1     # default value, corresponds to RTLD_LOCAL
         with rffi.scoped_str2charp(filename) as ll_libname:
             if filename is None:
                 filename = "<None>"
             try:
-                self.handle = dlopen(ll_libname, mode)
+                self.handle = dlopen(ll_libname, flags)
             except DLOpenError, e:
                 raise operationerrfmt(space.w_OSError,
-                                      "cannot load '%s': %s",
+                                      "cannot load library %s: %s",
                                       filename, e.msg)
         self.name = filename
 
 W_Library.acceptable_as_base_class = False
 
 
-@unwrap_spec(filename="str_or_None", is_global=int)
-def load_library(space, filename, is_global=0):
-    lib = W_Library(space, filename, is_global)
+@unwrap_spec(filename="str_or_None", flags=int)
+def load_library(space, filename, flags=0):
+    lib = W_Library(space, filename, flags)
     return space.wrap(lib)

File pypy/module/_cffi_backend/misc.py

 from pypy.rpython.lltypesystem import lltype, llmemory, rffi
 from pypy.rlib.rarithmetic import r_ulonglong
 from pypy.rlib.unroll import unrolling_iterable
+from pypy.rlib.objectmodel import keepalive_until_here
 from pypy.rlib import jit
+from pypy.translator.tool.cbuild import ExternalCompilationInfo
 
 # ____________________________________________________________
 
 
 # ____________________________________________________________
 
+def _is_a_float(space, w_ob):
+    from pypy.module._cffi_backend.cdataobj import W_CData
+    from pypy.module._cffi_backend.ctypeprim import W_CTypePrimitiveFloat
+    ob = space.interpclass_w(w_ob)
+    if isinstance(ob, W_CData):
+        return isinstance(ob.ctype, W_CTypePrimitiveFloat)
+    return space.isinstance_w(w_ob, space.w_float)
+
 def as_long_long(space, w_ob):
     # (possibly) convert and cast a Python object to a long long.
     # This version accepts a Python int too, and does conversions from
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
-        if space.isinstance_w(w_ob, space.w_float):
+        if _is_a_float(space, w_ob):
             raise
         bigint = space.bigint_w(space.int(w_ob))
     try:
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
-        if strict and space.isinstance_w(w_ob, space.w_float):
+        if strict and _is_a_float(space, w_ob):
             raise
         bigint = space.bigint_w(space.int(w_ob))
     if strict:
 
 # ____________________________________________________________
 
+class _NotStandardObject(Exception):
+    pass
+
+def _standard_object_as_bool(space, w_ob):
+    if space.isinstance_w(w_ob, space.w_int):
+        return space.int_w(w_ob) != 0
+    if space.isinstance_w(w_ob, space.w_long):
+        return space.bigint_w(w_ob).tobool()
+    if space.isinstance_w(w_ob, space.w_float):
+        return space.float_w(w_ob) != 0.0
+    raise _NotStandardObject
+
+# hackish, but the most straightforward way to know if a LONGDOUBLE object
+# contains the value 0 or not.
+eci = ExternalCompilationInfo(post_include_bits=["""
+#define pypy__is_nonnull_longdouble(x)  ((x) != 0.0)
+"""])
+is_nonnull_longdouble = rffi.llexternal(
+    "pypy__is_nonnull_longdouble", [rffi.LONGDOUBLE], lltype.Bool,
+    compilation_info=eci, _nowrapper=True, elidable_function=True,
+    sandboxsafe=True)
+
+def object_as_bool(space, w_ob):
+    # convert and cast a Python object to a boolean.  Accept an integer
+    # or a float object, up to a CData 'long double'.
+    try:
+        return _standard_object_as_bool(space, w_ob)
+    except _NotStandardObject:
+        pass
+    #
+    from pypy.module._cffi_backend.cdataobj import W_CData
+    from pypy.module._cffi_backend.ctypeprim import W_CTypePrimitiveFloat
+    from pypy.module._cffi_backend.ctypeprim import W_CTypePrimitiveLongDouble
+    ob = space.interpclass_w(w_ob)
+    is_cdata = isinstance(ob, W_CData)
+    if is_cdata and isinstance(ob.ctype, W_CTypePrimitiveFloat):
+        if isinstance(ob.ctype, W_CTypePrimitiveLongDouble):
+            result = is_nonnull_longdouble(read_raw_longdouble_data(ob._cdata))
+        else:
+            result = read_raw_float_data(ob._cdata, ob.ctype.size) != 0.0
+        keepalive_until_here(ob)
+        return result
+    #
+    if not is_cdata and space.lookup(w_ob, '__float__') is not None:
+        w_io = space.float(w_ob)
+    else:
+        w_io = space.int(w_ob)
+    try:
+        return _standard_object_as_bool(space, w_io)
+    except _NotStandardObject:
+        raise OperationError(space.w_TypeError,
+                             space.wrap("integer/float expected"))
+
+# ____________________________________________________________
+
 def _raw_memcopy(source, dest, size):
     if jit.isconstant(size):
         # for the JIT: first handle the case where 'size' is known to be

File pypy/module/_cffi_backend/newtype.py

 eptype("float",  rffi.FLOAT,  ctypeprim.W_CTypePrimitiveFloat)
 eptype("double", rffi.DOUBLE, ctypeprim.W_CTypePrimitiveFloat)
 eptype("long double", rffi.LONGDOUBLE, ctypeprim.W_CTypePrimitiveLongDouble)
+eptype("_Bool",  lltype.Bool,          ctypeprim.W_CTypePrimitiveBool)
 
 @unwrap_spec(name=str)
 def new_primitive_type(space, name):

File pypy/module/_cffi_backend/test/_backend_test_c.py

     return sizeof(BPtr)
 
 
-def find_and_load_library(name, is_global=0):
+def find_and_load_library(name, flags=RTLD_NOW):
     import ctypes.util
     if name is None:
         path = None
     else:
         path = ctypes.util.find_library(name)
-    return load_library(path, is_global)
+    return load_library(path, flags)
 
 def test_load_library():
     x = find_and_load_library('c')
     assert repr(x).startswith("<clibrary '")
-    x = find_and_load_library('c', 1)
+    x = find_and_load_library('c', RTLD_NOW | RTLD_GLOBAL)
     assert repr(x).startswith("<clibrary '")
+    x = find_and_load_library('c', RTLD_LAZY)
+    assert repr(x).startswith("<clibrary '")
+
+def test_all_rtld_symbols():
+    import sys
+    FFI_DEFAULT_ABI        # these symbols must be defined
+    FFI_CDECL
+    RTLD_LAZY
+    RTLD_NOW
+    RTLD_GLOBAL
+    RTLD_LOCAL
+    if sys.platform.startswith("linux"):
+        RTLD_NODELETE
+        RTLD_NOLOAD
+        RTLD_DEEPBIND
 
 def test_nonstandard_integer_types():
     d = nonstandard_integer_types()
     assert repr(s.a1).startswith("<cdata 'int[5]' 0x")
 
 de