Hakan Ardo avatar Hakan Ardo committed efd93ff Merge

hg merge default

Comments (0)

Files changed (13)

lib-python/modified-2.7.0/test/test_genexps.py

 
 Verify re-use of tuples (a side benefit of using genexps over listcomps)
 
-##  >>> tupleids = map(id, ((i,i) for i in xrange(10)))
-##  >>> int(max(tupleids) - min(tupleids))
-##  0
+    >>> from test.test_support import check_impl_detail
+    >>> tupleids = map(id, ((i,i) for i in xrange(10)))
+    >>> int(max(tupleids) - min(tupleids)) if check_impl_detail() else 0
+    0
 
 Verify that syntax errors are raised for genexps used as lvalues
 
     >>> g = (10 // i for i in (5, 0, 2))
     >>> g.next()
     2
-    >>> g.next()
+    >>> g.next()       # doctest: +ELLIPSIS
     Traceback (most recent call last):
       File "<pyshell#37>", line 1, in -toplevel-
         g.next()
       File "<pyshell#35>", line 1, in <generator expression>
         g = (10 // i for i in (5, 0, 2))
-    ZeroDivisionError: integer division by zero
+    ZeroDivisionError: integer division...by zero
     >>> g.next()
     Traceback (most recent call last):
       File "<pyshell#38>", line 1, in -toplevel-
     True
 
     >>> print g.next.__doc__
-    next() -> the next value, or raise StopIteration
+    x.next() -> the next value, or raise StopIteration
     >>> import types
     >>> isinstance(g, types.GeneratorType)
     True

lib-python/modified-2.7.0/test/test_inspect.py

 import unittest
 import inspect
 import linecache
-import datetime
 from UserList import UserList
 from UserDict import UserDict
 
 from test.test_support import run_unittest, check_py3k_warnings
-from pypy.test_support import check_impl_detail
+from test.test_support import check_impl_detail
 
 with check_py3k_warnings(
         ("tuple parameter unpacking has been removed", SyntaxWarning),
         else:
             self.assertFalse(inspect.isgetsetdescriptor(type(tb.tb_frame).f_locals))
         if hasattr(types, 'MemberDescriptorType'):
-            self.istest(inspect.ismemberdescriptor, 'datetime.timedelta.days')
+            self.istest(inspect.ismemberdescriptor, 'type(lambda: None).func_globals')
         else:
-            self.assertFalse(inspect.ismemberdescriptor(datetime.timedelta.days))
+            self.assertFalse(inspect.ismemberdescriptor(type(lambda: None).func_globals))
 
     def test_isroutine(self):
         self.assertTrue(inspect.isroutine(mod.spam))

pypy/interpreter/baseobjspace.py

         return buffer
 
     def bufferstr_w(self, w_obj):
-        # Directly returns an interp-level str.  Note that if w_obj is a
-        # unicode string, this is different from str_w(buffer(w_obj)):
-        # indeed, the latter returns a string with the raw bytes from
-        # the underlying unicode buffer, but bufferstr_w() just converts
-        # the unicode to an ascii string.  This inconsistency is kind of
-        # needed because CPython has the same issue.  (Well, it's
-        # unclear if there is any use at all for getting the bytes in
-        # the unicode buffer.)
-        try:
-            return self.str_w(w_obj)
-        except OperationError, e:
-            if not e.match(self, self.w_TypeError):
-                raise
-            buffer = self.buffer_w(w_obj)
-            return buffer.as_str()
+        buffer = self.buffer_w(w_obj)
+        return buffer.as_str()
 
     def str_or_None_w(self, w_obj):
         if self.is_w(w_obj, self.w_None):

pypy/interpreter/buffer.py

     if not space.is_w(w_subtype, space.gettypefor(Buffer)):
         raise OperationError(space.w_TypeError,
                              space.wrap("argument 1 must be 'buffer'"))
-    w_buffer = space.buffer(w_object)
+
+    if space.isinstance_w(w_object, space.w_unicode):
+        # unicode objects support the old buffer interface
+        # but not the new buffer interface (change in python 2.7)
+        from pypy.rlib.rstruct.unichar import pack_unichar
+        charlist = []
+        for unich in space.unicode_w(w_object):
+            pack_unichar(unich, charlist)
+        from pypy.interpreter.buffer import StringBuffer
+        w_buffer = space.wrap(StringBuffer(''.join(charlist)))
+    else:
+        w_buffer = space.buffer(w_object)
+
     buffer = space.interp_w(Buffer, w_buffer)    # type-check
     if offset == 0 and size == -1:
         return w_buffer

pypy/module/_multiprocessing/interp_semaphore.py

+from __future__ import with_statement
 from pypy.interpreter.baseobjspace import ObjSpace, Wrappable, W_Root
 from pypy.interpreter.typedef import TypeDef, GetSetProperty
 from pypy.interpreter.gateway import interp2app, Arguments, unwrap_spec
                                                        ('tv_nsec', rffi.LONG)])
         SEM_FAILED = platform.ConstantInteger('SEM_FAILED')
         SEM_VALUE_MAX = platform.ConstantInteger('SEM_VALUE_MAX')
+        SEM_TIMED_WAIT = platform.Has('sem_timedwait')
+        SEM_GETVALUE = platform.Has('sem_getvalue')
 
     config = platform.configure(CConfig)
-    TIMEVAL       = config['TIMEVAL']
-    TIMESPEC      = config['TIMESPEC']
-    TIMEVALP      = rffi.CArrayPtr(TIMEVAL)
-    TIMESPECP     = rffi.CArrayPtr(TIMESPEC)
-    SEM_T         = rffi.COpaquePtr('sem_t', compilation_info=eci)
-    SEM_FAILED    = rffi.cast(SEM_T, config['SEM_FAILED'])
-    SEM_VALUE_MAX = config['SEM_VALUE_MAX']
-    HAVE_BROKEN_SEM_GETVALUE = False
+    TIMEVAL        = config['TIMEVAL']
+    TIMESPEC       = config['TIMESPEC']
+    TIMEVALP       = rffi.CArrayPtr(TIMEVAL)
+    TIMESPECP      = rffi.CArrayPtr(TIMESPEC)
+    SEM_T          = rffi.COpaquePtr('sem_t', compilation_info=eci)
+    SEM_FAILED     = rffi.cast(SEM_T, config['SEM_FAILED'])
+    SEM_VALUE_MAX  = config['SEM_VALUE_MAX']
+    SEM_TIMED_WAIT = config['SEM_TIMED_WAIT']
+    HAVE_BROKEN_SEM_GETVALUE = config['SEM_GETVALUE']
 
     def external(name, args, result):
         return rffi.llexternal(name, args, result,
     _sem_unlink = external('sem_unlink', [rffi.CCHARP], rffi.INT)
     _sem_wait = external('sem_wait', [SEM_T], rffi.INT)
     _sem_trywait = external('sem_trywait', [SEM_T], rffi.INT)
-    _sem_timedwait = external('sem_timedwait', [SEM_T, TIMESPECP], rffi.INT)
     _sem_post = external('sem_post', [SEM_T], rffi.INT)
     _sem_getvalue = external('sem_getvalue', [SEM_T, rffi.INTP], rffi.INT)
 
     _gettimeofday = external('gettimeofday', [TIMEVALP, rffi.VOIDP], rffi.INT)
 
+    _select = external('select', [rffi.INT, rffi.VOIDP, rffi.VOIDP, rffi.VOIDP,
+                                                          TIMEVALP], rffi.INT)
+
     def sem_open(name, oflag, mode, value):
         res = _sem_open(name, oflag, mode, value)
         if res == SEM_FAILED:
         if res < 0:
             raise OSError(rposix.get_errno(), "sem_timedwait failed")
 
+    def _sem_timedwait_save(sem, deadline):
+        delay = 0
+        void = lltype.nullptr(rffi.VOIDP.TO)
+        with lltype.scoped_alloc(TIMEVALP.TO, 1) as tvdeadline:
+            while True:
+                # poll
+                if _sem_trywait(sem) == 0:
+                    return 0
+                elif rposix.get_errno() != errno.EAGAIN:
+                    return -1
+
+                now = gettimeofday()
+                c_tv_sec = rffi.getintfield(deadline[0], 'c_tv_sec')
+                c_tv_nsec = rffi.getintfield(deadline[0], 'c_tv_nsec')
+                if (c_tv_sec < now[0] or
+                    (c_tv_sec == now[0] and c_tv_nsec <= now[1])):
+                    rposix.set_errno(errno.ETIMEDOUT)
+                    return -1
+
+
+                # calculate how much time is left
+                difference = ((c_tv_sec - now[0]) * 1000000 +
+                                    (c_tv_nsec - now[1]))
+
+                # check delay not too long -- maximum is 20 msecs
+                if delay > 20000:
+                    delay = 20000
+                if delay > difference:
+                    delay = difference
+                delay += 1000
+
+                # sleep
+                rffi.setintfield(tvdeadline[0], 'c_tv_sec', delay / 1000000)
+                rffi.setintfield(tvdeadline[0], 'c_tv_usec', delay % 1000000)
+                if _select(0, void, void, void, tvdeadline) < 0:
+                    return -1
+
+    if SEM_TIMED_WAIT:
+        _sem_timedwait = external('sem_timedwait', [SEM_T, TIMESPECP], rffi.INT)
+    else:
+        _sem_timedwait = _sem_timedwait_save
+
     def sem_post(sem):
         res = _sem_post(sem)
         if res < 0:
             time.sleep(0.001)
 
             # if this is main thread let KeyboardInterrupt be raised
-            # XXX PyErr_CheckSignals()
+            _check_signals(self.space)
 
             # recalculate timeout
             if msecs != rwin32.INFINITE:
                     elif e.errno in (errno.EAGAIN, errno.ETIMEDOUT):
                         return False
                     raise
-                # XXX PyErr_CheckSignals()
+                _check_signals(space)
 
                 return True
         finally:
 
     def semlock_getvalue(self, space):
         if HAVE_BROKEN_SEM_GETVALUE:
-            raise OperationError(space.w_NotImplementedError)
+            raise OperationError(space.w_NotImplementedError, space.wrap(
+                        'sem_getvalue is not implemented on this system'))
         else:
             val = sem_getvalue(self.handle)
             # some posix implementations use negative numbers to indicate
     __exit__=interp2app(W_SemLock.exit),
     SEM_VALUE_MAX=SEM_VALUE_MAX,
     )
+
+def _check_signals(space):
+    space.getexecutioncontext().checksignals()

pypy/module/_multiprocessing/test/test_semaphore.py

 
     def test_semaphore(self):
         from _multiprocessing import SemLock
+        import sys
         assert SemLock.SEM_VALUE_MAX > 10
 
         kind = self.SEMAPHORE
         assert isinstance(sem.handle, (int, long))
 
         assert sem._count() == 0
-        assert sem._get_value() == 1
+        if sys.platform == 'darwin':
+            raises(NotImplementedError, 'sem._get_value()')
+        else:
+            assert sem._get_value() == 1
         assert sem._is_zero() == False
         sem.acquire()
         assert sem._is_mine()
         assert sem._count() == 1
-        assert sem._get_value() == 0
+        if sys.platform == 'darwin':
+            raises(NotImplementedError, 'sem._get_value()')
+        else:
+            assert sem._get_value() == 0
         assert sem._is_zero() == True
         sem.release()
         assert sem._count() == 0

pypy/module/imp/__init__.py

         }
 
     appleveldefs = {
+        'load_dynamic':    'app_imp.load_dynamic',
         }
 
     def __init__(self, space, *args):

pypy/module/imp/app_imp.py

+
+
+def load_dynamic(name, pathname, file=None):
+    """Always raises an ImportError on pypy"""
+    raise ImportError('Not implemented')

pypy/module/imp/test/test_app.py

         assert pathname.endswith('.py') # even if .pyc is up-to-date
         assert description in self.imp.get_suffixes()
 
+    def test_load_dynamic(self):
+        raises(ImportError, self.imp.load_dynamic, 'foo', 'bar')
+        raises(ImportError, self.imp.load_dynamic, 'foo', 'bar', 'baz.so')
 
     def test_suffixes(self):
         for suffix, mode, type in self.imp.get_suffixes():

pypy/objspace/std/bytearrayobject.py

     return W_BytearrayObject(data1 + data2)
 
 def add__Bytearray_ANY(space, w_bytearray1, w_other):
-    if space.isinstance_w(w_other, space.w_unicode):
-        raise OperationError(space.w_TypeError, space.wrap(
-            "can't concat bytearray to unicode"))
-
     data1 = w_bytearray1.data
     data2 = [c for c in space.bufferstr_w(w_other)]
     return W_BytearrayObject(data1 + data2)
     w_bytearray.data += w_other.data
 
 def list_extend__Bytearray_ANY(space, w_bytearray, w_other):
-    if space.isinstance_w(w_other, space.w_unicode):
-        raise OperationError(space.w_TypeError, space.wrap(
-            "bytes string or buffer expected"))
     w_bytearray.data += makebytearraydata_w(space, w_other)
 
 def inplace_add__Bytearray_Bytearray(space, w_bytearray1, w_bytearray2):

pypy/objspace/std/bytearraytype.py

 def makebytearraydata_w(space, w_source):
     # String-like argument
     try:
-        string = space.str_w(w_source)
+        string = space.bufferstr_w(w_source)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise

pypy/objspace/std/test/test_bytes.py

         assert bytearray('ll') in bytearray('hello')
         assert memoryview('ll') in bytearray('hello')
 
+        raises(TypeError, lambda: u'foo' in bytearray('foobar'))
+
     def test_splitlines(self):
         b = bytearray('1234')
         assert b.splitlines()[0] == b

pypy/objspace/std/unicodeobject.py

 
 def eq__Unicode_String(space, w_uni, w_str):
     from pypy.objspace.std.unicodetype import unicode_from_string
-    return _unicode_string_comparison(space, w_uni, w_str, 
+    return _unicode_string_comparison(space, w_uni, w_str,
                     False, unicode_from_string)
 
 eq__Unicode_Rope = eq__Unicode_String
     return unicodedb.isspace(ord(uchar))
 
 def make_generic(funcname):
-    def func(space, w_self): 
+    def func(space, w_self):
         v = w_self._value
         if len(v) == 0:
             return space.w_False
     "internal function called by str_xstrip methods"
     u_self = w_self._value
     u_chars = w_chars._value
-    
+
     lpos = 0
     rpos = len(u_self)
-    
+
     if left:
         while lpos < rpos and u_self[lpos] in u_chars:
            lpos += 1
-       
+
     if right:
         while rpos > lpos and u_self[rpos - 1] in u_chars:
            rpos -= 1
-           
+
     assert rpos >= 0
     result = u_self[lpos: rpos]
     return W_UnicodeObject(result)
 def _strip_none(space, w_self, left, right):
     "internal function called by str_xstrip methods"
     u_self = w_self._value
-    
+
     lpos = 0
     rpos = len(u_self)
-    
+
     if left:
         while lpos < rpos and _isspace(u_self[lpos]):
            lpos += 1
-       
+
     if right:
         while rpos > lpos and _isspace(u_self[rpos - 1]):
            rpos -= 1
-       
+
     assert rpos >= 0
     result = u_self[lpos: rpos]
     return W_UnicodeObject(result)
     for i in range(len(self)):
         result[padding + i] = self[i]
     return W_UnicodeObject(u''.join(result))
-    
+
 def unicode_zfill__Unicode_ANY(space, w_self, w_width):
     self = w_self._value
     width = space.int_w(w_width)
         keepends = 1
     if len(self) == 0:
         return space.newlist([])
-    
+
     start = 0
     end = len(self)
     pos = 0
 
 def sliced(space, s, start, stop, orig_obj):
     assert start >= 0
-    assert stop >= 0 
+    assert stop >= 0
     if start == 0 and stop == len(s) and space.is_w(space.type(orig_obj), space.w_unicode):
         return orig_obj
     return space.wrap( s[start:stop])
 
 def unicode_replace__Unicode_ANY_ANY_ANY(space, w_self, w_old, w_new,
                                          w_maxsplit):
-    old = unicode(space.bufferstr_w(w_old))
-    new = unicode(space.bufferstr_w(w_new))
+    if not space.isinstance_w(w_old, space.w_unicode):
+        old = unicode(space.bufferstr_w(w_old))
+    else:
+        old = space.unicode_w(w_old)
+    if not space.isinstance_w(w_new, space.w_unicode):
+        new = unicode(space.bufferstr_w(w_new))
+    else:
+        new = space.unicode_w(w_new)
     return _unicode_replace(space, w_self, old, new, w_maxsplit)
 
 def _unicode_replace(space, w_self, old, new, w_maxsplit):
         ovfcheck(one + len(w_self._value))
     except OverflowError:
         raise OperationError(
-            space.w_OverflowError, 
+            space.w_OverflowError,
             space.wrap("replace string is too long"))
 
     return W_UnicodeObject(new.join(parts))
-    
+
 
 def unicode_encode__Unicode_ANY_ANY(space, w_unistr,
                                     w_encoding=None,
         if ch == u"\n" or ch ==  u"\r":
             prevsize = 0
     totalsize = prevsize
-            
+
     for i in range(1, len(parts)):
         pad = tabsize - prevsize % tabsize
         nextpart = parts[i]
     formatter = newformat.unicode_formatter(space, spec)
     return formatter.format_string(w_unicode._value)
 
-def buffer__Unicode(space, w_unicode):
-    from pypy.rlib.rstruct.unichar import pack_unichar
-    charlist = []
-    for unich in w_unicode._value:
-        pack_unichar(unich, charlist)
-    from pypy.interpreter.buffer import StringBuffer
-    return space.wrap(StringBuffer(''.join(charlist)))
 
 import unicodetype
 register_all(vars(), unicodetype)
Tip: Filter by directory path e.g. /media app.js to search for public/media/app.js.
Tip: Use camelCasing e.g. ProjME to search for ProjectModifiedEvent.java.
Tip: Filter by extension type e.g. /repo .js to search for all .js files in the /repo directory.
Tip: Separate your search with spaces e.g. /ssh pom.xml to search for src/ssh/pom.xml.
Tip: Use ↑ and ↓ arrow keys to navigate and return to view the file.
Tip: You can also navigate files with Ctrl+j (next) and Ctrl+k (previous) and view the file with Ctrl+o.
Tip: You can also navigate files with Alt+j (next) and Alt+k (previous) and view the file with Alt+o.