Commits

Armin Rigo committed 1d95a8b

(iko, arigo)

A bit unsure, but it seems that thread.locks, implemented on OSX as
pairs of a mutex and a condition variable, don't reliably survive a
fork().  So we apply an ugly hack that simply resets all of them
after a fork().  Obscure, particularly because CPython doesn't seem
to need it, and we have no clue why.
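
To make the failure mode concrete, here is a small standalone C demo (not
part of this commit, and showing only the generic locks-across-fork hazard
rather than the obscure OSX-specific one): after fork(), only the forking
thread exists in the child, so a mutex that some other thread was holding
at fork() time can never be released there.  Resetting every lock in the
child, as this commit does, is the usual workaround.

/* Hypothetical standalone demo: a pthread mutex held by another thread
   at fork() time stays locked forever in the child, because that thread
   no longer exists there.  Compile with -pthread. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/wait.h>

static pthread_mutex_t mut = PTHREAD_MUTEX_INITIALIZER;

static void *holder(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&mut);      /* take the lock and never release it */
	pause();
	return NULL;
}

int main(void)
{
	pthread_t th;
	pthread_create(&th, NULL, holder, NULL);
	sleep(1);                      /* let 'holder' grab the mutex */

	pid_t pid = fork();
	if (pid == 0) {
		/* child: 'holder' is gone, so nobody will ever unlock 'mut' */
		if (pthread_mutex_trylock(&mut) != 0)
			printf("child: lock is stuck, would deadlock on acquire\n");
		else
			printf("child: lock was free\n");
		_exit(0);
	}
	waitpid(pid, NULL, 0);
	return 0;
}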

  • Parent commits a651dda

Files changed (4)

pypy/module/thread/ll_thread.py

     export_symbols = ['RPyThreadGetIdent', 'RPyThreadLockInit',
                       'RPyThreadAcquireLock', 'RPyThreadReleaseLock',
                       'RPyThreadYield',
-                      'RPyThreadGetStackSize', 'RPyThreadSetStackSize']
+                      'RPyThreadGetStackSize', 'RPyThreadSetStackSize',
+                      'RPyThreadAfterFork']
 )
 
 def llexternal(name, args, result, **kwds):
 TLOCKP = rffi.COpaquePtr('struct RPyOpaque_ThreadLock',
                           compilation_info=eci)
 
-c_thread_lock_init = llexternal('RPyThreadLockInit', [TLOCKP], rffi.INT)
+c_thread_lock_init = llexternal('RPyThreadLockInit', [TLOCKP], rffi.INT,
+                                threadsafe=False)   # may add the lock to a global list
 c_thread_acquirelock = llexternal('RPyThreadAcquireLock', [TLOCKP, rffi.INT],
                                   rffi.INT,
                                   threadsafe=True)    # release the GIL
 
 # ____________________________________________________________
 #
+# Hack
+
+thread_after_fork = llexternal('RPyThreadAfterFork', [], lltype.Void)
+
+# ____________________________________________________________
+#
 # GIL support wrappers
 
 null_ll_lock = lltype.nullptr(TLOCKP.TO)

pypy/module/thread/os_thread.py

     "Called in the child process after a fork()"
     space.threadlocals.reinit_threads(space)
     bootstrapper.reinit()
+    thread.thread_after_fork()
 
     # Clean the threading module after a fork()
     w_modules = space.sys.get('modules')

pypy/translator/c/src/thread_nt.h

 
 /************************************************************/
 
+void RPyThreadAfterFork(void)
+{
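+  /* nothing to do: there is no fork() on Windows */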
+}
+
 int RPyThreadLockInit(struct RPyOpaque_ThreadLock * lock)
 {
   return InitializeNonRecursiveMutex(lock);

pypy/translator/c/src/thread_pthread.h

 	/* a <cond, mutex> pair to handle an acquire of a locked lock */
 	pthread_cond_t   lock_released;
 	pthread_mutex_t  mut;
+	struct RPyOpaque_ThreadLock *prev, *next;
 };
 
 #define RPyOpaque_INITEXPR_ThreadLock  {        \
 void RPyThreadReleaseLock(struct RPyOpaque_ThreadLock *lock);
 long RPyThreadGetStackSize(void);
 long RPyThreadSetStackSize(long);
+void RPyThreadAfterFork(void);
 
 
 /* implementations */
 
 #include <semaphore.h>
 
+void RPyThreadAfterFork(void)
+{
+}
+
 int RPyThreadLockInit(struct RPyOpaque_ThreadLock *lock)
 {
 	int status, error = 0;
 #else                                      /* no semaphores */
 /************************************************************/
 
+struct RPyOpaque_ThreadLock *alllocks;   /* doubly-linked list */
+
+void RPyThreadAfterFork(void)
+{
+	/* Mess.  We have no clue how CPython gets away without this on
+	   OSX, but the issue is that the state of mutexes is not really
+	   preserved across a fork().  So we need to walk over all lock
+	   objects here and rebuild their mutex and condition variable.
+
+	   See e.g. http://hackage.haskell.org/trac/ghc/ticket/1391 for
+	   a similar bug in GHC.
+	*/
+	struct RPyOpaque_ThreadLock *p = alllocks;
+	alllocks = NULL;
+	while (p) {
+		struct RPyOpaque_ThreadLock *next = p->next;
+		int was_locked = p->locked;
+		RPyThreadLockInit(p);
+		p->locked = was_locked;
+		p = next;
+	}
+}
+
 int RPyThreadLockInit(struct RPyOpaque_ThreadLock *lock)
 {
 	int status, error = 0;
 	if (error)
 		return 0;
 	lock->initialized = 1;
+	/* add 'lock' to the doubly-linked list */
+	if (alllocks)
+		alllocks->prev = lock;
+	lock->next = alllocks;
+	lock->prev = NULL;
+	alllocks = lock;
 	return 1;
 }
 
 {
 	int status, error = 0;
 	if (lock->initialized) {
+		/* remove 'lock' from the doubly-linked list */
+		if (lock->prev)
+			lock->prev->next = lock->next;
+		else {
+			assert(alllocks == lock);
+			alllocks = lock->next;
+		}
+		if (lock->next)
+			lock->next->prev = lock->prev;
+
 		status = pthread_mutex_destroy(&lock->mut);
 		CHECK_STATUS("pthread_mutex_destroy");