Commits

Armin Rigo committed 4256c83

Port from 3.3.

  • Participants
  • Parent commits e9e63db
  • Branches stm-thread-2.7

Comments (0)

Files changed (5)

 PyAPI_FUNC(void) PyEval_AcquireThread(PyThreadState *tstate);
 PyAPI_FUNC(void) PyEval_ReleaseThread(PyThreadState *tstate);
 PyAPI_FUNC(void) PyEval_ReInitThreads(void);
+PyAPI_FUNC(void) _PyEval_SetWithAtomic(PyThreadState *tstate);
+PyAPI_FUNC(PyThreadState *) _PyEval_GetWithAtomic(void);
 
 #define Py_BEGIN_ALLOW_THREADS { \
                         PyThreadState *_save; \

Include/pythread.h

 #define NOWAIT_LOCK	0
 PyAPI_FUNC(void) PyThread_release_lock(PyThread_type_lock);
 
+/* implemented in ceval.c: raises an exception and returns -1 if
+   acquiring the lock would deadlock because we are in a "with atomic"
+   section; returns 0 otherwise. */
+PyAPI_FUNC(int) PyThread_check_lock_with_atomic(PyThread_type_lock);
+
 PyAPI_FUNC(size_t) PyThread_get_stacksize(void);
 PyAPI_FUNC(int) PyThread_set_stacksize(size_t);
 

Modules/_io/bufferedio.c

         }
         return 0;
     }
+    if (PyThread_check_lock_with_atomic(self->lock))
+        return 0;
     Py_BEGIN_ALLOW_THREADS
     PyThread_acquire_lock(self->lock, 1);
     Py_END_ALLOW_THREADS

Modules/threadmodule.c

 static PyObject *ThreadError;
 static PyObject *str_dict;
 static long nb_threads = 0;
+static unsigned int ts_atomic_counter = 0;
 
 /* Lock objects */
 
     if (!PyArg_ParseTuple(args, "|i:acquire", &i))
         return NULL;
 
+    if (PyThread_check_lock_with_atomic(self->lock_lock))
+        return NULL;
+
     Py_BEGIN_ALLOW_THREADS
     i = PyThread_acquire_lock(self->lock_lock, i);
     Py_END_ALLOW_THREADS
     Py_RETURN_NONE;
 }
 
+
+/* The type of the "atomic" singleton */
+
+/* __enter__ for the _thread.atomic singleton: enter (or re-enter) a
+   "with atomic" section.  While such a section is active the GIL is
+   never released (see ceval.c), so no other thread runs; that also
+   serializes all access to ts_atomic_counter and ts_withatomic.
+   Always returns None.
+
+   Fix: METH_NOARGS handlers are called as (*meth)(self, NULL); the
+   function must therefore accept a second (ignored) argument --
+   defining it with a single parameter and casting is undefined
+   behavior. */
+static PyObject *atomic_enter(PyObject *self, PyObject *noarg)
+{
+    if (ts_atomic_counter == 0) {
+        assert(_PyEval_GetWithAtomic() == NULL);
+        _PyEval_SetWithAtomic(PyThreadState_Get());
+    }
+    else {
+        /* nested "with atomic": must be the same thread, since the
+           outer section prevents any other thread from running */
+        assert(_PyEval_GetWithAtomic() == PyThreadState_Get());
+    }
+    ts_atomic_counter++;
+    Py_RETURN_NONE;
+}
+
+/* __exit__ for the _thread.atomic singleton.  The interpreter calls it
+   with (exc_type, exc_value, traceback), which we ignore; we only pop
+   one nesting level.  When the outermost level is exited, clear
+   ts_withatomic so the GIL resumes normal operation.  Returns None, or
+   raises ThreadError on unbalanced calls.
+
+   Fixes: the method table registers this with METH_VARARGS, so the
+   handler must accept (self, args) -- the previous one-parameter
+   definition was a calling-convention mismatch (undefined behavior);
+   also corrects "that" -> "than" in the error message. */
+static PyObject *atomic_exit(PyObject *self, PyObject *args)
+{
+    if (ts_atomic_counter == 0) {
+        PyErr_SetString(ThreadError,
+            "_thread.atomic_exit() called more often than atomic_enter()");
+        return NULL;
+    }
+    assert(_PyEval_GetWithAtomic() == PyThreadState_Get());
+    ts_atomic_counter--;
+    if (ts_atomic_counter == 0)
+        _PyEval_SetWithAtomic(NULL);
+    Py_RETURN_NONE;
+}
+
+/* Method table for the Atomic type: makes the singleton usable as a
+   context manager ("with _thread.atomic: ...").  __exit__ must be
+   METH_VARARGS because the interpreter invokes it with three arguments
+   (exc_type, exc_value, traceback). */
+static PyMethodDef atomic_methods[] = {
+    {"__enter__", (PyCFunction)atomic_enter, METH_NOARGS},
+    {"__exit__",  (PyCFunction)atomic_exit,  METH_VARARGS},
+    {NULL,              NULL}           /* sentinel */
+};
+
+/* Type of the _thread.atomic singleton.  Only tp_methods is
+   interesting (__enter__/__exit__ above); every slot left 0 here is
+   filled in by PyType_Ready() in the module init function.  tp_dealloc
+   is 0 because the sole instance is statically allocated and never
+   freed. */
+static PyTypeObject atomic_type = {
+    PyVarObject_HEAD_INIT(NULL, 0)
+    /* tp_name           */ "_thread.Atomic",
+    /* tp_size           */ sizeof(PyObject),
+    /* tp_itemsize       */ 0,
+    /* tp_dealloc        */ 0,
+    /* tp_print          */ 0,
+    /* tp_getattr        */ 0,
+    /* tp_setattr        */ 0,
+    /* reserved          */ 0,
+    /* tp_repr           */ 0,
+    /* tp_as_number      */ 0,
+    /* tp_as_sequence    */ 0,
+    /* tp_as_mapping     */ 0,
+    /* tp_hash           */ 0,
+    /* tp_call           */ 0,
+    /* tp_str            */ 0,
+    /* tp_getattro       */ 0,
+    /* tp_setattro       */ 0,
+    /* tp_as_buffer      */ 0,
+    /* tp_flags          */ Py_TPFLAGS_DEFAULT,
+    /* tp_doc            */ "Type of _thread.atomic",
+    /* tp_traverse       */ 0,
+    /* tp_clear          */ 0,
+    /* tp_richcompare    */ 0,
+    /* tp_weaklistoffset */ 0,
+    /* tp_iter           */ 0,
+    /* tp_iternext       */ 0,
+    /* tp_methods        */ atomic_methods,
+};
+
+/* Statically allocated singleton instance; set up with PyObject_Init()
+   and published as "_thread.atomic" in the module init function. */
+static PyObject atomic_object;
+
 /* Module functions */
 
 struct bootstate {
     nb_threads++;
     res = PyEval_CallObjectWithKeywords(
         boot->func, boot->args, boot->keyw);
+    if (ts_atomic_counter > 0) {
+        ts_atomic_counter = 0;
+        _PyEval_SetWithAtomic(NULL);
+    }
     if (res == NULL) {
         if (PyErr_ExceptionMatches(PyExc_SystemExit))
             PyErr_Clear();
     if (str_dict == NULL)
         return;
 
+    /* Set up "atomic" */
+    if (PyType_Ready(&atomic_type) < 0)
+        return;
+    if (PyObject_Init(&atomic_object, &atomic_type) == NULL)
+        return;
+    Py_INCREF(&atomic_object);
+    if (PyModule_AddObject(m, "atomic", &atomic_object) < 0)
+        return;
+
     /* Initialize the C thread library */
     PyThread_init_thread();
 }
 static PyThread_type_lock pending_lock = 0; /* for pending calls */
 static long main_thread = 0;
 
+/* ts_withatomic points to the PyThreadState that is currently in
+   a "with atomic" block, or NULL if there isn't any. */
+static PyThreadState *ts_withatomic = NULL;
+
 int
 PyEval_ThreadsInitialized(void)
 {
 void
 PyEval_AcquireLock(void)
 {
+    /* NOTE(review): reading ts_withatomic without holding the GIL
+       looks racy, but the thread inside "with atomic" keeps the GIL
+       for the whole section, so ts_withatomic only changes while no
+       other thread runs Python code -- confirm for C callers that run
+       without a thread state. */
+    if (ts_withatomic != NULL) {
+        PyThreadState *tstate = PyThreadState_GET();
+        if (tstate == NULL)
+            Py_FatalError("PyEval_AcquireLock: current thread state is NULL");
+        if (tstate == ts_withatomic)
+            return;  /* "with atomic" blocks: don't take the GIL here, because
+                        it should not have been released at all */
+    }
     PyThread_acquire_lock(interpreter_lock, 1);
 }
 
 void
 PyEval_ReleaseLock(void)
 {
+    /* Unlike PyEval_AcquireLock() there is no tstate comparison here:
+       while ts_withatomic is set, that thread holds the GIL
+       continuously, so presumably no other thread can legitimately be
+       releasing it -- NOTE(review): verify for GIL-less C callers. */
+    if (ts_withatomic != NULL)
+        return;   /* "with atomic" block: don't release the GIL at all */
     PyThread_release_lock(interpreter_lock);
 }
 
         Py_FatalError("PyEval_AcquireThread: NULL new thread state");
     /* Check someone has called PyEval_InitThreads() to create the lock */
     assert(interpreter_lock);
-    PyThread_acquire_lock(interpreter_lock, 1);
+    if (tstate != ts_withatomic)
+        PyThread_acquire_lock(interpreter_lock, 1);
     if (PyThreadState_Swap(tstate) != NULL)
         Py_FatalError(
             "PyEval_AcquireThread: non-NULL old thread state");
         Py_FatalError("PyEval_ReleaseThread: NULL thread state");
     if (PyThreadState_Swap(NULL) != tstate)
         Py_FatalError("PyEval_ReleaseThread: wrong thread state");
-    PyThread_release_lock(interpreter_lock);
+    if (tstate != ts_withatomic)
+        PyThread_release_lock(interpreter_lock);
 }
 
 /* This function is called from PyOS_AfterFork to ensure that newly
         Py_DECREF(result);
     Py_DECREF(threading);
 }
+
+/* Record which thread state (if any) is currently inside a "with
+   atomic" section; pass NULL to clear.  Called from threadmodule.c
+   (atomic_enter/atomic_exit) with the GIL held. */
+void _PyEval_SetWithAtomic(PyThreadState *tstate)
+{
+    ts_withatomic = tstate;
+}
+
+/* Return the thread state currently inside a "with atomic" section,
+   or NULL if there is none.
+
+   Fix: the definition used old-style empty parentheses "()", which in
+   C declares an unspecified parameter list; the header declares the
+   function as taking (void), so the definition should match. */
+PyThreadState *_PyEval_GetWithAtomic(void)
+{
+    return ts_withatomic;
+}
+
+/* If the current thread is inside a "with atomic" section and `lock'
+   is already held, report a deadlock: raise RuntimeError and return
+   -1.  Otherwise return 0, meaning the caller may go ahead and acquire
+   the lock. */
+int PyThread_check_lock_with_atomic(PyThread_type_lock lock)
+{
+    if (ts_withatomic == NULL)
+        return 0;       /* not in an atomic section: nothing to check */
+
+    /* probe the lock without blocking */
+    if (PyThread_acquire_lock(lock, 0)) {
+        PyThread_release_lock(lock);
+        return 0;       /* it was free: hand it back to the caller */
+    }
+    PyErr_SetString(PyExc_RuntimeError,
+        "deadlock: an atomic transaction tries to acquire "
+        "a lock that is already acquired");
+    return -1;
+}
 #endif
 
 /* Functions save_thread and restore_thread are always defined so
     if (tstate == NULL)
         Py_FatalError("PyEval_SaveThread: NULL tstate");
 #ifdef WITH_THREAD
-    if (interpreter_lock)
+    if (interpreter_lock && tstate != ts_withatomic)
         PyThread_release_lock(interpreter_lock);
 #endif
     return tstate;
     if (tstate == NULL)
         Py_FatalError("PyEval_RestoreThread: NULL tstate");
 #ifdef WITH_THREAD
-    if (interpreter_lock) {
+    if (interpreter_lock && tstate != ts_withatomic) {
         int err = errno;
         PyThread_acquire_lock(interpreter_lock, 1);
         errno = err;
                     _Py_Ticker = 0;
             }
 #ifdef WITH_THREAD
-            if (interpreter_lock) {
+            if (interpreter_lock && tstate != ts_withatomic) {
                 /* Give another thread a chance */
 
                 if (PyThreadState_Swap(NULL) != tstate)