Commits

Armin Rigo committed cd177e2

Minimal changes to reimplement _thread.atomic_enter()/atomic_exit().
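
A quick usage sketch, not part of this commit: assuming the patch is applied, the two calls are meant to be paired, typically via try/finally, so that the GIL goes back to its normal behaviour even if the protected code raises.

    import _thread

    counter = 0

    def bump():
        global counter
        _thread.atomic_enter()    # from here on, this thread never gives up the GIL
        try:
            counter += 1          # no other Python thread can run in between
        finally:
            _thread.atomic_exit() # balance the enter; GIL behaviour is normal again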

Files changed (3)

Include/ceval.h

 #ifndef Py_LIMITED_API
 PyAPI_FUNC(void) _PyEval_SetSwitchInterval(unsigned long microseconds);
 PyAPI_FUNC(unsigned long) _PyEval_GetSwitchInterval(void);
+PyAPI_FUNC(void) _PyEval_SetWithAtomic(PyThreadState *tstate);
+PyAPI_FUNC(PyThreadState *) _PyEval_GetWithAtomic(void);
 #endif
 
 #define Py_BEGIN_ALLOW_THREADS { \

Modules/_threadmodule.c

 (4kB pages are common; using multiples of 4096 for the stack size is\n\
 the suggested approach in the absence of more specific information).");
 
+static unsigned int ts_atomic_counter = 0;
+
+static PyObject *
+thread_atomic_enter(PyObject *self)
+{
+    if (ts_atomic_counter == 0) {
+        assert(_PyEval_GetWithAtomic() == NULL);
+        _PyEval_SetWithAtomic(PyThreadState_Get());
+    }
+    else
+        assert(_PyEval_GetWithAtomic() == PyThreadState_Get());
+    ts_atomic_counter++;
+    Py_RETURN_NONE;
+}
+
+static PyObject *
+thread_atomic_exit(PyObject *self)
+{
+    if (ts_atomic_counter == 0) {
+        PyErr_SetString(ThreadError,
+            "_thread.atomic_exit() called more often than atomic_enter()");
+        return NULL;
+    }
+    assert(_PyEval_GetWithAtomic() == PyThreadState_Get());
+    ts_atomic_counter--;
+    if (ts_atomic_counter == 0)
+        _PyEval_SetWithAtomic(NULL);
+    Py_RETURN_NONE;
+}
+
 static PyMethodDef thread_methods[] = {
     {"start_new_thread",        (PyCFunction)thread_PyThread_start_new_thread,
      METH_VARARGS, start_new_doc},
      METH_NOARGS, _count_doc},
     {"stack_size",              (PyCFunction)thread_stack_size,
      METH_VARARGS, stack_size_doc},
+    {"atomic_enter",            (PyCFunction)thread_atomic_enter,
+     METH_NOARGS, NULL},
+    {"atomic_exit",             (PyCFunction)thread_atomic_exit,
+     METH_NOARGS, NULL},
     {NULL,                      NULL}           /* sentinel */
 };
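
Because ts_atomic_counter counts how many atomic_enter() calls are still pending (and the asserts require that nested calls come from the same thread state), the pair is naturally re-entrant. A possible convenience wrapper, not part of this commit; the name atomic() below is hypothetical:

    import _thread
    from contextlib import contextmanager

    @contextmanager
    def atomic():
        # Hypothetical wrapper: pairs enter/exit even if the body raises.
        _thread.atomic_enter()
        try:
            yield
        finally:
            _thread.atomic_exit()

    # Nesting works within one thread; the counter goes 1 -> 2 -> 1 -> 0:
    # with atomic():
    #     with atomic():
    #         ...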
 

Python/ceval_gil.h

 static MUTEX_T switch_mutex;
 #endif
 
+/* ts_withatomic points to the PyThreadState that is currently in
+   a "with atomic" block, or NULL if there isn't any. */
+static PyThreadState *ts_withatomic = NULL;
+
 
 static int gil_created(void)
 {
         Py_FatalError("drop_gil: GIL is not locked");
     /* tstate is allowed to be NULL (early interpreter init) */
     if (tstate != NULL) {
+        if (tstate == ts_withatomic)
+            return;   /* "with atomic" block: don't release the GIL at all */
         /* Sub-interpreter support: threads might have been switched
            under our feet using PyThreadState_Swap(). Fix the GIL last
            holder variable so that our heuristics work. */
     int err;
     if (tstate == NULL)
         Py_FatalError("take_gil: NULL tstate");
+    if (tstate == ts_withatomic)
+        return;   /* "with atomic" blocks: don't take the GIL here, because
+                     it should not have been released at all */
 
     err = errno;
     MUTEX_LOCK(gil_mutex);
 {
     return gil_interval;
 }
+
+void _PyEval_SetWithAtomic(PyThreadState *tstate)
+{
+    ts_withatomic = tstate;
+}
+
+PyThreadState *_PyEval_GetWithAtomic(void)
+{
+    return ts_withatomic;
+}