Commits

Remi Meier committed 2c23968

make nursery_current, nursery_nextlimit and active real thread-locals

Files changed (9)
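
The hunks below all follow one pattern: the descriptor fields `active`, `nursery_current` and `nursery_nextlimit` become real `__thread` variables, and the descriptor keeps only pointers to them (`active_ref`, `nursery_current_ref`, `nursery_nextlimit_ref`), so every former `d->field` access becomes `*d->field_ref`. Here is a minimal sketch of that pattern, with the struct reduced to the affected fields (not the real `tx_descriptor` layout):

    /* Sketch of the refactoring applied in this commit (simplified,
     * not the actual stmgc sources): fields that used to live inside
     * the per-thread descriptor become real __thread variables, and
     * the descriptor keeps only pointers to them, so descriptor-based
     * code keeps working via one extra dereference. */
    #include <assert.h>

    struct tx_descriptor {
        int *active_ref;              /* was: int active; */
        char **nursery_current_ref;   /* was: char *nursery_current; */
    };

    __thread int stm_active;                      /* direct access for the owner thread */
    __thread char *stm_nursery_current;
    __thread struct tx_descriptor *thread_descriptor;

    static void init_descriptor(struct tx_descriptor *d)
    {
        /* wire the descriptor to this thread's thread-locals,
           as in the descriptor-initialization hunk below */
        d->active_ref = &stm_active;
        d->nursery_current_ref = &stm_nursery_current;
        thread_descriptor = d;
    }

    static void example_use(void)
    {
        struct tx_descriptor *d = thread_descriptor;
        *d->active_ref = 1;             /* old code: d->active = 1; */
        assert(*d->active_ref >= 1);    /* old code: assert(d->active >= 1); */
    }

Presumably the owning thread can then read or write `stm_active` and `stm_nursery_current` directly as cheap thread-local accesses, while code that only has a descriptor pointer (including other threads inspecting it) still reaches the same values through the `_ref` fields.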

 
     k = get_rand(11);
     check(p);
-    assert(thread_descriptor->active);
+    assert(*thread_descriptor->active_ref);
 
     if (k < 3)
         p = simple_events(p, _r, _sr);
 }
 
 
-
+__thread int stm_active;
 __thread struct tx_descriptor *thread_descriptor = NULL;
 
 /* 'global_cur_time' is normally a multiple of 2, except when we turn
 {
   /* Assert that we are running a transaction.
    *      Returns True if this transaction is inevitable. */
-  assert(d->active == 1 + !d->setjmp_buf);
-  return d->active == 2;
+  assert(*d->active_ref == 1 + !d->setjmp_buf);
+  return *d->active_ref == 2;
 }
 
 static pthread_mutex_t mutex_inevitable = PTHREAD_MUTEX_INITIALIZER;
   pthread_mutex_lock(&mutex_inevitable);
   stm_start_sharedlock();
 
-  if (d->active < 0)
+  if (*d->active_ref < 0)
     {
       inev_mutex_release();
       AbortNowIfDelayed();
     }
 
   struct tx_descriptor *d = thread_descriptor;
-  assert(d->active >= 1);
+  assert(*d->active_ref >= 1);
 
   /* We need the collection_lock for the sequel; this is required notably
      because we're about to edit flags on a protected object.
 void SpinLoop(int num)
 {
   struct tx_descriptor *d = thread_descriptor;
-  assert(d->active >= 1);
+  assert(*d->active_ref >= 1);
   assert(num < SPINLOOP_REASONS);
   d->num_spinloops[num]++;
   smp_spinloop();
       assert(!stm_has_got_any_lock(d));
     }
 
-  assert(d->active != 0);
+  assert(*d->active_ref != 0);
   assert(!is_inevitable(d));
   assert(num < ABORT_REASONS);
   d->num_aborts[num]++;
   SpinLoop(SPLP_ABORT);
 
   /* make the transaction no longer active */
-  d->active = 0;
+  *d->active_ref = 0;
   d->atomic = 0;
 
   /* release the lock */
 
 void AbortTransactionAfterCollect(struct tx_descriptor *d, int reason)
 {
-  if (d->active >= 0)
+  if (*d->active_ref >= 0)
     {
       dprintf(("abort %d after collect!\n", reason));
-      assert(d->active == 1);   /* not 2, which means inevitable */
-      d->active = -reason;
+      assert(*d->active_ref == 1);   /* not 2, which means inevitable */
+      *d->active_ref = -reason;
     }
-  assert(d->active < 0);
+  assert(*d->active_ref < 0);
 }
 
 void AbortNowIfDelayed(void)
 {
   struct tx_descriptor *d = thread_descriptor;
-  if (d->active < 0)
+  if (*d->active_ref < 0)
     {
-      int reason = -d->active;
-      d->active = 1;
+      int reason = -*d->active_ref;
+      *d->active_ref = 1;
       AbortTransaction(reason);
     }
 }
 static void init_transaction(struct tx_descriptor *d)
 {
   assert(d->atomic == 0);
-  assert(d->active == 0);
+  assert(*d->active_ref == 0);
   stm_start_sharedlock();
-  assert(d->active == 0);
+  assert(*d->active_ref == 0);
 
   if (clock_gettime(CLOCK_MONOTONIC, &d->start_real_time) < 0) {
     d->start_real_time.tv_nsec = -1;
 {
   struct tx_descriptor *d = thread_descriptor;
   init_transaction(d);
-  d->active = 1;
+  *d->active_ref = 1;
   d->setjmp_buf = buf;
   d->longjmp_callback = longjmp_callback;
   d->old_thread_local_obj = stm_thread_local_obj;
 {   /* must save roots around this call */
   revision_t cur_time;
   struct tx_descriptor *d = thread_descriptor;
-  assert(d->active >= 1);
+  assert(*d->active_ref >= 1);
   assert(d->atomic == 0);
   dprintf(("CommitTransaction(%p)\n", d));
   spinlock_acquire(d->public_descriptor->collection_lock, 'C');  /*committing*/
 
   spinlock_release(d->public_descriptor->collection_lock);
   d->num_commits++;
-  d->active = 0;
+  *d->active_ref = 0;
   stm_stop_sharedlock();
 
   /* clear the list of callbacks that would have been called
 {
   d->setjmp_buf = NULL;
   d->old_thread_local_obj = NULL;
-  d->active = 2;
+  *d->active_ref = 2;
   d->reads_size_limit_nonatomic = 0;
   update_reads_size_limit(d);
   dprintf(("make_inevitable(%p)\n", d));
 {   /* must save roots around this call */
   revision_t cur_time;
   struct tx_descriptor *d = thread_descriptor;
-  if (d == NULL || d->active != 1)
+  if (d == NULL || *d->active_ref != 1)
     return;  /* I am already inevitable, or not in a transaction at all
                 (XXX statically we should know when we're outside
                 a transaction) */
       assert(d->my_lock & 1);
       assert(d->my_lock >= LOCKED);
       stm_private_rev_num = -d->my_lock;
+      d->active_ref = &stm_active;
+      d->nursery_current_ref = &stm_nursery_current;
+      d->nursery_nextlimit_ref = &stm_nursery_nextlimit;
       d->private_revision_ref = &stm_private_rev_num;
       d->read_barrier_cache_ref = &stm_read_barrier_cache;
       stm_thread_local_obj = NULL;
     revision_t i;
     struct tx_descriptor *d = thread_descriptor;
     assert(d != NULL);
-    assert(d->active == 0);
+    assert(*d->active_ref == 0);
 
     /* our nursery is empty at this point.  The list 'stolen_objects'
        should have been emptied at the previous minor collection and
   unsigned long count_reads;
   unsigned long reads_size_limit;        /* see should_break_tr. */
   unsigned long reads_size_limit_nonatomic;
-  int active;    /* 0 = inactive, 1 = regular, 2 = inevitable,
-                    negative = killed by collection */
+  int *active_ref;    /* 0 = inactive, 1 = regular, 2 = inevitable,
+                         negative = killed by collection */
   struct timespec start_real_time;
   int max_aborts;
   unsigned int num_commits;
 
 extern __thread struct tx_descriptor *thread_descriptor;
 extern __thread revision_t stm_private_rev_num;
+extern __thread int stm_active;
 extern struct tx_public_descriptor *stm_descriptor_array[];
 extern struct tx_descriptor *stm_tx_head;
 
 void stm_call_on_abort(void *key, void callback(void *))
 {
     struct tx_descriptor *d = thread_descriptor;
-    if (d == NULL || d->active != 1)
+    if (d == NULL || *d->active_ref != 1)
         return;   /* ignore callbacks if we're outside a transaction or
                      in an inevitable transaction (which cannot abort) */
     if (callback == NULL) {
 void stm_invoke_callbacks_on_abort(struct tx_descriptor *d)
 {
     wlog_t *item;
-    assert(d->active == 0);
+    assert(*d->active_ref == 0);
 
     G2L_LOOP_FORWARD(d->callbacks_on_abort, item) {
         void *key = (void *)item->addr;
         output->signature_packed = 127;
         output->elapsed_time = elapsed_time;
         output->abort_reason = abort_reason;
-        output->active = d->active;
+        output->active = *d->active_ref;
         output->atomic = d->atomic;
         output->count_reads = d->count_reads;
         output->reads_size_limit_nonatomic = d->reads_size_limit_nonatomic;
     /* If we're aborting this transaction anyway, we don't need to do
      * more here.
      */
-    if (d->active < 0) {
+    if (*d->active_ref < 0) {
         /* already "aborted" during forced minor collection
            clear list of read objects so that a possible minor collection 
            before the abort doesn't trip 
         return;
     }
 
-    if (d->active == 2) {
+    if (*d->active_ref == 2) {
         /* inevitable transaction: clear the list of read objects */
         gcptrlist_clear(&d->list_of_read_objects);
     }
 #include "stmimpl.h"
 
+
+__thread char *stm_nursery_current;
+__thread char *stm_nursery_nextlimit;
+
 int stmgc_is_in_nursery(struct tx_descriptor *d, gcptr obj)
 {
     return (d->nursery_base <= (char*)obj && ((char*)obj) < d->nursery_end);
     assert(d->nursery_base == NULL);
     d->nursery_base = stm_malloc(GC_NURSERY);       /* start of nursery */
     d->nursery_end = d->nursery_base + GC_NURSERY;  /* end of nursery */
-    d->nursery_current = d->nursery_base;           /* current position */
-    d->nursery_nextlimit = d->nursery_base;         /* next section limit */
+    *d->nursery_current_ref = d->nursery_base;           /* current position */
+    *d->nursery_nextlimit_ref = d->nursery_base;         /* next section limit */
     d->nursery_cleared = NC_REGULAR;
 
     dprintf(("minor: nursery is at [%p to %p]\n", d->nursery_base,
        this assert (committransaction() -> 
        updatechainheads() -> stub_malloc() -> ...): */
     assert(!minor_collect_anything_to_do(d)
-           || d->nursery_current == d->nursery_end);
+           || *d->nursery_current_ref == d->nursery_end);
     stm_free(d->nursery_base);
 
     gcptrlist_delete(&d->old_objects_to_trace);
 void stmgc_minor_collect_soon(void)
 {
     struct tx_descriptor *d = thread_descriptor;
-    d->nursery_current = d->nursery_end;
+    *d->nursery_current_ref = d->nursery_end;
 }
 
 inline static gcptr allocate_nursery(size_t size, revision_t tid)
     /* if 'tid == -1', we must not collect */
     struct tx_descriptor *d = thread_descriptor;
     gcptr P;
-    char *cur = d->nursery_current;
+    char *cur = *d->nursery_current_ref;
     char *end = cur + size;
     assert((size & 3) == 0);
-    d->nursery_current = end;
-    if (end > d->nursery_nextlimit) {
+    *d->nursery_current_ref = end;
+    if (end > *d->nursery_nextlimit_ref) {
         P = allocate_next_section(size, tid);
     }
     else {
 {
     /* XXX inline the fast path */
     assert(tid == (tid & STM_USER_TID_MASK));
-    assert(thread_descriptor->active > 0);
+    assert(*thread_descriptor->active_ref > 0);
     gcptr P = allocate_nursery(size, tid);
     P->h_revision = stm_private_rev_num;
     assert(P->h_original == 0);  /* null-initialized already */
     gcptr *items = d->list_of_read_objects.items;
     assert(d->list_of_read_objects.size >= limit);
 
-    if (d->active == 2) {
+    if (*d->active_ref == 2) {
         /* inevitable transaction: clear the list of read objects */
         gcptrlist_clear(&d->list_of_read_objects);
     }
        Second, if the thread is really idle, then its nursery is sent
        back to the system until it's really needed.
     */
-    if ((d->nursery_nextlimit - d->nursery_base) < GC_NURSERY / 10) {
+    if ((*d->nursery_nextlimit_ref - d->nursery_base) < GC_NURSERY / 10) {
         size_t already_cleared = 0;
         if (d->nursery_cleared == NC_ALREADY_CLEARED) {
-            already_cleared = d->nursery_end - d->nursery_current;
+            already_cleared = d->nursery_end - *d->nursery_current_ref;
         }
         stm_clear_large_memory_chunk(d->nursery_base, GC_NURSERY,
                                      already_cleared);
     else {
         d->nursery_cleared = NC_REGULAR;
 #if defined(_GC_DEBUG)
-        memset(d->nursery_current, 0xEE, d->nursery_end - d->nursery_current);
+        memset(*d->nursery_current_ref, 0xEE, d->nursery_end - *d->nursery_current_ref);
 #endif
     }
 
     if (d->nursery_cleared == NC_ALREADY_CLEARED)
         memset(d->nursery_base, 0, GC_NURSERY);
 #endif
-    d->nursery_current = d->nursery_base;
-    d->nursery_nextlimit = d->nursery_base;
+    *d->nursery_current_ref = d->nursery_base;
+    *d->nursery_nextlimit_ref = d->nursery_base;
 
     assert(!minor_collect_anything_to_do(d));
 }
 void stmgc_minor_collect(void)
 {
     struct tx_descriptor *d = thread_descriptor;
-    assert(d->active >= 1);
+    assert(*d->active_ref >= 1);
     minor_collect(d);
     AbortNowIfDelayed();
 }
 #ifndef NDEBUG
 int minor_collect_anything_to_do(struct tx_descriptor *d)
 {
-    if (d->nursery_current == d->nursery_base /*&&
+    if (*d->nursery_current_ref == d->nursery_base /*&&
         !g2l_any_entry(&d->young_objects_outside_nursery)*/ ) {
         /* there is no young object */
         assert(gcptrlist_size(&d->public_with_young_copy) == 0);
        First fix 'nursery_current', left to a bogus value by the caller.
     */
     struct tx_descriptor *d = thread_descriptor;
-    d->nursery_current -= allocate_size;
+    *d->nursery_current_ref -= allocate_size;
 
     /* Are we asking for a "reasonable" number of bytes, i.e. a value
        at most equal to one section?
     }
 
     /* Are we at the end of the nursery? */
-    if (d->nursery_nextlimit == d->nursery_end ||
-        d->nursery_current == d->nursery_end) {   // stmgc_minor_collect_soon()
+    if (*d->nursery_nextlimit_ref == d->nursery_end ||
+        *d->nursery_current_ref == d->nursery_end) {   // stmgc_minor_collect_soon()
         /* Yes */
         if (tid == -1)
             return NULL;    /* cannot collect */
         stmgc_minor_collect();
         stmgcpage_possibly_major_collect(0);
 
-        assert(d->nursery_current == d->nursery_base);
-        assert(d->nursery_nextlimit == d->nursery_base);
+        assert(*d->nursery_current_ref == d->nursery_base);
+        assert(*d->nursery_nextlimit_ref == d->nursery_base);
     }
 
     /* Clear the next section */
     if (d->nursery_cleared != NC_ALREADY_CLEARED)
-        memset(d->nursery_nextlimit, 0, GC_NURSERY_SECTION);
-    d->nursery_nextlimit += GC_NURSERY_SECTION;
+        memset(*d->nursery_nextlimit_ref, 0, GC_NURSERY_SECTION);
+    *d->nursery_nextlimit_ref += GC_NURSERY_SECTION;
 
     /* Return the object from there */
-    gcptr P = (gcptr)d->nursery_current;
-    d->nursery_current += allocate_size;
-    assert(d->nursery_current <= d->nursery_nextlimit);
+    gcptr P = (gcptr)*d->nursery_current_ref;
+    *d->nursery_current_ref += allocate_size;
+    assert(*d->nursery_current_ref <= *d->nursery_nextlimit_ref);
 
     P->h_tid = tid;
     assert_cleared(((char *)P) + sizeof(revision_t),
 
 #define NURSERY_FIELDS_DECL                                             \
     /* the nursery */                                                   \
-    char *nursery_current;                                              \
-    char *nursery_nextlimit;                                            \
+    char **nursery_current_ref;                                         \
+    char **nursery_nextlimit_ref;                                       \
     char *nursery_end;                                                  \
     char *nursery_base;                                                 \
     enum { NC_REGULAR, NC_ALREADY_CLEARED } nursery_cleared;            \
 
 
 struct tx_descriptor;  /* from et.h */
+extern __thread char *stm_nursery_current;
+extern __thread char *stm_nursery_nextlimit;
+
 
 void stmgc_init_nursery(void);
 void stmgc_done_nursery(void);
 /* macro functionality */
 
 extern __thread gcptr *stm_shadowstack;
+extern __thread int stm_active;
+extern __thread char *stm_nursery_current;
+extern __thread char *stm_nursery_nextlimit;
 
 #define stm_push_root(obj)  (*stm_shadowstack++ = (obj))
 #define stm_pop_root()      (*--stm_shadowstack)
 
 extern __thread revision_t stm_private_rev_num;
-extern __thread struct tx_descriptor *thread_descriptor; /* XXX: stm_ prefix */
 gcptr stm_DirectReadBarrier(gcptr);
 gcptr stm_WriteBarrier(gcptr);
 gcptr stm_RepeatReadBarrier(gcptr);
                                    d->reads_size_limit_nonatomic));
     /* if is_inevitable(), reads_size_limit_nonatomic should be 0
        (and thus reads_size_limit too, if !d->atomic.) */
-    if (d->active == 2)
+    if (*d->active_ref == 2)
         assert(d->reads_size_limit_nonatomic == 0);
 #endif
 
            has configured 'reads_size_limit_nonatomic' to a smaller value.
            When such a shortened transaction succeeds, the next one will
            see its length limit doubled, up to the maximum. */
-        if (counter == 0 && d->active != 2) {
+        if (counter == 0 && *d->active_ref != 2) {
             unsigned long limit = d->reads_size_limit_nonatomic;
             if (limit != 0 && limit < (stm_regular_length_limit >> 1))
                 limit = (limit << 1) | 1;
             /* atomic transaction: a common case is that callback() returned
                even though we are atomic because we need a major GC.  For
                that case, release and reaquire the rw lock here. */
-            assert(d->active >= 1);
+            assert(*d->active_ref >= 1);
             stm_possible_safe_point();
         }
 
 {   /* must save roots around this call */
     struct tx_descriptor *d = thread_descriptor;
     if (d->atomic) {
-        assert(d->active >= 1);
+        assert(*d->active_ref >= 1);
         stm_possible_safe_point();
     }
     else {
 int stm_in_transaction(void)
 {
     struct tx_descriptor *d = thread_descriptor;
-    return d && d->active;
+    return d && *d->active_ref;
 }
 
 /************************************************************/
 
     /* Warning, may block waiting for rwlock_in_transaction while another
        thread runs a major GC */
-    assert(thread_descriptor->active);
+    assert(*thread_descriptor->active_ref);
     assert(in_single_thread != thread_descriptor);
 
     stm_stop_sharedlock();