stmgc / c4 / stmsync.c

#include "stmimpl.h"


#define LENGTH_SHADOW_STACK   163840


__thread gcptr *stm_shadowstack;
static unsigned long stm_regular_length_limit = 10000;
static revision_t sync_required = 0;

void stm_set_transaction_length(long length_max)
{                               /* save roots around this call! */
    BecomeInevitable("set_transaction_length");
    if (length_max <= 0) {
        length_max = 1;
    }
    stm_regular_length_limit = length_max;
}

_Bool stm_should_break_transaction(void)
{
    struct tx_descriptor *d = thread_descriptor;

    /* a single comparison to handle all cases:

     - first, if sync_required == -1, this should return True.

     - if d->atomic, then we should return False.  This is done by
       forcing reads_size_limit to ULONG_MAX as soon as atomic > 0.

     - otherwise, if is_inevitable(), then we should return True.
       This is done by forcing both reads_size_limit and
       reads_size_limit_nonatomic to 0 in that case.

     - finally, the default case: return True if d->count_reads is
       greater than or equal to reads_size_limit
       (== reads_size_limit_nonatomic).
    */
#ifdef _GC_DEBUG
    /* reads_size_limit is ULONG_MAX if d->atomic, or else it is equal to
       reads_size_limit_nonatomic. */
    assert(d->reads_size_limit == (d->atomic ? ULONG_MAX :
                                   d->reads_size_limit_nonatomic));
    /* if is_inevitable(), reads_size_limit_nonatomic should be 0
       (and thus reads_size_limit too, if !d->atomic.) */
    if (stm_active == 2)
        assert(d->reads_size_limit_nonatomic == 0);
#endif

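    /* Note: 'sync_required' is either 0 or -1 (all bits set).  OR-ing it
       into 'count_reads' turns the left-hand side into an all-ones value
       when a synchronization is requested, so the comparison below then
       succeeds regardless of the limit. */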
    return (sync_required | d->count_reads) >= d->reads_size_limit;
}

static void init_shadowstack(void)
{
    struct tx_descriptor *d = thread_descriptor;
    d->shadowstack = stm_malloc(sizeof(gcptr) * LENGTH_SHADOW_STACK);
    if (!d->shadowstack) {
        stm_fatalerror("out of memory: shadowstack\n");
    }
    stm_shadowstack = d->shadowstack;
    d->shadowstack_end_ref = &stm_shadowstack;
    stm_push_root(END_MARKER_ON);
}

static void done_shadowstack(void)
{
    struct tx_descriptor *d = thread_descriptor;
    gcptr x = stm_pop_root();
    assert(x == END_MARKER_ON);
    assert(stm_shadowstack == d->shadowstack);
    stm_shadowstack = NULL;
    stm_free(d->shadowstack);
}

void stm_set_max_aborts(int max_aborts)
{
    struct tx_descriptor *d = thread_descriptor;
    d->max_aborts = max_aborts;
}

int stm_enter_callback_call(void)
{
    int token = (thread_descriptor == NULL);
    dprintf(("enter_callback_call(tok=%d)\n", token));
    if (token == 1) {
        stmgcpage_acquire_global_lock();
#ifdef STM_BARRIER_COUNT
        static int seen = 0;
        if (!seen) {
            seen = 1;
            atexit(&stm_print_barrier_count);
        }
#endif
        DescriptorInit();
        stmgc_init_nursery();
        init_shadowstack();
        stmgcpage_release_global_lock();
    }
    BeginInevitableTransaction(0);
    return token;
}

void stm_leave_callback_call(int token)
{
    dprintf(("leave_callback_call(%d)\n", token));
    if (token == 1)
        stmgc_minor_collect();   /* force everything out of the nursery */

    CommitTransaction(0);

    if (token == 1) {
        stmgcpage_acquire_global_lock();
        done_shadowstack();
        stmgc_done_nursery();
        DescriptorDone();
        stmgcpage_release_global_lock();
    }
}
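
/* A minimal usage sketch (hypothetical caller): code that can be entered
   from a thread that may or may not already be set up for STM pairs the
   two calls and passes the token through:

       void my_c_callback(void)              // hypothetical
       {
           int token = stm_enter_callback_call();
           ...do work inside an inevitable transaction...
           stm_leave_callback_call(token);
       }
*/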

void stm_initialize(void)
{
    int r = stm_enter_callback_call();
    if (r != 1)
        stm_fatalerror("stm_initialize: already initialized\n");
}

void stm_finalize(void)
{
    stm_leave_callback_call(1);
}

/************************************************************/

void stm_perform_transaction(gcptr arg, int (*callback)(gcptr, int))
{   /* must save roots around this call */
    jmp_buf _jmpbuf;
    long volatile v_counter = 0;
    gcptr *volatile v_saved_value = stm_shadowstack;

    stm_push_root(arg);
    stm_push_root(END_MARKER_OFF);

    if (!thread_descriptor->atomic)
        CommitTransaction(0);

#ifdef _GC_ON_CPYTHON
    volatile PyThreadState *v_ts = PyGILState_GetThisThreadState();
    volatile int v_recursion_depth = v_ts->recursion_depth;
#endif

    setjmp(_jmpbuf);

#ifdef _GC_ON_CPYTHON
    v_ts->recursion_depth = v_recursion_depth;
#endif

    /* After setjmp(), the local variables v_* are preserved because they
     * are volatile.  The other variables are only declared here. */
    struct tx_descriptor *d = thread_descriptor;
    long counter, result;
    counter = v_counter;
    stm_shadowstack = v_saved_value + 2;  /* skip the two values pushed above */

    do {
        v_counter = counter + 1;
        /* If counter==0, initialize 'reads_size_limit_nonatomic' from the
           configured length limit.  If counter>0, we did an abort, which
           has configured 'reads_size_limit_nonatomic' to a smaller value.
           When such a shortened transaction succeeds, the next one will
           see its length limit doubled, up to the maximum. */
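        /* Illustrative numbers, assuming the default limit of 10000: a
           transaction shortened to a limit of 1000 reads grows back
           through 2001, 4003, 8007, and then 10000 again. */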
        if (counter == 0 && stm_active != 2) {
            unsigned long limit = d->reads_size_limit_nonatomic;
            if (limit != 0 && limit < (stm_regular_length_limit >> 1))
                limit = (limit << 1) | 1;
            else
                limit = stm_regular_length_limit;
            d->reads_size_limit_nonatomic = limit;
        }
        if (!d->atomic) {
            stm_begin_transaction(&_jmpbuf, NULL);
        }
        else {
            /* atomic transaction: a common case is that callback() returned
               even though we are atomic because we need a major GC.  For
               that case, release and reacquire the rw lock here. */
            assert(stm_active >= 1);
            stm_possible_safe_point();
        }

        /* invoke the callback in the new transaction */
        arg = v_saved_value[0];
        result = callback(arg, counter);
        assert(stm_shadowstack == v_saved_value + 2);

        if (!d->atomic)
            CommitTransaction(0);

        counter = 0;
    }
    while (result > 0);  /* continue as long as callback() returned > 0 */

    if (d->atomic) {
        if (d->setjmp_buf == &_jmpbuf) {
            BecomeInevitable("perform_transaction left with atomic");
        }
    }
    else {
        BeginInevitableTransaction(0);
    }

    gcptr x = stm_pop_root();   /* pop the END_MARKER */
    assert(x == END_MARKER_OFF || x == END_MARKER_ON);
    stm_pop_root();             /* pop the 'arg' */
    assert(stm_shadowstack == v_saved_value);
}
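
/* A minimal sketch of a callback (hypothetical): after an abort,
   stm_perform_transaction() re-runs the callback with an increasing
   'counter' argument, and the loop above stops once the callback
   returns a value <= 0.

       int my_callback(gcptr arg, int counter)   // hypothetical
       {
           ...transactional work on 'arg'...
           return 0;    // return a value > 0 to run one more transaction
       }

       stm_push_root(some_other_root);      // save the caller's own roots
       stm_perform_transaction(arg, &my_callback);
       some_other_root = stm_pop_root();
*/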

void stm_transaction_break(void *buf, void (*longjmp_callback)(void *))
{   /* must save roots around this call */
    struct tx_descriptor *d = thread_descriptor;
    if (d->atomic) {
        assert(stm_active >= 1);
        stm_possible_safe_point();
    }
    else {
        CommitTransaction(0);

        unsigned long limit = d->reads_size_limit_nonatomic;
        if (limit != 0 && limit < (stm_regular_length_limit >> 1))
            limit = (limit << 1) | 1;
        else
            limit = stm_regular_length_limit;
        d->reads_size_limit_nonatomic = limit;

        stm_begin_transaction(buf, longjmp_callback);
    }
}

void stm_invalidate_jmp_buf(void *buf)
{   /* must save roots around this call */
    struct tx_descriptor *d = thread_descriptor;
    if (d->setjmp_buf == buf) {
        BecomeInevitable("stm_invalidate_jmp_buf");
    }
}

void stm_commit_transaction(void)
{   /* must save roots around this call */
    struct tx_descriptor *d = thread_descriptor;
    if (!d->atomic)
        CommitTransaction(0);
    else
        BecomeInevitable("stm_commit_transaction but atomic");
}

void stm_begin_inevitable_transaction(void)
{   /* must save roots around this call */
    struct tx_descriptor *d = thread_descriptor;
    if (!d->atomic)
        BeginInevitableTransaction(0);
}

void stm_become_inevitable(const char *reason)
{
    BecomeInevitable(reason);
}

int stm_in_transaction(void)
{
    struct tx_descriptor *d = thread_descriptor;
    return d && stm_active;
}

/************************************************************/

/* a multi-reader, single-writer lock: transactions normally take a reader
   lock, so don't conflict with each other; when we need to do a global GC,
   we take a writer lock to "stop the world".  Note the initializer here,
   which should give the correct priority for stm_possible_safe_point(). */
static pthread_rwlock_t rwlock_shared =
    PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP;
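
/* (With glibc, this _NP initializer presumably selects writer preference:
   a thread asking for the reader lock blocks if a writer is already
   waiting, which is what stm_possible_safe_point() relies on.) */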

struct tx_descriptor *in_single_thread = NULL;

void stm_start_sharedlock(void)
{
    int err = pthread_rwlock_rdlock(&rwlock_shared);
    if (err != 0)
        stm_fatalerror("stm_start_sharedlock: "
                       "pthread_rwlock_rdlock failure\n");
    //assert(stmgc_nursery_hiding(thread_descriptor, 0));
    dprintf(("stm_start_sharedlock\n"));
}

void stm_stop_sharedlock(void)
{
    dprintf(("stm_stop_sharedlock\n"));
    //assert(stmgc_nursery_hiding(thread_descriptor, 1));
    int err = pthread_rwlock_unlock(&rwlock_shared);
    if (err != 0)
        stm_fatalerror("stm_stop_sharedlock: "
                       "pthread_rwlock_unlock failure\n");
}

static void start_exclusivelock(void)
{
    int err = pthread_rwlock_wrlock(&rwlock_shared);
    if (err != 0)
        stm_fatalerror("start_exclusivelock: "
                       "pthread_rwlock_wrlock failure\n");
    dprintf(("start_exclusivelock\n"));
}

static void stop_exclusivelock(void)
{
    dprintf(("stop_exclusivelock\n"));
    int err = pthread_rwlock_unlock(&rwlock_shared);
    if (err != 0)
        stm_fatalerror("stop_exclusivelock: "
                       "pthread_rwlock_unlock failure\n");
}


static int single_thread_nesting = 0;
void stm_stop_all_other_threads(void)
{                               /* push gc roots! */
    struct tx_descriptor *d;

    BecomeInevitable("stop_all_other_threads");
    if (!single_thread_nesting) {
        stm_start_single_thread();
        
        for (d = stm_tx_head; d; d = d->tx_next) {
            if (*d->active_ref == 1)  // '&& d != thread_descriptor' would always be true here
                AbortTransactionAfterCollect(d, ABRT_OTHER_THREADS);
        }
    }
    single_thread_nesting++;
}


void stm_partial_commit_and_resume_other_threads(void)
{                               /* push gc roots! */
    struct tx_descriptor *d = thread_descriptor;
    assert(stm_active == 2);
    int atomic = d->atomic;

    single_thread_nesting--;
    if (single_thread_nesting == 0) {
        /* Give up atomicity during commit. This still works because
           we keep the inevitable status, thereby being guaranteed to 
           commit before all others. */
        stm_atomic(-atomic);
        
        /* Commit and start new inevitable transaction while never
           giving up the inevitable status. */
        CommitTransaction(1);       /* 1=stay_inevitable! */
        BeginInevitableTransaction(1);
        
        /* restore atomic-count */
        stm_atomic(atomic);
        
        stm_stop_single_thread();
    }
}
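
/* A minimal pairing sketch (hypothetical caller, e.g. from GC code):

       stm_stop_all_other_threads();        // push gc roots first!
       ...code that must not run concurrently with other threads...
       stm_partial_commit_and_resume_other_threads();
*/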

void stm_start_single_thread(void)
{                               /* push gc roots! */
    /* Called by the GC, just after a minor collection, when we need to do
       a major collection.  When it returns, it has acquired the "write
       lock", which prevents any other thread from running in a
       transaction.  Warning, may block waiting for 'rwlock_shared' while
       another thread runs a major GC itself! */
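    /* Setting 'sync_required' to -1 first makes
       stm_should_break_transaction() return true in every other thread,
       so each of them soon reaches a point where it releases its reader
       lock (at a transaction break or in stm_possible_safe_point()),
       allowing the writer lock below to be acquired. */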
    ACCESS_ONCE(sync_required) = -1;
    stm_stop_sharedlock();
    start_exclusivelock();
    ACCESS_ONCE(sync_required) = 0;

    assert(in_single_thread == NULL);
    in_single_thread = thread_descriptor;
    assert(in_single_thread != NULL);
}

void stm_stop_single_thread(void)
{                               /* push gc roots! */
    /* Warning, may block waiting for 'rwlock_shared' while another
       thread runs a major GC */
    assert(in_single_thread == thread_descriptor);
    in_single_thread = NULL;

    stop_exclusivelock();
    stm_start_sharedlock();
}

void stm_possible_safe_point(void)
{
    if (!ACCESS_ONCE(sync_required))
        return;

    /* Warning, may block waiting for 'rwlock_shared' while another
       thread runs a major GC */
    assert(stm_active);
    assert(in_single_thread != thread_descriptor);

    stm_stop_sharedlock();
    /* another thread should be waiting in start_exclusivelock(),
       which takes priority here */
    stm_start_sharedlock();

    AbortNowIfDelayed();   /* if another thread ran a major GC */
}

void stm_minor_collect(void)
{
    stmgc_minor_collect();
    stmgcpage_possibly_major_collect(0);
}

void stm_major_collect(void)
{
    stmgc_minor_collect();
    stmgcpage_possibly_major_collect(1);
}

/************************************************************/

/***** Prebuilt roots, added to the list as the transactions that
       changed them commit *****/

struct GcPtrList stm_prebuilt_gcroots = {0};

void stm_add_prebuilt_root(gcptr obj)
{
    assert(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL);
    gcptrlist_insert(&stm_prebuilt_gcroots, obj);
}

void stm_clear_between_tests(void)
{
    dprintf(("\n"
            "===============================================================\n"
            "========================[  START  ]============================\n"
            "===============================================================\n"
            "\n"));
    gcptrlist_clear(&stm_prebuilt_gcroots);
}