openrisc/trunk/gnu-dev/or1k-gcc/libitm/method-ml.cc
/* Copyright (C) 2012 Free Software Foundation, Inc.
   Contributed by Torvald Riegel <triegel@redhat.com>.

   This file is part of the GNU Transactional Memory Library (libitm).

   Libitm is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   Libitm is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

#include "libitm_i.h"

using namespace GTM;

namespace {

// This group consists of all TM methods that synchronize via multiple locks
// (or ownership records).
struct ml_mg : public method_group
{
  static const gtm_word LOCK_BIT = (~(gtm_word)0 >> 1) + 1;
  static const gtm_word INCARNATION_BITS = 3;
  static const gtm_word INCARNATION_MASK = 7;
  // Maximum time is all bits except the lock bit, the overflow reserve bit,
  // and the incarnation bits.
  static const gtm_word TIME_MAX = (~(gtm_word)0 >> (2 + INCARNATION_BITS));
  // The overflow reserve bit is the MSB of the timestamp part of an orec,
  // so we can have TIME_MAX+1 pending timestamp increases before we overflow.
  static const gtm_word OVERFLOW_RESERVE = TIME_MAX + 1;
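  // Illustrative annotation (not part of the upstream comments): assuming a
  // 64-bit gtm_word (the width is target-dependent), the constants above give
  // an orec the following layout:
  //   bit  63     : LOCK_BIT (orec owned by a writer; the remaining bits then
  //                 hold the owning thread pointer shifted right by one)
  //   bits 62 - 3 : timestamp (bit 62 is the overflow reserve bit)
  //   bits  2 - 0 : incarnation number (INCARNATION_BITS == 3)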

  static bool is_locked(gtm_word o) { return o & LOCK_BIT; }
  static gtm_word set_locked(gtm_thread *tx)
  {
    return ((uintptr_t)tx >> 1) | LOCK_BIT;
  }
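  // Annotation (not in the upstream comments): the locked value doubles as an
  // owner tag.  Shifting the thread pointer right by one keeps the orec's MSB
  // free for LOCK_BIT, and the resulting value is only ever compared for
  // equality against other set_locked() results, never dereferenced.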
  // Returns a time that includes the lock bit, which is required by both
  // validate() and is_more_recent_or_locked().
  static gtm_word get_time(gtm_word o) { return o >> INCARNATION_BITS; }
  static gtm_word set_time(gtm_word time) { return time << INCARNATION_BITS; }
  static bool is_more_recent_or_locked(gtm_word o, gtm_word than_time)
  {
    // LOCK_BIT is the MSB; thus, if O is locked, it is larger than TIME_MAX.
    return get_time(o) > than_time;
  }
  static bool has_incarnation_left(gtm_word o)
  {
    return (o & INCARNATION_MASK) < INCARNATION_MASK;
  }
  static gtm_word inc_incarnation(gtm_word o) { return o + 1; }

  // The shared time base.
  atomic<gtm_word> time __attribute__((aligned(HW_CACHELINE_SIZE)));

  // The array of ownership records.
  atomic<gtm_word>* orecs __attribute__((aligned(HW_CACHELINE_SIZE)));
  char tailpadding[HW_CACHELINE_SIZE - sizeof(atomic<gtm_word>*)];

  // Location-to-orec mapping.  Stripes of 16B mapped to 2^19 orecs.
  static const gtm_word L2O_ORECS = 1 << 19;
  static const gtm_word L2O_SHIFT = 4;
  static size_t get_orec(const void* addr)
  {
    return ((uintptr_t)addr >> L2O_SHIFT) & (L2O_ORECS - 1);
  }
  static size_t get_next_orec(size_t orec)
  {
    return (orec + 1) & (L2O_ORECS - 1);
  }
  // Returns the next orec after the region.
  static size_t get_orec_end(const void* addr, size_t len)
  {
    return (((uintptr_t)addr + len + (1 << L2O_SHIFT) - 1) >> L2O_SHIFT)
        & (L2O_ORECS - 1);
  }
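  // Worked example (annotation, not upstream): with L2O_SHIFT == 4, every
  // 16-byte stripe of the address space hashes to one of the 2^19 orecs.
  // For an 8-byte access at address 0x1004, get_orec() yields
  // (0x1004 >> 4) & 0x7ffff == 0x100 and get_orec_end() yields
  // ((0x1004 + 8 + 15) >> 4) & 0x7ffff == 0x101, so the do-while loops in
  // pre_write()/pre_load() visit exactly the single orec 0x100; an access
  // that crosses a stripe boundary visits correspondingly more orecs.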

  virtual void init()
  {
    // We assume that an atomic<gtm_word> is backed by just a gtm_word, so
    // starting with zeroed memory is fine.
    orecs = (atomic<gtm_word>*) xcalloc(
        sizeof(atomic<gtm_word>) * L2O_ORECS, true);
    // This store is only executed while holding the serial lock, so relaxed
    // memory order is sufficient here.
    time.store(0, memory_order_relaxed);
  }

  virtual void fini()
  {
    free(orecs);
  }

  // We only re-initialize when our time base overflows.  Thus, only reset
  // the time base and the orecs but do not re-allocate the orec array.
  virtual void reinit()
  {
    // This store is only executed while holding the serial lock, so relaxed
    // memory order is sufficient here.  Same holds for the memset.
    time.store(0, memory_order_relaxed);
    memset(orecs, 0, sizeof(atomic<gtm_word>) * L2O_ORECS);
  }
};

static ml_mg o_ml_mg;


// The multiple lock, write-through TM method.
// Maps each memory location to one of the orecs in the orec array, and then
// acquires the associated orec eagerly before writing through.
// Writes require undo-logging because we are dealing with several locks/orecs
// and need to resolve deadlocks if necessary by aborting one of the
// transactions.
// Reads do time-based validation with snapshot time extensions.  Incarnation
// numbers are used to decrease contention on the time base (with those,
// aborted transactions do not need to acquire a new version number for the
// data that has been previously written in the transaction and needs to be
// rolled back).
// gtm_thread::shared_state is used to store a transaction's current
// snapshot time (or commit time). The serial lock uses ~0 for inactive
// transactions and 0 for active ones. Thus, we always have a meaningful
// timestamp in shared_state that can be used to implement quiescence-based
// privatization safety.
class ml_wt_dispatch : public abi_dispatch
{
protected:
  static void pre_write(gtm_thread *tx, const void *addr, size_t len)
  {
    gtm_word snapshot = tx->shared_state.load(memory_order_relaxed);
    gtm_word locked_by_tx = ml_mg::set_locked(tx);

    // Lock all orecs that cover the region.
    size_t orec = ml_mg::get_orec(addr);
    size_t orec_end = ml_mg::get_orec_end(addr, len);
    do
      {
        // Load the orec.  Relaxed memory order is sufficient here because
        // either we have acquired the orec or we will try to acquire it with
        // a CAS with stronger memory order.
        gtm_word o = o_ml_mg.orecs[orec].load(memory_order_relaxed);

        // Check whether we have acquired the orec already.
        if (likely (locked_by_tx != o))
          {
            // If not, acquire.  Make sure that our snapshot time is greater
            // than or equal to the orec's version to avoid masking
            // invalidations of our snapshot with our own writes.
            if (unlikely (ml_mg::is_locked(o)))
              tx->restart(RESTART_LOCKED_WRITE);

            if (unlikely (ml_mg::get_time(o) > snapshot))
              {
                // We only need to extend the snapshot if we have indeed read
                // from this orec before.  Given that we are an update
                // transaction, we will have to extend anyway during commit.
                // ??? Scan the read log instead, aborting if we have read
                // from data covered by this orec before?
                snapshot = extend(tx);
              }

            // We need acquire memory order here to synchronize with other
            // (ownership) releases of the orec.  We do not need acq_rel order
            // because whenever another thread reads from this CAS'
            // modification, then it will abort anyway and does not rely on
            // any further happens-before relation to be established.
            if (unlikely (!o_ml_mg.orecs[orec].compare_exchange_strong(
                o, locked_by_tx, memory_order_acquire)))
              tx->restart(RESTART_LOCKED_WRITE);

            // We use an explicit fence here to avoid having to use release
            // memory order for all subsequent data stores.  This fence will
            // synchronize with loads of the data with acquire memory order.
            // See post_load() for why this is necessary.
            // Adding release memory order to the prior CAS is not sufficient,
            // at least according to the Batty et al. formalization of the
            // memory model.
            atomic_thread_fence(memory_order_release);

            // We log the previous value here to be able to use incarnation
            // numbers when we have to roll back.
            // ??? Reserve capacity early to avoid capacity checks here?
            gtm_rwlog_entry *e = tx->writelog.push();
            e->orec = o_ml_mg.orecs + orec;
            e->value = o;
          }
        orec = o_ml_mg.get_next_orec(orec);
      }
    while (orec != orec_end);

    // Do undo logging.  We do not know which region prior writes logged
    // (even if orecs have been acquired), so just log everything.
    tx->undolog.log(addr, len);
  }

  static void pre_write(const void *addr, size_t len)
  {
    gtm_thread *tx = gtm_thr();
    pre_write(tx, addr, len);
  }

  // Returns true iff all the orecs in our read log still have the same time
  // or have been locked by the transaction itself.
  static bool validate(gtm_thread *tx)
  {
    gtm_word locked_by_tx = ml_mg::set_locked(tx);
    // ??? This might get called from pre_load() via extend().  In that case,
    // we don't really need to check the new entries that pre_load() is
    // adding.  Stop earlier?
    for (gtm_rwlog_entry *i = tx->readlog.begin(), *ie = tx->readlog.end();
        i != ie; i++)
      {
        // Relaxed memory order is sufficient here because we do not need to
        // establish any new synchronizes-with relationships.  We only need
        // to read a value that is at least as current as enforced by the
        // callers: extend() loads global time with acquire, and trycommit()
        // increments global time with acquire.  Therefore, we will see the
        // most recent orec updates before the global time that we load.
        gtm_word o = i->orec->load(memory_order_relaxed);
        // We compare only the time stamp and the lock bit here.  We know that
        // we have read only committed data before, so we can ignore
        // intermediate yet rolled-back updates presented by the incarnation
        // number bits.
        if (ml_mg::get_time(o) != ml_mg::get_time(i->value)
            && o != locked_by_tx)
          return false;
      }
    return true;
  }

  // Tries to extend the snapshot to a more recent time.  Returns the new
  // snapshot time and updates TX->SHARED_STATE.  If the snapshot cannot be
  // extended to the current global time, TX is restarted.
  static gtm_word extend(gtm_thread *tx)
  {
    // We read global time here, even if this isn't strictly necessary
    // because we could just return the maximum of the timestamps that
    // validate sees.  However, the potential cache miss on global time is
    // probably a reasonable price to pay for avoiding unnecessary extensions
    // in the future.
    // We need acquire memory order because we have to synchronize with the
    // increment of global time by update transactions, whose lock
    // acquisitions we have to observe (also see trycommit()).
    gtm_word snapshot = o_ml_mg.time.load(memory_order_acquire);
    if (!validate(tx))
      tx->restart(RESTART_VALIDATE_READ);

    // Update our public snapshot time.  Probably useful to decrease waiting
    // due to quiescence-based privatization safety.
    // Use release memory order to establish synchronizes-with with the
    // privatizers; prior data loads should happen before the privatizers
    // potentially modify anything.
    tx->shared_state.store(snapshot, memory_order_release);
    return snapshot;
  }

  // First pass over orecs.  Load and check all orecs that cover the region.
  // Write to read log, extend snapshot time if necessary.
  static gtm_rwlog_entry* pre_load(gtm_thread *tx, const void* addr,
      size_t len)
  {
    // Don't obtain an iterator yet because the log might get resized.
    size_t log_start = tx->readlog.size();
    gtm_word snapshot = tx->shared_state.load(memory_order_relaxed);
    gtm_word locked_by_tx = ml_mg::set_locked(tx);

    size_t orec = ml_mg::get_orec(addr);
    size_t orec_end = ml_mg::get_orec_end(addr, len);
    do
      {
        // We need acquire memory order here so that this load will
        // synchronize with the store that releases the orec in trycommit().
        // In turn, this makes sure that subsequent data loads will read from
        // a visible sequence of side effects that starts with the most recent
        // store to the data right before the release of the orec.
        gtm_word o = o_ml_mg.orecs[orec].load(memory_order_acquire);

        if (likely (!ml_mg::is_more_recent_or_locked(o, snapshot)))
          {
            success:
            gtm_rwlog_entry *e = tx->readlog.push();
            e->orec = o_ml_mg.orecs + orec;
            e->value = o;
          }
        else if (!ml_mg::is_locked(o))
          {
            // We cannot read this part of the region because it has been
            // updated more recently than our snapshot time.  If we can extend
            // our snapshot, then we can read.
            snapshot = extend(tx);
            goto success;
          }
        else
          {
            // If the orec is locked by us, just skip it because we can just
            // read from it.  Otherwise, restart the transaction.
            if (o != locked_by_tx)
              tx->restart(RESTART_LOCKED_READ);
          }
        orec = o_ml_mg.get_next_orec(orec);
      }
    while (orec != orec_end);
    return &tx->readlog[log_start];
  }

  // Second pass over orecs, verifying that we had a consistent read.
  // Restart the transaction if any of the orecs is locked by another
  // transaction.
  static void post_load(gtm_thread *tx, gtm_rwlog_entry* log)
  {
    for (gtm_rwlog_entry *end = tx->readlog.end(); log != end; log++)
      {
        // Check that the snapshot is consistent.  We expect the previous data
        // load to have acquire memory order, or be atomic and followed by an
        // acquire fence.
        // As a result, the data load will synchronize with the release fence
        // issued by the transactions whose data updates the data load has read
        // from.  This forces the orec load to read from a visible sequence of
        // side effects that starts with the other updating transaction's
        // store that acquired the orec and set it to locked.
        // We therefore either read a value with the locked bit set (and
        // restart) or read an orec value that was written after the data had
        // been written.  Either will allow us to detect inconsistent reads
        // because it will have a higher/different value.
        // Also note that, unlike in validate(), we compare the raw value
        // of the orec here, including incarnation numbers.  We must prevent
        // returning uncommitted data from loads (whereas when validating, we
        // already performed a consistent load).
        gtm_word o = log->orec->load(memory_order_relaxed);
        if (log->value != o)
          tx->restart(RESTART_VALIDATE_READ);
      }
  }

  template <typename V> static V load(const V* addr, ls_modifier mod)
  {
    // Read-for-write should be unlikely, but we need to handle it or we will
    // break later WaW optimizations.
    if (unlikely(mod == RfW))
      {
        pre_write(addr, sizeof(V));
        return *addr;
      }
    if (unlikely(mod == RaW))
      return *addr;
    // ??? Optimize for RaR?

    gtm_thread *tx = gtm_thr();
    gtm_rwlog_entry* log = pre_load(tx, addr, sizeof(V));

    // Load the data.
    // This needs to have acquire memory order (see post_load()).
    // Alternatively, we can put an acquire fence after the data load but this
    // is probably less efficient.
    // FIXME We would need an atomic load with acquire memory order here but
    // we can't just forge an atomic load for nonatomic data because this
    // might not work on all implementations of atomics.  However, we need
    // the acquire memory order and we can only establish this if we link
    // it to the matching release using a reads-from relation between atomic
    // loads.  Also, the compiler is allowed to optimize nonatomic accesses
    // differently than atomic accesses (e.g., if the load would be moved to
    // after the fence, we potentially don't synchronize properly anymore).
    // Instead of the following, just use an ordinary load followed by an
    // acquire fence, and hope that this is good enough for now:
    // V v = atomic_load_explicit((atomic<V>*)addr, memory_order_acquire);
    V v = *addr;
    atomic_thread_fence(memory_order_acquire);

    // ??? Retry the whole load if it wasn't consistent?
    post_load(tx, log);

    return v;
  }

  template <typename V> static void store(V* addr, const V value,
      ls_modifier mod)
  {
    if (likely(mod != WaW))
      pre_write(addr, sizeof(V));
    // FIXME We would need an atomic store here but we can't just forge an
    // atomic store for nonatomic data because this might not work on all
    // implementations of atomics.  However, we need this store to link the
    // release fence in pre_write() to the acquire operation in load, which
    // is only guaranteed if we have a reads-from relation between atomic
    // accesses.  Also, the compiler is allowed to optimize nonatomic accesses
    // differently than atomic accesses (e.g., if the store would be moved
    // to before the release fence in pre_write(), things could go wrong).
    // atomic_store_explicit((atomic<V>*)addr, value, memory_order_relaxed);
    *addr = value;
  }

public:
  static void memtransfer_static(void *dst, const void* src, size_t size,
      bool may_overlap, ls_modifier dst_mod, ls_modifier src_mod)
  {
    gtm_rwlog_entry* log = 0;
    gtm_thread *tx = 0;

    if (src_mod == RfW)
      {
        tx = gtm_thr();
        pre_write(tx, src, size);
      }
    else if (src_mod != RaW && src_mod != NONTXNAL)
      {
        tx = gtm_thr();
        log = pre_load(tx, src, size);
      }
    // ??? Optimize for RaR?

    if (dst_mod != NONTXNAL && dst_mod != WaW)
      {
        if (src_mod != RfW && (src_mod == RaW || src_mod == NONTXNAL))
          tx = gtm_thr();
        pre_write(tx, dst, size);
      }

    // FIXME We should use atomics here (see store()).  Let's just hope that
    // memcpy/memmove are good enough.
    if (!may_overlap)
      ::memcpy(dst, src, size);
    else
      ::memmove(dst, src, size);

    // ??? Retry the whole memtransfer if it wasn't consistent?
    if (src_mod != RfW && src_mod != RaW && src_mod != NONTXNAL)
      {
        // See load() for why we need the acquire fence here.
        atomic_thread_fence(memory_order_acquire);
        post_load(tx, log);
      }
  }

  static void memset_static(void *dst, int c, size_t size, ls_modifier mod)
  {
    if (mod != WaW)
      pre_write(dst, size);
    // FIXME We should use atomics here (see store()).  Let's just hope that
    // memset is good enough.
    ::memset(dst, c, size);
  }

  virtual gtm_restart_reason begin_or_restart()
  {
    // We don't need to do anything for nested transactions.
    gtm_thread *tx = gtm_thr();
    if (tx->parent_txns.size() > 0)
      return NO_RESTART;

    // Read the current time, which becomes our snapshot time.
    // Use acquire memory order so that we see the lock acquisitions by update
    // transactions that incremented the global time (see trycommit()).
    gtm_word snapshot = o_ml_mg.time.load(memory_order_acquire);
    // Re-initialize method group on time overflow.
    if (snapshot >= o_ml_mg.TIME_MAX)
      return RESTART_INIT_METHOD_GROUP;

    // We don't need to enforce any ordering for the following store. There
    // are no earlier data loads in this transaction, so the store cannot
    // become visible before those (which could lead to the violation of
    // privatization safety). The store can become visible after later loads
    // but this does not matter because the previous value will have been
    // smaller or equal (the serial lock will set shared_state to zero when
    // marking the transaction as active, and restarts enforce immediate
    // visibility of a smaller or equal value with a barrier; see
    // rollback()).
    tx->shared_state.store(snapshot, memory_order_relaxed);
    return NO_RESTART;
  }

  virtual bool trycommit(gtm_word& priv_time)
  {
    gtm_thread* tx = gtm_thr();

    // If we haven't updated anything, we can commit.
    if (!tx->writelog.size())
      {
        tx->readlog.clear();
        return true;
      }

    // Get a commit time.
    // Overflow of o_ml_mg.time is prevented in begin_or_restart().
    // We need acq_rel here because (1) the acquire part is required for our
    // own subsequent call to validate(), and the release part is necessary to
    // make other threads' validate() work as explained there and in extend().
    gtm_word ct = o_ml_mg.time.fetch_add(1, memory_order_acq_rel) + 1;

    // Extend our snapshot time to at least our commit time.
    // Note that we do not need to validate if our snapshot time is right
    // before the commit time because we are never sharing the same commit
    // time with other transactions.
    // No need to reset shared_state, which will be modified by the serial
    // lock right after our commit anyway.
    gtm_word snapshot = tx->shared_state.load(memory_order_relaxed);
    if (snapshot < ct - 1 && !validate(tx))
      return false;

    // Release orecs.
    // See pre_load() / post_load() for why we need release memory order.
    // ??? Can we use a release fence and relaxed stores?
    gtm_word v = ml_mg::set_time(ct);
    for (gtm_rwlog_entry *i = tx->writelog.begin(), *ie = tx->writelog.end();
        i != ie; i++)
      i->orec->store(v, memory_order_release);

    // We're done, clear the logs.
    tx->writelog.clear();
    tx->readlog.clear();

    // Need to ensure privatization safety. Every other transaction must
    // have a snapshot time that is at least as high as our commit time
    // (i.e., our commit must be visible to them).
    priv_time = ct;
    return true;
  }

  virtual void rollback(gtm_transaction_cp *cp)
  {
    // We don't do anything for rollbacks of nested transactions.
    // ??? We could release locks here if we snapshot writelog size.  readlog
    // is similar.  This is just a performance optimization though.  Nested
    // aborts should be rather infrequent, so the additional save/restore
    // overhead for the checkpoints could be higher.
    if (cp != 0)
      return;

    gtm_thread *tx = gtm_thr();
    gtm_word overflow_value = 0;

    // Release orecs.
    for (gtm_rwlog_entry *i = tx->writelog.begin(), *ie = tx->writelog.end();
        i != ie; i++)
      {
        // If possible, just increase the incarnation number.
        // See pre_load() / post_load() for why we need release memory order.
        // ??? Can we use a release fence and relaxed stores?  (Same below.)
        if (ml_mg::has_incarnation_left(i->value))
          i->orec->store(ml_mg::inc_incarnation(i->value),
              memory_order_release);
        else
          {
            // We have an incarnation overflow.  Acquire a new timestamp, and
            // use it from now on as value for each orec whose incarnation
            // number cannot be increased.
            // Overflow of o_ml_mg.time is prevented in begin_or_restart().
            // See pre_load() / post_load() for why we need release memory
            // order.
            if (!overflow_value)
              // Release memory order is sufficient but required here.
              // In contrast to the increment in trycommit(), we need release
              // for the same reason but do not need the acquire because we
              // do not validate subsequently.
              overflow_value = ml_mg::set_time(
                  o_ml_mg.time.fetch_add(1, memory_order_release) + 1);
            i->orec->store(overflow_value, memory_order_release);
          }
      }

    // We need this release fence to ensure that privatizers see the
    // rolled-back original state (not any uncommitted values) when they read
    // the new snapshot time that we write in begin_or_restart().
    atomic_thread_fence(memory_order_release);

    // We're done, clear the logs.
    tx->writelog.clear();
    tx->readlog.clear();
  }

  virtual bool supports(unsigned number_of_threads)
  {
    // Each txn can commit and fail and rollback once before checking for
    // overflow, so this bounds the number of threads that we can support.
    // In practice, this won't be a problem but we check it anyway so that
    // we never break in the occasional weird situation.
    return (number_of_threads * 2 <= ml_mg::OVERFLOW_RESERVE);
  }
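  // Annotation (not upstream): begin_or_restart() forces re-initialization
  // once the time base reaches TIME_MAX, but a transaction that has already
  // passed that check can still bump the time base up to twice before it
  // notices: once in a trycommit() that fails validation and once more in the
  // subsequent rollback() on incarnation overflow.  The OVERFLOW_RESERVE
  // values above TIME_MAX absorb those pending increments, hence the bound of
  // number_of_threads * 2 above.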

  CREATE_DISPATCH_METHODS(virtual, )
  CREATE_DISPATCH_METHODS_MEM()

  ml_wt_dispatch() : abi_dispatch(false, true, false, false, &o_ml_mg)
  { }
};

} // anon namespace

static const ml_wt_dispatch o_ml_wt_dispatch;

abi_dispatch *
GTM::dispatch_ml_wt ()
{
  return const_cast<ml_wt_dispatch *>(&o_ml_wt_dispatch);
}
