OpenCores
URL https://opencores.org/ocsvn/openrisc/openrisc/trunk

Subversion Repositories openrisc

[/] [openrisc/] [tags/] [gnu-src/] [gcc-4.5.1/] [gcc-4.5.1-or32-1.0rc4/] [libstdc++-v3/] [include/] [std/] [atomic] - Blame information for rev 519

Details | Compare with Previous | View Log

Line No. Rev Author Line
// -*- C++ -*- header.

// Copyright (C) 2008, 2009, 2010 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.
24
 
25
/** @file atomic
26
 *  This is a Standard C++ Library header.
27
 */
28
 
29
// Based on "C++ Atomic Types and Operations" by Hans Boehm and Lawrence Crowl.
30
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2427.html
31
 
32
#ifndef _GLIBCXX_ATOMIC
33
#define _GLIBCXX_ATOMIC 1
34
 
35
#pragma GCC system_header
36
 
37
#ifndef __GXX_EXPERIMENTAL_CXX0X__
38
# include 
39
#endif
40
 
41
#include 
42
#include 
43
 
44
_GLIBCXX_BEGIN_NAMESPACE(std)
45
 
46
  /**
   * @addtogroup atomics
   * @{
   */

  /// kill_dependency
  /// Terminates a memory_order_consume dependency chain: returns a copy
  /// of __y whose value no longer carries a dependency.
  template<typename _Tp>
    inline _Tp
    kill_dependency(_Tp __y)
    {
      _Tp ret(__y);
      return ret;
    }
59
 
60
  // Derive the failure memory order for a compare-exchange from its
  // success order: release-flavored orders are invalid for the failed
  // load, so release -> relaxed and acq_rel -> acquire; all other
  // orders are returned unchanged.
  inline memory_order
  __calculate_memory_order(memory_order __m)
  {
    const bool __cond1 = __m == memory_order_release;
    const bool __cond2 = __m == memory_order_acq_rel;
    memory_order __mo1(__cond1 ? memory_order_relaxed : __m);
    memory_order __mo2(__cond2 ? memory_order_acquire : __mo1);
    return __mo2;
  }
69
 
70
  //
71
  // Three nested namespaces for atomic implementation details.
72
  //
73
  // The nested namespace inlined into std:: is determined by the value
74
  // of the _GLIBCXX_ATOMIC_PROPERTY macro and the resulting
75
  // ATOMIC_*_LOCK_FREE macros. See file atomic_base.h.
76
  //
77
  // 0 == __atomic0 == Never lock-free
78
  // 1 == __atomic1 == Best available, sometimes lock-free
79
  // 2 == __atomic2 == Always lock-free
80
#include 
81
#include 
82
 
83
  /// atomic
84
  /// 29.4.3, Generic atomic type, primary class template.
85
  template
86
    struct atomic
87
    {
88
    private:
89
      _Tp _M_i;
90
 
91
    public:
92
      atomic() = default;
93
      ~atomic() = default;
94
      atomic(const atomic&) = delete;
95
      atomic& operator=(const atomic&) volatile = delete;
96
 
97
      atomic(_Tp __i) : _M_i(__i) { }
98
 
99
      operator _Tp() const;
100
 
101
      _Tp
102
      operator=(_Tp __i) { store(__i); return __i; }
103
 
104
      bool
105
      is_lock_free() const volatile;
106
 
107
      void
108
      store(_Tp, memory_order = memory_order_seq_cst) volatile;
109
 
110
      _Tp
111
      load(memory_order = memory_order_seq_cst) const volatile;
112
 
113
      _Tp
114
      exchange(_Tp __i, memory_order = memory_order_seq_cst) volatile;
115
 
116
      bool
117
      compare_exchange_weak(_Tp&, _Tp, memory_order, memory_order) volatile;
118
 
119
      bool
120
      compare_exchange_strong(_Tp&, _Tp, memory_order, memory_order) volatile;
121
 
122
      bool
123
      compare_exchange_weak(_Tp&, _Tp,
124
                            memory_order = memory_order_seq_cst) volatile;
125
 
126
      bool
127
      compare_exchange_strong(_Tp&, _Tp,
128
                              memory_order = memory_order_seq_cst) volatile;
129
    };
130
 
131
 
132
  /// Partial specialization for pointer types.
133
  template
134
    struct atomic<_Tp*> : atomic_address
135
    {
136
      atomic() = default;
137
      ~atomic() = default;
138
      atomic(const atomic&) = delete;
139
      atomic& operator=(const atomic&) volatile = delete;
140
 
141
      atomic(_Tp* __v) : atomic_address(__v) { }
142
 
143
      void
144
      store(_Tp*, memory_order = memory_order_seq_cst);
145
 
146
      _Tp*
147
      load(memory_order = memory_order_seq_cst) const;
148
 
149
      _Tp*
150
      exchange(_Tp*, memory_order = memory_order_seq_cst);
151
 
152
      bool
153
      compare_exchange_weak(_Tp*&, _Tp*, memory_order, memory_order);
154
 
155
      bool
156
      compare_exchange_strong(_Tp*&, _Tp*, memory_order, memory_order);
157
 
158
      bool
159
      compare_exchange_weak(_Tp*&, _Tp*, memory_order = memory_order_seq_cst);
160
 
161
      bool
162
      compare_exchange_strong(_Tp*&, _Tp*, memory_order = memory_order_seq_cst);
163
 
164
      _Tp*
165
      fetch_add(ptrdiff_t, memory_order = memory_order_seq_cst);
166
 
167
      _Tp*
168
      fetch_sub(ptrdiff_t, memory_order = memory_order_seq_cst);
169
 
170
      operator _Tp*() const
171
      { return load(); }
172
 
173
      _Tp*
174
      operator=(_Tp* __v)
175
      {
176
        store(__v);
177
        return __v;
178
      }
179
 
180
      _Tp*
181
      operator++(int) { return fetch_add(1); }
182
 
183
      _Tp*
184
      operator--(int) { return fetch_sub(1); }
185
 
186
      _Tp*
187
      operator++() { return fetch_add(1) + 1; }
188
 
189
      _Tp*
190
      operator--() { return fetch_sub(1) - 1; }
191
 
192
      _Tp*
193
      operator+=(ptrdiff_t __d)
194
      { return fetch_add(__d) + __d; }
195
 
196
      _Tp*
197
      operator-=(ptrdiff_t __d)
198
      { return fetch_sub(__d) - __d; }
199
    };
200
 
201
 
202
  /// Explicit specialization for void*
203
  template<>
204
    struct atomic : public atomic_address
205
    {
206
      typedef void*                     __integral_type;
207
      typedef atomic_address            __base_type;
208
 
209
      atomic() = default;
210
      ~atomic() = default;
211
      atomic(const atomic&) = delete;
212
      atomic& operator=(const atomic&) volatile = delete;
213
 
214
      atomic(__integral_type __i) : __base_type(__i) { }
215
 
216
      using __base_type::operator __integral_type;
217
      using __base_type::operator=;
218
    };
219
 
220
  /// Explicit specialization for bool.
221
  template<>
222
    struct atomic : public atomic_bool
223
    {
224
      typedef bool                      __integral_type;
225
      typedef atomic_bool               __base_type;
226
 
227
      atomic() = default;
228
      ~atomic() = default;
229
      atomic(const atomic&) = delete;
230
      atomic& operator=(const atomic&) volatile = delete;
231
 
232
      atomic(__integral_type __i) : __base_type(__i) { }
233
 
234
      using __base_type::operator __integral_type;
235
      using __base_type::operator=;
236
    };
237
 
238
  /// Explicit specialization for char.
239
  template<>
240
    struct atomic : public atomic_char
241
    {
242
      typedef char                      __integral_type;
243
      typedef atomic_char               __base_type;
244
 
245
      atomic() = default;
246
      ~atomic() = default;
247
      atomic(const atomic&) = delete;
248
      atomic& operator=(const atomic&) volatile = delete;
249
 
250
      atomic(__integral_type __i) : __base_type(__i) { }
251
 
252
      using __base_type::operator __integral_type;
253
      using __base_type::operator=;
254
    };
255
 
256
  /// Explicit specialization for signed char.
257
  template<>
258
    struct atomic : public atomic_schar
259
    {
260
      typedef signed char               __integral_type;
261
      typedef atomic_schar              __base_type;
262
 
263
      atomic() = default;
264
      ~atomic() = default;
265
      atomic(const atomic&) = delete;
266
      atomic& operator=(const atomic&) volatile = delete;
267
 
268
      atomic(__integral_type __i) : __base_type(__i) { }
269
 
270
      using __base_type::operator __integral_type;
271
      using __base_type::operator=;
272
    };
273
 
274
  /// Explicit specialization for unsigned char.
275
  template<>
276
    struct atomic : public atomic_uchar
277
    {
278
      typedef unsigned char             __integral_type;
279
      typedef atomic_uchar              __base_type;
280
 
281
      atomic() = default;
282
      ~atomic() = default;
283
      atomic(const atomic&) = delete;
284
      atomic& operator=(const atomic&) volatile = delete;
285
 
286
      atomic(__integral_type __i) : __base_type(__i) { }
287
 
288
      using __base_type::operator __integral_type;
289
      using __base_type::operator=;
290
    };
291
 
292
  /// Explicit specialization for short.
293
  template<>
294
    struct atomic : public atomic_short
295
    {
296
      typedef short                     __integral_type;
297
      typedef atomic_short              __base_type;
298
 
299
      atomic() = default;
300
      ~atomic() = default;
301
      atomic(const atomic&) = delete;
302
      atomic& operator=(const atomic&) volatile = delete;
303
 
304
      atomic(__integral_type __i) : __base_type(__i) { }
305
 
306
      using __base_type::operator __integral_type;
307
      using __base_type::operator=;
308
    };
309
 
310
  /// Explicit specialization for unsigned short.
311
  template<>
312
    struct atomic : public atomic_ushort
313
    {
314
      typedef unsigned short            __integral_type;
315
      typedef atomic_ushort             __base_type;
316
 
317
      atomic() = default;
318
      ~atomic() = default;
319
      atomic(const atomic&) = delete;
320
      atomic& operator=(const atomic&) volatile = delete;
321
 
322
      atomic(__integral_type __i) : __base_type(__i) { }
323
 
324
      using __base_type::operator __integral_type;
325
      using __base_type::operator=;
326
    };
327
 
328
  /// Explicit specialization for int.
329
  template<>
330
    struct atomic : atomic_int
331
    {
332
      typedef int                       __integral_type;
333
      typedef atomic_int                __base_type;
334
 
335
      atomic() = default;
336
      ~atomic() = default;
337
      atomic(const atomic&) = delete;
338
      atomic& operator=(const atomic&) volatile = delete;
339
 
340
      atomic(__integral_type __i) : __base_type(__i) { }
341
 
342
      using __base_type::operator __integral_type;
343
      using __base_type::operator=;
344
    };
345
 
346
  /// Explicit specialization for unsigned int.
347
  template<>
348
    struct atomic : public atomic_uint
349
    {
350
      typedef unsigned int              __integral_type;
351
      typedef atomic_uint               __base_type;
352
 
353
      atomic() = default;
354
      ~atomic() = default;
355
      atomic(const atomic&) = delete;
356
      atomic& operator=(const atomic&) volatile = delete;
357
 
358
      atomic(__integral_type __i) : __base_type(__i) { }
359
 
360
      using __base_type::operator __integral_type;
361
      using __base_type::operator=;
362
    };
363
 
364
  /// Explicit specialization for long.
365
  template<>
366
    struct atomic : public atomic_long
367
    {
368
      typedef long                      __integral_type;
369
      typedef atomic_long               __base_type;
370
 
371
      atomic() = default;
372
      ~atomic() = default;
373
      atomic(const atomic&) = delete;
374
      atomic& operator=(const atomic&) volatile = delete;
375
 
376
      atomic(__integral_type __i) : __base_type(__i) { }
377
 
378
      using __base_type::operator __integral_type;
379
      using __base_type::operator=;
380
    };
381
 
382
  /// Explicit specialization for unsigned long.
383
  template<>
384
    struct atomic : public atomic_ulong
385
    {
386
      typedef unsigned long             __integral_type;
387
      typedef atomic_ulong              __base_type;
388
 
389
      atomic() = default;
390
      ~atomic() = default;
391
      atomic(const atomic&) = delete;
392
      atomic& operator=(const atomic&) volatile = delete;
393
 
394
      atomic(__integral_type __i) : __base_type(__i) { }
395
 
396
      using __base_type::operator __integral_type;
397
      using __base_type::operator=;
398
    };
399
 
400
  /// Explicit specialization for long long.
401
  template<>
402
    struct atomic : public atomic_llong
403
    {
404
      typedef long long                 __integral_type;
405
      typedef atomic_llong              __base_type;
406
 
407
      atomic() = default;
408
      ~atomic() = default;
409
      atomic(const atomic&) = delete;
410
      atomic& operator=(const atomic&) volatile = delete;
411
 
412
      atomic(__integral_type __i) : __base_type(__i) { }
413
 
414
      using __base_type::operator __integral_type;
415
      using __base_type::operator=;
416
    };
417
 
418
  /// Explicit specialization for unsigned long long.
419
  template<>
420
    struct atomic : public atomic_ullong
421
    {
422
      typedef unsigned long long        __integral_type;
423
      typedef atomic_ullong             __base_type;
424
 
425
      atomic() = default;
426
      ~atomic() = default;
427
      atomic(const atomic&) = delete;
428
      atomic& operator=(const atomic&) volatile = delete;
429
 
430
      atomic(__integral_type __i) : __base_type(__i) { }
431
 
432
      using __base_type::operator __integral_type;
433
      using __base_type::operator=;
434
    };
435
 
436
  /// Explicit specialization for wchar_t.
437
  template<>
438
    struct atomic : public atomic_wchar_t
439
    {
440
      typedef wchar_t                   __integral_type;
441
      typedef atomic_wchar_t            __base_type;
442
 
443
      atomic() = default;
444
      ~atomic() = default;
445
      atomic(const atomic&) = delete;
446
      atomic& operator=(const atomic&) volatile = delete;
447
 
448
      atomic(__integral_type __i) : __base_type(__i) { }
449
 
450
      using __base_type::operator __integral_type;
451
      using __base_type::operator=;
452
    };
453
 
454
  /// Explicit specialization for char16_t.
455
  template<>
456
    struct atomic : public atomic_char16_t
457
    {
458
      typedef char16_t                  __integral_type;
459
      typedef atomic_char16_t           __base_type;
460
 
461
      atomic() = default;
462
      ~atomic() = default;
463
      atomic(const atomic&) = delete;
464
      atomic& operator=(const atomic&) volatile = delete;
465
 
466
      atomic(__integral_type __i) : __base_type(__i) { }
467
 
468
      using __base_type::operator __integral_type;
469
      using __base_type::operator=;
470
    };
471
 
472
  /// Explicit specialization for char32_t.
473
  template<>
474
    struct atomic : public atomic_char32_t
475
    {
476
      typedef char32_t                  __integral_type;
477
      typedef atomic_char32_t           __base_type;
478
 
479
      atomic() = default;
480
      ~atomic() = default;
481
      atomic(const atomic&) = delete;
482
      atomic& operator=(const atomic&) volatile = delete;
483
 
484
      atomic(__integral_type __i) : __base_type(__i) { }
485
 
486
      using __base_type::operator __integral_type;
487
      using __base_type::operator=;
488
    };
489
 
490
 
491
  template
492
    _Tp*
493
    atomic<_Tp*>::load(memory_order __m) const
494
    { return static_cast<_Tp*>(atomic_address::load(__m)); }
495
 
496
  template
497
    _Tp*
498
    atomic<_Tp*>::exchange(_Tp* __v, memory_order __m)
499
    { return static_cast<_Tp*>(atomic_address::exchange(__v, __m)); }
500
 
501
  template
502
    bool
503
    atomic<_Tp*>::compare_exchange_weak(_Tp*& __r, _Tp* __v, memory_order __m1,
504
                                        memory_order __m2)
505
    {
506
      void** __vr = reinterpret_cast(&__r);
507
      void* __vv = static_cast(__v);
508
      return atomic_address::compare_exchange_weak(*__vr, __vv, __m1, __m2);
509
    }
510
 
511
  template
512
    bool
513
    atomic<_Tp*>::compare_exchange_strong(_Tp*& __r, _Tp* __v,
514
                                          memory_order __m1,
515
                                          memory_order __m2)
516
    {
517
      void** __vr = reinterpret_cast(&__r);
518
      void* __vv = static_cast(__v);
519
      return atomic_address::compare_exchange_strong(*__vr, __vv, __m1, __m2);
520
    }
521
 
522
  template
523
    bool
524
    atomic<_Tp*>::compare_exchange_weak(_Tp*& __r, _Tp* __v,
525
                                        memory_order __m)
526
    {
527
      return compare_exchange_weak(__r, __v, __m,
528
                                   __calculate_memory_order(__m));
529
    }
530
 
531
  template
532
    bool
533
    atomic<_Tp*>::compare_exchange_strong(_Tp*& __r, _Tp* __v,
534
                                        memory_order __m)
535
    {
536
      return compare_exchange_strong(__r, __v, __m,
537
                                     __calculate_memory_order(__m));
538
    }
539
 
540
  template
541
    _Tp*
542
    atomic<_Tp*>::fetch_add(ptrdiff_t __d, memory_order __m)
543
    {
544
      void* __p = atomic_fetch_add_explicit(this, sizeof(_Tp) * __d, __m);
545
      return static_cast<_Tp*>(__p);
546
    }
547
 
548
  template
549
    _Tp*
550
    atomic<_Tp*>::fetch_sub(ptrdiff_t __d, memory_order __m)
551
    {
552
      void* __p = atomic_fetch_sub_explicit(this, sizeof(_Tp) * __d, __m);
553
      return static_cast<_Tp*>(__p);
554
    }
555
 
556
  // Convenience function definitions, atomic_flag.
557
  inline bool
558
  atomic_flag_test_and_set_explicit(atomic_flag* __a, memory_order __m)
559
  { return __a->test_and_set(__m); }
560
 
561
  inline void
562
  atomic_flag_clear_explicit(atomic_flag* __a, memory_order __m)
563
  { return __a->clear(__m); }
564
 
565
 
566
  // Convenience function definitions, atomic_address.
567
  inline bool
568
  atomic_is_lock_free(const atomic_address* __a)
569
  { return __a->is_lock_free(); }
570
 
571
  inline void
572
  atomic_store(atomic_address* __a, void* __v)
573
  { __a->store(__v); }
574
 
575
  inline void
576
  atomic_store_explicit(atomic_address* __a, void* __v, memory_order __m)
577
  { __a->store(__v, __m); }
578
 
579
  inline void*
580
  atomic_load(const atomic_address* __a)
581
  { return __a->load(); }
582
 
583
  inline void*
584
  atomic_load_explicit(const atomic_address* __a, memory_order __m)
585
  { return __a->load(__m); }
586
 
587
  inline void*
588
  atomic_exchange(atomic_address* __a, void* __v)
589
  { return __a->exchange(__v); }
590
 
591
  inline void*
592
  atomic_exchange_explicit(atomic_address* __a, void* __v, memory_order __m)
593
  { return __a->exchange(__v, __m); }
594
 
595
  inline bool
596
  atomic_compare_exchange_weak(atomic_address* __a, void** __v1, void* __v2)
597
  {
598
    return __a->compare_exchange_weak(*__v1, __v2, memory_order_seq_cst,
599
                                      memory_order_seq_cst);
600
  }
601
 
602
  inline bool
603
  atomic_compare_exchange_strong(atomic_address* __a,
604
                               void** __v1, void* __v2)
605
  {
606
    return __a->compare_exchange_strong(*__v1, __v2, memory_order_seq_cst,
607
                                      memory_order_seq_cst);
608
  }
609
 
610
  inline bool
611
  atomic_compare_exchange_weak_explicit(atomic_address* __a,
612
                                        void** __v1, void* __v2,
613
                                        memory_order __m1, memory_order __m2)
614
  { return __a->compare_exchange_weak(*__v1, __v2, __m1, __m2); }
615
 
616
  inline bool
617
  atomic_compare_exchange_strong_explicit(atomic_address* __a,
618
                                          void** __v1, void* __v2,
619
                                          memory_order __m1, memory_order __m2)
620
  { return __a->compare_exchange_strong(*__v1, __v2, __m1, __m2); }
621
 
622
  inline void*
623
  atomic_fetch_add_explicit(atomic_address* __a, ptrdiff_t __d,
624
                            memory_order __m)
625
  { return __a->fetch_add(__d, __m); }
626
 
627
  inline void*
628
  atomic_fetch_add(atomic_address* __a, ptrdiff_t __d)
629
  { return __a->fetch_add(__d); }
630
 
631
  inline void*
632
  atomic_fetch_sub_explicit(atomic_address* __a, ptrdiff_t __d,
633
                            memory_order __m)
634
  { return __a->fetch_sub(__d, __m); }
635
 
636
  inline void*
637
  atomic_fetch_sub(atomic_address* __a, ptrdiff_t __d)
638
  { return __a->fetch_sub(__d); }
639
 
640
 
641
  // Convenience function definitions, atomic_bool.
642
  inline bool
643
  atomic_is_lock_free(const atomic_bool* __a)
644
  { return __a->is_lock_free(); }
645
 
646
  inline void
647
  atomic_store(atomic_bool* __a, bool __i)
648
  { __a->store(__i); }
649
 
650
  inline void
651
  atomic_store_explicit(atomic_bool* __a, bool __i, memory_order __m)
652
  { __a->store(__i, __m); }
653
 
654
  inline bool
655
  atomic_load(const atomic_bool* __a)
656
  { return __a->load(); }
657
 
658
  inline bool
659
  atomic_load_explicit(const atomic_bool* __a, memory_order __m)
660
  { return __a->load(__m); }
661
 
662
  inline bool
663
  atomic_exchange(atomic_bool* __a, bool __i)
664
  { return __a->exchange(__i); }
665
 
666
  inline bool
667
  atomic_exchange_explicit(atomic_bool* __a, bool __i, memory_order __m)
668
  { return __a->exchange(__i, __m); }
669
 
670
  inline bool
671
  atomic_compare_exchange_weak(atomic_bool* __a, bool* __i1, bool __i2)
672
  {
673
    return __a->compare_exchange_weak(*__i1, __i2, memory_order_seq_cst,
674
                                      memory_order_seq_cst);
675
  }
676
 
677
  inline bool
678
  atomic_compare_exchange_strong(atomic_bool* __a, bool* __i1, bool __i2)
679
  {
680
    return __a->compare_exchange_strong(*__i1, __i2, memory_order_seq_cst,
681
                                        memory_order_seq_cst);
682
  }
683
 
684
  inline bool
685
  atomic_compare_exchange_weak_explicit(atomic_bool* __a, bool* __i1,
686
                                        bool __i2, memory_order __m1,
687
                                        memory_order __m2)
688
  { return __a->compare_exchange_weak(*__i1, __i2, __m1, __m2); }
689
 
690
  inline bool
691
  atomic_compare_exchange_strong_explicit(atomic_bool* __a,
692
                                          bool* __i1, bool __i2,
693
                                          memory_order __m1, memory_order __m2)
694
  { return __a->compare_exchange_strong(*__i1, __i2, __m1, __m2); }
695
 
696
 
697
 
698
  // Free standing functions. Template argument should be constricted
699
  // to intergral types as specified in the standard.
700
  template
701
    inline void
702
    atomic_store_explicit(__atomic_base<_ITp>* __a, _ITp __i, memory_order __m)
703
    { __a->store(__i, __m); }
704
 
705
  template
706
    inline _ITp
707
    atomic_load_explicit(const __atomic_base<_ITp>* __a, memory_order __m)
708
    { return __a->load(__m); }
709
 
710
  template
711
    inline _ITp
712
    atomic_exchange_explicit(__atomic_base<_ITp>* __a, _ITp __i,
713
                             memory_order __m)
714
    { return __a->exchange(__i, __m); }
715
 
716
  template
717
    inline bool
718
    atomic_compare_exchange_weak_explicit(__atomic_base<_ITp>* __a,
719
                                          _ITp* __i1, _ITp __i2,
720
                                          memory_order __m1, memory_order __m2)
721
    { return __a->compare_exchange_weak(*__i1, __i2, __m1, __m2); }
722
 
723
  template
724
    inline bool
725
    atomic_compare_exchange_strong_explicit(__atomic_base<_ITp>* __a,
726
                                            _ITp* __i1, _ITp __i2,
727
                                            memory_order __m1,
728
                                            memory_order __m2)
729
    { return __a->compare_exchange_strong(*__i1, __i2, __m1, __m2); }
730
 
731
  template
732
    inline _ITp
733
    atomic_fetch_add_explicit(__atomic_base<_ITp>* __a, _ITp __i,
734
                              memory_order __m)
735
    { return __a->fetch_add(__i, __m); }
736
 
737
  template
738
    inline _ITp
739
    atomic_fetch_sub_explicit(__atomic_base<_ITp>* __a, _ITp __i,
740
                              memory_order __m)
741
    { return __a->fetch_sub(__i, __m); }
742
 
743
  template
744
    inline _ITp
745
    atomic_fetch_and_explicit(__atomic_base<_ITp>* __a, _ITp __i,
746
                              memory_order __m)
747
    { return __a->fetch_and(__i, __m); }
748
 
749
  template
750
    inline _ITp
751
    atomic_fetch_or_explicit(__atomic_base<_ITp>* __a, _ITp __i,
752
                             memory_order __m)
753
    { return __a->fetch_or(__i, __m); }
754
 
755
  template
756
    inline _ITp
757
    atomic_fetch_xor_explicit(__atomic_base<_ITp>* __a, _ITp __i,
758
                              memory_order __m)
759
    { return __a->fetch_xor(__i, __m); }
760
 
761
  template
762
    inline bool
763
    atomic_is_lock_free(const __atomic_base<_ITp>* __a)
764
    { return __a->is_lock_free(); }
765
 
766
  template
767
    inline void
768
    atomic_store(__atomic_base<_ITp>* __a, _ITp __i)
769
    { atomic_store_explicit(__a, __i, memory_order_seq_cst); }
770
 
771
  template
772
    inline _ITp
773
    atomic_load(const __atomic_base<_ITp>* __a)
774
    { return atomic_load_explicit(__a, memory_order_seq_cst); }
775
 
776
  template
777
    inline _ITp
778
    atomic_exchange(__atomic_base<_ITp>* __a, _ITp __i)
779
    { return atomic_exchange_explicit(__a, __i, memory_order_seq_cst); }
780
 
781
  template
782
    inline bool
783
    atomic_compare_exchange_weak(__atomic_base<_ITp>* __a,
784
                                 _ITp* __i1, _ITp __i2)
785
    {
786
      return atomic_compare_exchange_weak_explicit(__a, __i1, __i2,
787
                                                   memory_order_seq_cst,
788
                                                   memory_order_seq_cst);
789
    }
790
 
791
  template
792
    inline bool
793
    atomic_compare_exchange_strong(__atomic_base<_ITp>* __a,
794
                                   _ITp* __i1, _ITp __i2)
795
    {
796
      return atomic_compare_exchange_strong_explicit(__a, __i1, __i2,
797
                                                     memory_order_seq_cst,
798
                                                     memory_order_seq_cst);
799
    }
800
 
801
  template
802
    inline _ITp
803
    atomic_fetch_add(__atomic_base<_ITp>* __a, _ITp __i)
804
    { return atomic_fetch_add_explicit(__a, __i, memory_order_seq_cst); }
805
 
806
  template
807
    inline _ITp
808
    atomic_fetch_sub(__atomic_base<_ITp>* __a, _ITp __i)
809
    { return atomic_fetch_sub_explicit(__a, __i, memory_order_seq_cst); }
810
 
811
  template
812
    inline _ITp
813
    atomic_fetch_and(__atomic_base<_ITp>* __a, _ITp __i)
814
    { return atomic_fetch_and_explicit(__a, __i, memory_order_seq_cst); }
815
 
816
  template
817
    inline _ITp
818
    atomic_fetch_or(__atomic_base<_ITp>* __a, _ITp __i)
819
    { return atomic_fetch_or_explicit(__a, __i, memory_order_seq_cst); }
820
 
821
  template
822
    inline _ITp
823
    atomic_fetch_xor(__atomic_base<_ITp>* __a, _ITp __i)
824
    { return atomic_fetch_xor_explicit(__a, __i, memory_order_seq_cst); }
825
 
826
  // @} group atomics
827
 
828
_GLIBCXX_END_NAMESPACE
829
 
830
#endif

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.