gnu-src/newlib-1.17.0/newlib/libc/sys/linux/machine/i386/atomic.h (openrisc trunk, rev 148)
/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <stdint.h>
#include <stdlib.h>	/* abort (), used in the fallback paths below.  */


typedef int8_t atomic8_t;
typedef uint8_t uatomic8_t;
typedef int_fast8_t atomic_fast8_t;
typedef uint_fast8_t uatomic_fast8_t;

typedef int16_t atomic16_t;
typedef uint16_t uatomic16_t;
typedef int_fast16_t atomic_fast16_t;
typedef uint_fast16_t uatomic_fast16_t;

typedef int32_t atomic32_t;
typedef uint32_t uatomic32_t;
typedef int_fast32_t atomic_fast32_t;
typedef uint_fast32_t uatomic_fast32_t;

typedef int64_t atomic64_t;
typedef uint64_t uatomic64_t;
typedef int_fast64_t atomic_fast64_t;
typedef uint_fast64_t uatomic_fast64_t;

typedef intptr_t atomicptr_t;
typedef uintptr_t uatomicptr_t;
typedef intmax_t atomic_max_t;
typedef uintmax_t uatomic_max_t;


#ifndef LOCK_PREFIX
# ifdef UP
#  define LOCK_PREFIX   /* nothing */
# else
#  define LOCK_PREFIX "lock;"
# endif
#endif
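
/* Editorial note (not part of the original source): LOCK_PREFIX gates
   the x86 bus-lock prefix.  A uniprocessor build (UP defined) can drop
   it, because a single CPU's read-modify-write instructions are already
   atomic with respect to interrupts; SMP builds keep "lock;" so they
   are atomic with respect to the other processors as well.  */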


#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret;                                                     \
     __asm __volatile (LOCK_PREFIX "cmpxchgb %b2, %1"                         \
                       : "=a" (ret), "=m" (*mem)                              \
                       : "q" (newval), "m" (*mem), "0" (oldval));             \
     ret; })

#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret;                                                     \
     __asm __volatile (LOCK_PREFIX "cmpxchgw %w2, %1"                         \
                       : "=a" (ret), "=m" (*mem)                              \
                       : "r" (newval), "m" (*mem), "0" (oldval));             \
     ret; })

#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret;                                                     \
     __asm __volatile (LOCK_PREFIX "cmpxchgl %2, %1"                          \
                       : "=a" (ret), "=m" (*mem)                              \
                       : "r" (newval), "m" (*mem), "0" (oldval));             \
     ret; })
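
/* Editorial example (not part of the original source): a CAS loop that
   atomically adds to a 32-bit location using the macro above.  The
   function name `example_add32' is hypothetical.  cmpxchg returns the
   value found in *MEM; it equals OLDVAL exactly when the swap hit.  */
static inline int32_t
example_add32 (volatile int32_t *mem, int32_t val)
{
  int32_t old, seen;
  do
    {
      old = *mem;
      seen = __arch_compare_and_exchange_val_32_acq (mem, old + val, old);
    }
  while (seen != old);	/* Retry if someone changed *mem under us.  */
  return old;		/* The value before our addition.  */
}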

/* XXX We do not really need 64-bit compare-and-exchange.  At least
   not at the moment.  Using it would mean causing portability
   problems since not many other 32-bit architectures have support for
   such an operation.  So don't define any code for now.  If it is
   really going to be used the code below can be used on Intel Pentium
   and later, but NOT on i486.  */
#if 1
# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret = *(mem); abort (); ret = (newval); ret = (oldval); })
#else
# ifdef __PIC__
#  define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret;                                                     \
     __asm __volatile ("xchgl %2, %%ebx\n\t"                                  \
                       LOCK_PREFIX "cmpxchg8b %1\n\t"                         \
                       "xchgl %2, %%ebx"                                      \
                       : "=A" (ret), "=m" (*mem)                              \
                       : "DS" (((unsigned long long int) (newval))            \
                               & 0xffffffff),                                 \
                         "c" (((unsigned long long int) (newval)) >> 32),     \
                         "m" (*mem), "a" (((unsigned long long int) (oldval)) \
                                          & 0xffffffff),                      \
                         "d" (((unsigned long long int) (oldval)) >> 32));    \
     ret; })
# else
#  define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret;                                                     \
     __asm __volatile (LOCK_PREFIX "cmpxchg8b %1"                             \
                       : "=A" (ret), "=m" (*mem)                              \
                       : "b" (((unsigned long long int) (newval))             \
                              & 0xffffffff),                                  \
                         "c" (((unsigned long long int) (newval)) >> 32),     \
                         "m" (*mem), "a" (((unsigned long long int) (oldval)) \
                                          & 0xffffffff),                      \
                         "d" (((unsigned long long int) (oldval)) >> 32));    \
     ret; })
# endif
#endif
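
/* Editorial note (not part of the original source): cmpxchg8b compares
   %edx:%eax against the 8-byte memory operand and, on a match, stores
   %ecx:%ebx into it.  Under __PIC__, %ebx is reserved for the GOT
   pointer, so the PIC variant above parks NEWVAL's low word in %esi or
   %edi (constraint "DS") and swaps it into %ebx only for the duration
   of the instruction.  */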


/* Note that we need no lock prefix: xchg with a memory operand is
   implicitly locked.  */
#define atomic_exchange_acq(mem, newvalue) \
  ({ __typeof (*mem) result;                                                  \
     if (sizeof (*mem) == 1)                                                  \
       __asm __volatile ("xchgb %b0, %1"                                      \
                         : "=r" (result), "=m" (*mem)                         \
                         : "0" (newvalue), "m" (*mem));                       \
     else if (sizeof (*mem) == 2)                                             \
       __asm __volatile ("xchgw %w0, %1"                                      \
                         : "=r" (result), "=m" (*mem)                         \
                         : "0" (newvalue), "m" (*mem));                       \
     else if (sizeof (*mem) == 4)                                             \
       __asm __volatile ("xchgl %0, %1"                                       \
                         : "=r" (result), "=m" (*mem)                         \
                         : "0" (newvalue), "m" (*mem));                       \
     else                                                                     \
       {                                                                      \
         result = 0;                                                          \
         abort ();                                                            \
       }                                                                      \
     result; })
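
/* Editorial example (not part of the original source): a minimal
   test-and-set spinlock built on atomic_exchange_acq.  The names
   `example_spinlock_*' are hypothetical, and the lock word must start
   out zero.  */
static inline void
example_spinlock_acquire (volatile int *lock)
{
  /* xchg is implicitly locked; spin until the previous value was 0,
     meaning we were the ones who flipped it to 1.  */
  while (atomic_exchange_acq (lock, 1) != 0)
    __asm __volatile ("rep; nop");	/* PAUSE hint while spinning.  */
}

static inline void
example_spinlock_release (volatile int *lock)
{
  /* A plain store suffices: x86 does not reorder a store with earlier
     stores, and volatile keeps the compiler from caching it.  */
  *lock = 0;
}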


#define atomic_exchange_and_add(mem, value) \
  ({ __typeof (*mem) __result;                                                \
     __typeof (value) __addval = (value);                                     \
     if (sizeof (*mem) == 1)                                                  \
       __asm __volatile (LOCK_PREFIX "xaddb %b0, %1"                          \
                         : "=r" (__result), "=m" (*mem)                       \
                         : "0" (__addval), "m" (*mem));                       \
     else if (sizeof (*mem) == 2)                                             \
       __asm __volatile (LOCK_PREFIX "xaddw %w0, %1"                          \
                         : "=r" (__result), "=m" (*mem)                       \
                         : "0" (__addval), "m" (*mem));                       \
     else if (sizeof (*mem) == 4)                                             \
       __asm __volatile (LOCK_PREFIX "xaddl %0, %1"                           \
                         : "=r" (__result), "=m" (*mem)                       \
                         : "0" (__addval), "m" (*mem));                       \
     else                                                                     \
       {                                                                      \
         __typeof (mem) __memp = (mem);                                       \
         __typeof (*mem) __tmpval;                                            \
         __result = *__memp;                                                  \
         /* CAS returns the old value; retry until it matches what we
            passed as expected, i.e. until our swap went in.  */              \
         do                                                                   \
           __tmpval = __result;                                               \
         while ((__result = __arch_compare_and_exchange_val_64_acq            \
                 (__memp, __result + __addval, __result)) != __tmpval);       \
       }                                                                      \
     __result; })
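
/* Editorial example (not part of the original source): xadd returns
   the value the memory held before the addition, so a shared counter
   can hand out unique, monotonically increasing ids.  The name
   `example_next_id' is hypothetical.  */
static inline uint32_t
example_next_id (volatile uint32_t *counter)
{
  return atomic_exchange_and_add (counter, 1);
}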


#define atomic_add(mem, value) \
  (void) ({ if (__builtin_constant_p (value) && (value) == 1)                 \
              atomic_increment (mem);                                         \
            else if (__builtin_constant_p (value) && (value) == -1)           \
              atomic_decrement (mem);                                         \
            else if (sizeof (*mem) == 1)                                      \
              __asm __volatile (LOCK_PREFIX "addb %b1, %0"                    \
                                : "=m" (*mem)                                 \
                                : "ir" (value), "m" (*mem));                  \
            else if (sizeof (*mem) == 2)                                      \
              __asm __volatile (LOCK_PREFIX "addw %w1, %0"                    \
                                : "=m" (*mem)                                 \
                                : "ir" (value), "m" (*mem));                  \
            else if (sizeof (*mem) == 4)                                      \
              __asm __volatile (LOCK_PREFIX "addl %1, %0"                     \
                                : "=m" (*mem)                                 \
                                : "ir" (value), "m" (*mem));                  \
            else                                                              \
              {                                                               \
                __typeof (value) __addval = (value);                          \
                __typeof (mem) __memp = (mem);                                \
                __typeof (*mem) __oldval = *__memp;                           \
                __typeof (*mem) __tmpval;                                     \
                do                                                            \
                  __tmpval = __oldval;                                        \
                while ((__oldval = __arch_compare_and_exchange_val_64_acq     \
                       (__memp, __oldval + __addval, __oldval)) != __tmpval); \
              }                                                               \
            })
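
/* Editorial note (not part of the original source): atomic_add
   special-cases the compile-time constants +1 and -1, detected via
   __builtin_constant_p, into the shorter "inc"/"dec" encodings of
   atomic_increment/atomic_decrement; all other values use a locked
   "add" directly.  */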


#define atomic_add_negative(mem, value) \
  ({ unsigned char __result;                                                  \
     if (sizeof (*mem) == 1)                                                  \
       __asm __volatile (LOCK_PREFIX "addb %b2, %0; sets %1"                  \
                         : "=m" (*mem), "=qm" (__result)                      \
                         : "iq" (value), "m" (*mem));                         \
     else if (sizeof (*mem) == 2)                                             \
       __asm __volatile (LOCK_PREFIX "addw %w2, %0; sets %1"                  \
                         : "=m" (*mem), "=qm" (__result)                      \
                         : "ir" (value), "m" (*mem));                         \
     else if (sizeof (*mem) == 4)                                             \
       __asm __volatile (LOCK_PREFIX "addl %2, %0; sets %1"                   \
                         : "=m" (*mem), "=qm" (__result)                      \
                         : "ir" (value), "m" (*mem));                         \
     else                                                                     \
       abort ();                                                              \
     __result; })


#define atomic_add_zero(mem, value) \
  ({ unsigned char __result;                                                  \
     if (sizeof (*mem) == 1)                                                  \
       __asm __volatile (LOCK_PREFIX "addb %b2, %0; setz %1"                  \
                         : "=m" (*mem), "=qm" (__result)                      \
                         : "ir" (value), "m" (*mem));                         \
     else if (sizeof (*mem) == 2)                                             \
       __asm __volatile (LOCK_PREFIX "addw %w2, %0; setz %1"                  \
                         : "=m" (*mem), "=qm" (__result)                      \
                         : "ir" (value), "m" (*mem));                         \
     else if (sizeof (*mem) == 4)                                             \
       __asm __volatile (LOCK_PREFIX "addl %2, %0; setz %1"                   \
                         : "=m" (*mem), "=qm" (__result)                      \
                         : "ir" (value), "m" (*mem));                         \
     else                                                                     \
       abort ();                                                              \
     __result; })
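
/* Editorial example (not part of the original source): atomic_add_zero
   reports whether the addition left the counter at exactly zero, which
   is handy for balanced acquire/release accounting.  The name
   `example_last_out' is hypothetical.  */
static inline int
example_last_out (volatile int *counter, int delta)
{
  /* Nonzero iff *counter became 0 after atomically adding DELTA.  */
  return atomic_add_zero (counter, delta);
}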


#define atomic_increment(mem) \
  (void) ({ if (sizeof (*mem) == 1)                                           \
              __asm __volatile (LOCK_PREFIX "incb %b0"                        \
                                : "=m" (*mem)                                 \
                                : "m" (*mem));                                \
            else if (sizeof (*mem) == 2)                                      \
              __asm __volatile (LOCK_PREFIX "incw %w0"                        \
                                : "=m" (*mem)                                 \
                                : "m" (*mem));                                \
            else if (sizeof (*mem) == 4)                                      \
              __asm __volatile (LOCK_PREFIX "incl %0"                         \
                                : "=m" (*mem)                                 \
                                : "m" (*mem));                                \
            else                                                              \
              {                                                               \
                __typeof (mem) __memp = (mem);                                \
                __typeof (*mem) __oldval = *__memp;                           \
                __typeof (*mem) __tmpval;                                     \
                do                                                            \
                  __tmpval = __oldval;                                        \
                while ((__oldval = __arch_compare_and_exchange_val_64_acq     \
                       (__memp, __oldval + 1, __oldval)) != __tmpval);        \
              }                                                               \
            })


#define atomic_increment_and_test(mem) \
  ({ unsigned char __result;                                                  \
     if (sizeof (*mem) == 1)                                                  \
       __asm __volatile (LOCK_PREFIX "incb %b0; sete %1"                      \
                         : "=m" (*mem), "=qm" (__result)                      \
                         : "m" (*mem));                                       \
     else if (sizeof (*mem) == 2)                                             \
       __asm __volatile (LOCK_PREFIX "incw %w0; sete %1"                      \
                         : "=m" (*mem), "=qm" (__result)                      \
                         : "m" (*mem));                                       \
     else if (sizeof (*mem) == 4)                                             \
       __asm __volatile (LOCK_PREFIX "incl %0; sete %1"                       \
                         : "=m" (*mem), "=qm" (__result)                      \
                         : "m" (*mem));                                       \
     else                                                                     \
       abort ();                                                              \
     __result; })


#define atomic_decrement(mem) \
  (void) ({ if (sizeof (*mem) == 1)                                           \
              __asm __volatile (LOCK_PREFIX "decb %b0"                        \
                                : "=m" (*mem)                                 \
                                : "m" (*mem));                                \
            else if (sizeof (*mem) == 2)                                      \
              __asm __volatile (LOCK_PREFIX "decw %w0"                        \
                                : "=m" (*mem)                                 \
                                : "m" (*mem));                                \
            else if (sizeof (*mem) == 4)                                      \
              __asm __volatile (LOCK_PREFIX "decl %0"                         \
                                : "=m" (*mem)                                 \
                                : "m" (*mem));                                \
            else                                                              \
              {                                                               \
                __typeof (mem) __memp = (mem);                                \
                __typeof (*mem) __oldval = *__memp;                           \
                __typeof (*mem) __tmpval;                                     \
                do                                                            \
                  __tmpval = __oldval;                                        \
                while ((__oldval = __arch_compare_and_exchange_val_64_acq     \
                       (__memp, __oldval - 1, __oldval)) != __tmpval);        \
              }                                                               \
            })


#define atomic_decrement_and_test(mem) \
  ({ unsigned char __result;                                                  \
     if (sizeof (*mem) == 1)                                                  \
       __asm __volatile (LOCK_PREFIX "decb %b0; sete %1"                      \
                         : "=m" (*mem), "=qm" (__result)                      \
                         : "m" (*mem));                                       \
     else if (sizeof (*mem) == 2)                                             \
       __asm __volatile (LOCK_PREFIX "decw %w0; sete %1"                      \
                         : "=m" (*mem), "=qm" (__result)                      \
                         : "m" (*mem));                                       \
     else if (sizeof (*mem) == 4)                                             \
       __asm __volatile (LOCK_PREFIX "decl %0; sete %1"                       \
                         : "=m" (*mem), "=qm" (__result)                      \
                         : "m" (*mem));                                       \
     else                                                                     \
       abort ();                                                              \
     __result; })
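
/* Editorial example (not part of the original source): the classic
   reference-count release pattern.  The names `example_release' and
   its `destroy' callback are hypothetical.  */
static inline void
example_release (volatile int *refcount, void (*destroy) (void))
{
  /* sete captures ZF: a nonzero result means the count just hit 0.  */
  if (atomic_decrement_and_test (refcount))
    destroy ();		/* Last reference gone.  */
}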


#define atomic_bit_set(mem, bit) \
  (void) ({ if (sizeof (*mem) == 1)                                           \
              __asm __volatile (LOCK_PREFIX "orb %b2, %0"                     \
                                : "=m" (*mem)                                 \
                                : "m" (*mem), "ir" (1 << (bit)));             \
            else if (sizeof (*mem) == 2)                                      \
              __asm __volatile (LOCK_PREFIX "orw %w2, %0"                     \
                                : "=m" (*mem)                                 \
                                : "m" (*mem), "ir" (1 << (bit)));             \
            else if (sizeof (*mem) == 4)                                      \
              __asm __volatile (LOCK_PREFIX "orl %2, %0"                      \
                                : "=m" (*mem)                                 \
                                : "m" (*mem), "ir" (1 << (bit)));             \
            else                                                              \
              abort ();                                                       \
            })


#define atomic_bit_test_set(mem, bit) \
  ({ unsigned char __result;                                                  \
     if (sizeof (*mem) == 1)                                                  \
       __asm __volatile (LOCK_PREFIX "btsb %3, %1; setc %0"                   \
                         : "=q" (__result), "=m" (*mem)                       \
                         : "m" (*mem), "ir" (bit));                           \
     else if (sizeof (*mem) == 2)                                             \
       __asm __volatile (LOCK_PREFIX "btsw %3, %1; setc %0"                   \
                         : "=q" (__result), "=m" (*mem)                       \
                         : "m" (*mem), "ir" (bit));                           \
     else if (sizeof (*mem) == 4)                                             \
       __asm __volatile (LOCK_PREFIX "btsl %3, %1; setc %0"                   \
                         : "=q" (__result), "=m" (*mem)                       \
                         : "m" (*mem), "ir" (bit));                           \
     else                                                                     \
       abort ();                                                              \
     __result; })
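
/* Editorial example (not part of the original source): claiming a
   one-shot flag; only the caller that flips bit 0 from 0 to 1 wins.
   The name `example_try_claim' is hypothetical.  */
static inline int
example_try_claim (volatile int *flags)
{
  /* setc captures the bit's previous value: 0 means we claimed it.  */
  return atomic_bit_test_set (flags, 0) == 0;
}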


#define atomic_delay() asm ("rep; nop")
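
/* Editorial note (not part of the original source): "rep; nop" is the
   two-byte encoding of the PAUSE instruction (0xF3 0x90).  CPUs that
   implement PAUSE throttle the spin loop and save power; older parts
   simply execute a NOP.  A hypothetical polling loop:

     while (!*flag_ready)
       atomic_delay ();
*/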
