/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose,  provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#ifndef GC_LOCKS_H
#define GC_LOCKS_H
/*
 * Mutual exclusion between allocator/collector routines.
 * Needed if there is more than one allocator thread.
 * FASTLOCK() is assumed to try to acquire the lock in a cheap and
 * dirty way that is acceptable for a few instructions, e.g. by
 * inhibiting preemption.  This is assumed to have succeeded only
 * if a subsequent call to FASTLOCK_SUCCEEDED() returns TRUE.
 * FASTUNLOCK() is called whether or not FASTLOCK_SUCCEEDED().
 * If signals cannot be tolerated with the FASTLOCK held, then
 * FASTLOCK should disable signals.  The code executed under
 * FASTLOCK is otherwise immune to interruption, provided it is
 * not restarted.
 * DCL_LOCK_STATE declares any local variables needed by LOCK and UNLOCK
 * and/or DISABLE_SIGNALS and ENABLE_SIGNALS and/or FASTLOCK.
 * (There is currently no equivalent for FASTLOCK.)
 *
 * In the PARALLEL_MARK case, we also need to define a number of
 * other inline functions here:
 *   GC_bool GC_compare_and_exchange( volatile GC_word *addr,
 *                                    GC_word old, GC_word new )
 *   GC_word GC_atomic_add( volatile GC_word *addr, GC_word how_much )
 *   void GC_memory_barrier( )
 *
 */
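
/* Illustrative only: a sketch of how a caller is expected to combine   */
/* the macros described above.  The function name example_update is     */
/* hypothetical and not part of the collector.                          */
#if 0
  static void example_update(void)
  {
      DCL_LOCK_STATE;

      FASTLOCK();
      if (!FASTLOCK_SUCCEEDED()) {
          /* The cheap attempt failed; per the contract above,      */
          /* FASTUNLOCK() is called either way, then we fall back   */
          /* to the full lock.                                      */
          FASTUNLOCK();
          LOCK();
          /* ... short critical section ... */
          UNLOCK();
          return;
      }
      /* ... short critical section ... */
      FASTUNLOCK();
  }
#endif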
# ifdef THREADS
   void GC_noop1 GC_PROTO((word));
#  ifdef PCR_OBSOLETE   /* Faster, but broken with multiple lwp's       */
#    include  "th/PCR_Th.h"
#    include  "th/PCR_ThCrSec.h"
     extern struct PCR_Th_MLRep GC_allocate_ml;
#    define DCL_LOCK_STATE  PCR_sigset_t GC_old_sig_mask
#    define LOCK() PCR_Th_ML_Acquire(&GC_allocate_ml)
#    define UNLOCK() PCR_Th_ML_Release(&GC_allocate_ml)
#    define FASTLOCK() PCR_ThCrSec_EnterSys()
     /* Here we cheat (a lot): */
#        define FASTLOCK_SUCCEEDED() (*(int *)(&GC_allocate_ml) == 0)
                /* TRUE if nobody currently holds the lock */
#    define FASTUNLOCK() PCR_ThCrSec_ExitSys()
#  endif
#  ifdef PCR
#    include <base/PCR_Base.h>
#    include <th/PCR_Th.h>
     extern PCR_Th_ML GC_allocate_ml;
#    define DCL_LOCK_STATE \
         PCR_ERes GC_fastLockRes; PCR_sigset_t GC_old_sig_mask
#    define LOCK() PCR_Th_ML_Acquire(&GC_allocate_ml)
#    define UNLOCK() PCR_Th_ML_Release(&GC_allocate_ml)
#    define FASTLOCK() (GC_fastLockRes = PCR_Th_ML_Try(&GC_allocate_ml))
#    define FASTLOCK_SUCCEEDED() (GC_fastLockRes == PCR_ERes_okay)
#    define FASTUNLOCK()  {\
        if( FASTLOCK_SUCCEEDED() ) PCR_Th_ML_Release(&GC_allocate_ml); }
#  endif
#  ifdef SRC_M3
     extern GC_word RT0u__inCritical;
#    define LOCK() RT0u__inCritical++
#    define UNLOCK() RT0u__inCritical--
#  endif
#  ifdef GC_SOLARIS_THREADS
#    include <thread.h>
#    include <signal.h>
     extern mutex_t GC_allocate_ml;
#    define LOCK() mutex_lock(&GC_allocate_ml)
#    define UNLOCK() mutex_unlock(&GC_allocate_ml)
#  endif

/* Try to define GC_TEST_AND_SET and a matching GC_CLEAR for spin lock  */
/* acquisition and release.  We need this for correct operation of the  */
/* incremental GC.                                                      */
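/* Illustrative only: the spin-lock protocol these two primitives are   */
/* meant to implement, assuming both are defined for the target.  The   */
/* names example_lock and example_critical are hypothetical.            */
#if 0
  static volatile unsigned int example_lock = 0;

  static void example_critical(void)
  {
      /* GC_test_and_set returns the old value: nonzero means the   */
      /* lock was already held, so keep spinning.                   */
      while (GC_test_and_set(&example_lock) != 0) {
          /* A real caller (see GC_lock()) would back off or yield. */
      }
      /* ... critical section ... */
      GC_clear(&example_lock);   /* release, with any needed barrier */
  }
#endif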
#  ifdef __GNUC__
#    if defined(I386)
       inline static int GC_test_and_set(volatile unsigned int *addr) {
          int oldval;
          /* Note: the "xchg" instruction does not need a "lock" prefix */
          __asm__ __volatile__("xchgl %0, %1"
                : "=r"(oldval), "=m"(*(addr))
                : "0"(1), "m"(*(addr)) : "memory");
          return oldval;
       }
#      define GC_TEST_AND_SET_DEFINED
#    endif
#    if defined(IA64)
#     include <ia64intrin.h>
       inline static int GC_test_and_set(volatile unsigned int *addr) {
          return __sync_lock_test_and_set(addr, 1);
       }
#      define GC_TEST_AND_SET_DEFINED
       inline static void GC_clear(volatile unsigned int *addr) {
          *addr = 0;
       }
#      define GC_CLEAR_DEFINED
#    endif
#    ifdef SPARC
       inline static int GC_test_and_set(volatile unsigned int *addr) {
         int oldval;

         __asm__ __volatile__("ldstub %1,%0"
         : "=r"(oldval), "=m"(*addr)
         : "m"(*addr) : "memory");
         return oldval;
       }
#      define GC_TEST_AND_SET_DEFINED
#    endif
#    ifdef M68K
       /* Contributed by Tony Mantler.  I'm not sure how well it was    */
       /* tested.                                                       */
       inline static int GC_test_and_set(volatile unsigned int *addr) {
          char oldval; /* this must be no longer than 8 bits */

          /* The return value is semi-phony. */
          /* 'tas' sets bit 7 while the return */
          /* value pretends bit 0 was set */
          __asm__ __volatile__(
                 "tas %1@; sne %0; negb %0"
                 : "=d" (oldval)
                 : "a" (addr) : "memory");
          return oldval;
       }
#      define GC_TEST_AND_SET_DEFINED
#    endif
#    if defined(POWERPC)
#     define GC_TEST_AND_SET_DEFINED
#     define GC_CLEAR_DEFINED
#     if (__GNUC__>4)||((__GNUC__==4)&&(__GNUC_MINOR__>=4))
#       define GC_test_and_set(addr) __sync_lock_test_and_set (addr, 1)
#       define GC_clear(addr) __sync_lock_release (addr)
#     else
        inline static int GC_test_and_set(volatile unsigned int *addr) {
          int oldval;
          int temp = 1; /* locked value */

          __asm__ __volatile__(
               "\n1:\n"
               "\tlwarx %0,%y3\n"      /* load and reserve, 32-bits      */
               "\tcmpwi %0, 0\n"       /* if load is                     */
               "\tbne 2f\n"            /*   non-zero, return already set */
               "\tstwcx. %2,%y3\n"     /* else store conditional         */
               "\tbne- 1b\n"           /* retry if lost reservation      */
               "\tsync\n"              /* import barrier                 */
               "2:\t\n"                /* oldval is zero if we set       */
              : "=&r"(oldval), "=m"(*addr)
              : "r"(temp), "Z"(*addr)
              : "cr0","memory");
          return oldval;
        }
        inline static void GC_clear(volatile unsigned int *addr) {
          __asm__ __volatile__("lwsync" : : : "memory");
          *(addr) = 0;
        }
#     endif
#    endif /* POWERPC */
#    if defined(ALPHA)
        inline static int GC_test_and_set(volatile unsigned int * addr)
        {
          unsigned long oldvalue;
          unsigned long temp;

          __asm__ __volatile__(
                             "1:     ldl_l %0,%1\n"
                             "       and %0,%3,%2\n"
                             "       bne %2,2f\n"
                             "       xor %0,%3,%0\n"
                             "       stl_c %0,%1\n"
#       ifdef __ELF__
                             "       beq %0,3f\n"
#       else
                             "       beq %0,1b\n"
#       endif
                             "       mb\n"
                             "2:\n"
#       ifdef __ELF__
                             ".section .text2,\"ax\"\n"
                             "3:     br 1b\n"
                             ".previous"
#       endif
                             :"=&r" (temp), "=m" (*addr), "=&r" (oldvalue)
                             :"Ir" (1), "m" (*addr)
                             :"memory");

          return oldvalue;
        }
#       define GC_TEST_AND_SET_DEFINED
        inline static void GC_clear(volatile unsigned int *addr) {
          __asm__ __volatile__("mb" : : : "memory");
          *(addr) = 0;
        }
#       define GC_CLEAR_DEFINED
#    endif /* ALPHA */
#    ifdef ARM32
#     define GC_TEST_AND_SET_DEFINED
#     if ((__GNUC__>4)||((__GNUC__==4)&&(__GNUC_MINOR__>=5))) && defined(__ARM_EABI__)
#       define GC_CLEAR_DEFINED
#       define GC_test_and_set(addr) __sync_lock_test_and_set (addr, 1)
#       define GC_clear(addr) __sync_lock_release (addr)
#     else
        inline static int GC_test_and_set(volatile unsigned int *addr) {
          int oldval;
          /* SWP on ARM is very similar to XCHG on x86.  Doesn't lock the
           * bus because there are no SMP ARM machines.  If/when there are,
           * this code will likely need to be updated. */
          /* See linuxthreads/sysdeps/arm/pt-machine.h in glibc-2.1 */
          __asm__ __volatile__("swp %0, %1, [%2]"
                             : "=r"(oldval)
                             : "0"(1), "r"(addr)
                             : "memory");
          return oldval;
        }
#     endif
#    endif /* ARM32 */
#    ifdef CRIS
        inline static int GC_test_and_set(volatile unsigned int *addr) {
          /* Ripped from linuxthreads/sysdeps/cris/pt-machine.h.        */
          /* Included with Hans-Peter Nilsson's permission.             */
          register unsigned long int ret;

          /* Note the use of a dummy output of *addr to expose the write.
           * The memory barrier is to stop *other* writes being moved past
           * this code.
           */
            __asm__ __volatile__("clearf\n"
                                 "0:\n\t"
                                 "movu.b [%2],%0\n\t"
                                 "ax\n\t"
                                 "move.b %3,[%2]\n\t"
                                 "bwf 0b\n\t"
                                 "clearf"
                                 : "=&r" (ret), "=m" (*addr)
                                 : "r" (addr), "r" ((int) 1), "m" (*addr)
                                 : "memory");
            return ret;
        }
#       define GC_TEST_AND_SET_DEFINED
#    endif /* CRIS */
#    ifdef S390
       inline static int GC_test_and_set(volatile unsigned int *addr) {
         int ret;
         __asm__ __volatile__ (
          "     l     %0,0(%2)\n"
          "0:   cs    %0,%1,0(%2)\n"
          "     jl    0b"
          : "=&d" (ret)
          : "d" (1), "a" (addr)
          : "cc", "memory");
         return ret;
       }
#      define GC_TEST_AND_SET_DEFINED
#    endif
#  endif /* __GNUC__ */
#  if (defined(ALPHA) && !defined(__GNUC__))
#    ifndef OSF1
        --> We currently assume that if gcc is not used, we are
        --> running under Tru64.
#    endif
#    include <machine/builtins.h>
#    include <c_asm.h>
#    define GC_test_and_set(addr) __ATOMIC_EXCH_LONG(addr, 1)
#    define GC_TEST_AND_SET_DEFINED
#    define GC_clear(addr) { asm("mb"); *(volatile unsigned *)addr = 0; }
#    define GC_CLEAR_DEFINED
#  endif
#  if defined(MSWIN32)
#    define GC_test_and_set(addr) InterlockedExchange((LPLONG)addr,1)
#    define GC_TEST_AND_SET_DEFINED
#  endif
#  ifdef MIPS
#    ifdef LINUX
#      include <sys/tas.h>
#      define GC_test_and_set(addr) _test_and_set((int *) addr,1)
#      define GC_TEST_AND_SET_DEFINED
#    elif __mips < 3 || !(defined (_ABIN32) || defined(_ABI64)) \
        || !defined(_COMPILER_VERSION) || _COMPILER_VERSION < 700
#        ifdef __GNUC__
#          define GC_test_and_set(addr) _test_and_set((void *)addr,1)
#        else
#          define GC_test_and_set(addr) test_and_set((void *)addr,1)
#        endif
#    else
#        include <sgidefs.h>
#        include <mutex.h>
#        define GC_test_and_set(addr) __test_and_set32((void *)addr,1)
#        define GC_clear(addr) __lock_release(addr)
#        define GC_CLEAR_DEFINED
#    endif
#    define GC_TEST_AND_SET_DEFINED
#  endif /* MIPS */
#  if defined(_AIX)
#    include <sys/atomic_op.h>
#    if (defined(_POWER) || defined(_POWERPC))
#      if defined(__GNUC__)
         inline static void GC_memsync() {
           __asm__ __volatile__ ("sync" : : : "memory");
         }
#      else
#        ifndef inline
#          define inline __inline
#        endif
#        pragma mc_func GC_memsync { \
           "7c0004ac" /* sync (same opcode used for dcs)*/ \
         }
#      endif
#    else
#    error dont know how to memsync
#    endif
     inline static int GC_test_and_set(volatile unsigned int * addr) {
          int oldvalue = 0;
          if (compare_and_swap((void *)addr, &oldvalue, 1)) {
            GC_memsync();
            return 0;
          } else return 1;
     }
#    define GC_TEST_AND_SET_DEFINED
     inline static void GC_clear(volatile unsigned int *addr) {
          GC_memsync();
          *(addr) = 0;
     }
#    define GC_CLEAR_DEFINED

#  endif
#  if 0 /* defined(HP_PA) */
     /* The official recommendation seems to be to not use ldcw from    */
     /* user mode.  Since multithreaded incremental collection doesn't  */
     /* work anyway on HP_PA, this shouldn't be a major loss.           */

     /* "set" means 0 and "clear" means 1 here.         */
#    define GC_test_and_set(addr) !GC_test_and_clear(addr);
#    define GC_TEST_AND_SET_DEFINED
#    define GC_clear(addr) GC_noop1((word)(addr)); *(volatile unsigned int *)addr = 1;
        /* The above needs a memory barrier! */
#    define GC_CLEAR_DEFINED
#  endif
#  if defined(GC_TEST_AND_SET_DEFINED) && !defined(GC_CLEAR_DEFINED)
#    ifdef __GNUC__
       inline static void GC_clear(volatile unsigned int *addr) {
         /* Try to discourage gcc from moving anything past this. */
         __asm__ __volatile__(" " : : : "memory");
         *(addr) = 0;
       }
#    else
            /* The function call in the following should prevent the    */
            /* compiler from moving assignments to below the UNLOCK.    */
#      define GC_clear(addr) GC_noop1((word)(addr)); \
                             *((volatile unsigned int *)(addr)) = 0;
#    endif
#    define GC_CLEAR_DEFINED
#  endif /* !GC_CLEAR_DEFINED */

#  if !defined(GC_TEST_AND_SET_DEFINED)
#    define USE_PTHREAD_LOCKS
#  endif

#  if defined(GC_PTHREADS) && !defined(GC_SOLARIS_THREADS) \
      && !defined(GC_WIN32_THREADS)
#    define NO_THREAD (pthread_t)(-1)
#    include <pthread.h>
#    if defined(PARALLEL_MARK)
      /* We need compare-and-swap to update mark bits, where it's       */
      /* performance critical.  If USE_MARK_BYTES is defined, it is     */
      /* no longer needed for this purpose.  However we use it in       */
      /* either case to implement atomic fetch-and-add, though that's   */
      /* less performance critical, and could perhaps be done with      */
      /* a lock.                                                        */
#     if defined(GENERIC_COMPARE_AND_SWAP)
        /* Probably not useful, except for debugging.   */
        /* We do use GENERIC_COMPARE_AND_SWAP on PA_RISC, but we        */
        /* minimize its use.                                            */
        extern pthread_mutex_t GC_compare_and_swap_lock;

        /* Note that if GC_word updates are not atomic, a concurrent    */
        /* reader should acquire GC_compare_and_swap_lock.  On          */
        /* currently supported platforms, such updates are atomic.      */
        extern GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                               GC_word old, GC_word new_val);
#     endif /* GENERIC_COMPARE_AND_SWAP */
#     if defined(I386)
#      if !defined(GENERIC_COMPARE_AND_SWAP)
         /* Returns TRUE if the comparison succeeded. */
         inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                                       GC_word old,
                                                       GC_word new_val)
         {
           char result;
           __asm__ __volatile__("lock; cmpxchgl %2, %0; setz %1"
                : "+m"(*(addr)), "=r"(result)
                : "r" (new_val), "a"(old) : "memory");
           return (GC_bool) result;
         }
#      endif /* !GENERIC_COMPARE_AND_SWAP */
       inline static void GC_memory_barrier()
       {
         /* We believe the processor ensures at least processor */
         /* consistent ordering.  Thus a compiler barrier       */
         /* should suffice.                                     */
         __asm__ __volatile__("" : : : "memory");
       }
#     endif /* I386 */
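
      /* Illustrative only: the publication pattern GC_memory_barrier   */
      /* is intended for.  The names example_data and example_ready     */
      /* are hypothetical.                                              */
#     if 0
        static GC_word example_data;
        static volatile GC_word example_ready = 0;

        static void example_publish(GC_word v)
        {
            example_data = v;
            GC_memory_barrier();  /* make the data store visible     */
                                  /* before the flag store below     */
            example_ready = 1;
        }
#     endif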

#     if defined(POWERPC)
#      if !defined(GENERIC_COMPARE_AND_SWAP)
#       if CPP_WORDSZ == 64
        /* Returns TRUE if the comparison succeeded. */
        inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
            GC_word old, GC_word new_val)
        {
            unsigned long result, dummy;
            __asm__ __volatile__(
                "1:\tldarx %0,0,%5\n"
                  "\tcmpd %0,%4\n"
                  "\tbne  2f\n"
                  "\tstdcx. %3,0,%2\n"
                  "\tbne- 1b\n"
                  "\tsync\n"
                  "\tli %1, 1\n"
                  "\tb 3f\n"
                "2:\tli %1, 0\n"
                "3:\t\n"
                :  "=&r" (dummy), "=r" (result), "=p" (addr)
                :  "r" (new_val), "r" (old), "2"(addr)
                : "cr0","memory");
            return (GC_bool) result;
        }
#       else
        /* Returns TRUE if the comparison succeeded. */
        inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
            GC_word old, GC_word new_val)
        {
            int result, dummy;
            __asm__ __volatile__(
                "1:\tlwarx %0,0,%5\n"
                  "\tcmpw %0,%4\n"
                  "\tbne  2f\n"
                  "\tstwcx. %3,0,%2\n"
                  "\tbne- 1b\n"
                  "\tsync\n"
                  "\tli %1, 1\n"
                  "\tb 3f\n"
                "2:\tli %1, 0\n"
                "3:\t\n"
                :  "=&r" (dummy), "=r" (result), "=p" (addr)
                :  "r" (new_val), "r" (old), "2"(addr)
                : "cr0","memory");
            return (GC_bool) result;
        }
#       endif
#      endif /* !GENERIC_COMPARE_AND_SWAP */
        inline static void GC_memory_barrier()
        {
            __asm__ __volatile__("sync" : : : "memory");
        }
#     endif /* POWERPC */

#     if defined(IA64)
#      if !defined(GENERIC_COMPARE_AND_SWAP)
         inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                                       GC_word old,
                                                       GC_word new_val)
         {
           return __sync_bool_compare_and_swap (addr, old, new_val);
         }
#      endif /* !GENERIC_COMPARE_AND_SWAP */
#      if 0
        /* Shouldn't be needed; we use volatile stores instead. */
        inline static void GC_memory_barrier()
        {
          __sync_synchronize ();
        }
#      endif /* 0 */
#     endif /* IA64 */
#     if defined(ALPHA)
#      if !defined(GENERIC_COMPARE_AND_SWAP)
#        if defined(__GNUC__)
           inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                                         GC_word old, GC_word new_val)
           {
             unsigned long was_equal;
             unsigned long temp;

             __asm__ __volatile__(
                             "1:     ldq_l %0,%1\n"
                             "       cmpeq %0,%4,%2\n"
                             "       mov %3,%0\n"
                             "       beq %2,2f\n"
                             "       stq_c %0,%1\n"
                             "       beq %0,1b\n"
                             "2:\n"
                             "       mb\n"
                             :"=&r" (temp), "=m" (*addr), "=&r" (was_equal)
                             : "r" (new_val), "Ir" (old)
                             :"memory");
             return was_equal;
           }
#        else /* !__GNUC__ */
           inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                                         GC_word old, GC_word new_val)
           {
             return __CMP_STORE_QUAD(addr, old, new_val, addr);
           }
#        endif /* !__GNUC__ */
#      endif /* !GENERIC_COMPARE_AND_SWAP */
#      ifdef __GNUC__
         inline static void GC_memory_barrier()
         {
           __asm__ __volatile__("mb" : : : "memory");
         }
#      else
#        define GC_memory_barrier() asm("mb")
#      endif /* !__GNUC__ */
#     endif /* ALPHA */
#     if defined(S390)
#      if !defined(GENERIC_COMPARE_AND_SWAP)
         inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                         GC_word old, GC_word new_val)
         {
           int retval;
           __asm__ __volatile__ (
#            ifndef __s390x__
               "     cs  %1,%2,0(%3)\n"
#            else
               "     csg %1,%2,0(%3)\n"
#            endif
             "     ipm %0\n"
             "     srl %0,28\n"
             : "=&d" (retval), "+d" (old)
             : "d" (new_val), "a" (addr)
             : "cc", "memory");
           return retval == 0;
         }
#      endif
#     endif
#     if !defined(GENERIC_COMPARE_AND_SWAP)
        /* Returns the original value of *addr. */
        inline static GC_word GC_atomic_add(volatile GC_word *addr,
                                            GC_word how_much)
        {
          GC_word old;
          do {
            old = *addr;
          } while (!GC_compare_and_exchange(addr, old, old+how_much));
          return old;
        }
#     else /* GENERIC_COMPARE_AND_SWAP */
        /* So long as a GC_word can be atomically updated, it should    */
        /* be OK to read *addr without a lock.                          */
        extern GC_word GC_atomic_add(volatile GC_word *addr, GC_word how_much);
#     endif /* GENERIC_COMPARE_AND_SWAP */
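
      /* Illustrative only: the retry idiom GC_atomic_add uses above,   */
      /* applied to an atomic bit-set.  The helper example_set_bit is   */
      /* hypothetical.                                                  */
#     if 0
        static void example_set_bit(volatile GC_word *flags)
        {
            GC_word old;
            do {
                old = *flags;
                /* Retry if another thread changed *flags between   */
                /* the read above and the compare-and-exchange.     */
            } while (!GC_compare_and_exchange(flags, old, old | 1));
        }
#     endif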

#    endif /* PARALLEL_MARK */

#    if !defined(THREAD_LOCAL_ALLOC) && !defined(USE_PTHREAD_LOCKS)
      /* In the THREAD_LOCAL_ALLOC case, the allocation lock tends to   */
      /* be held for long periods, if it is held at all.  Thus spinning */
      /* and sleeping for fixed periods are likely to result in         */
      /* significant wasted time.  We thus rely mostly on queued locks. */
#     define USE_SPIN_LOCK
      extern volatile unsigned int GC_allocate_lock;
      extern void GC_lock(void);
        /* Allocation lock holder.  Only set if acquired by client through */
        /* GC_call_with_alloc_lock.                                        */
#     ifdef GC_ASSERTIONS
#        define LOCK() \
                { if (GC_test_and_set(&GC_allocate_lock)) GC_lock(); \
                  SET_LOCK_HOLDER(); }
#        define UNLOCK() \
                { GC_ASSERT(I_HOLD_LOCK()); UNSET_LOCK_HOLDER(); \
                  GC_clear(&GC_allocate_lock); }
#     else
#        define LOCK() \
                { if (GC_test_and_set(&GC_allocate_lock)) GC_lock(); }
#        define UNLOCK() \
                GC_clear(&GC_allocate_lock)
#     endif /* !GC_ASSERTIONS */
#     if 0
        /* Another alternative for OSF1 might be:               */
#       include <sys/mman.h>
        extern msemaphore GC_allocate_semaphore;
#       define LOCK() { if (msem_lock(&GC_allocate_semaphore, MSEM_IF_NOWAIT) \
                            != 0) GC_lock(); else GC_allocate_lock = 1; }
        /* The following is INCORRECT, since the memory model is too weak. */
        /* Is this true?  Presumably msem_unlock has the right semantics?  */
        /*              - HB                                               */
#       define UNLOCK() { GC_allocate_lock = 0; \
                          msem_unlock(&GC_allocate_semaphore, 0); }
#     endif /* 0 */
#    else /* THREAD_LOCAL_ALLOC || USE_PTHREAD_LOCKS */
#      ifndef USE_PTHREAD_LOCKS
#        define USE_PTHREAD_LOCKS
#      endif
#    endif /* THREAD_LOCAL_ALLOC */
#   ifdef USE_PTHREAD_LOCKS
#      include <pthread.h>
       extern pthread_mutex_t GC_allocate_ml;
#      ifdef GC_ASSERTIONS
#        define LOCK() \
                { GC_lock(); \
                  SET_LOCK_HOLDER(); }
#        define UNLOCK() \
                { GC_ASSERT(I_HOLD_LOCK()); UNSET_LOCK_HOLDER(); \
                  pthread_mutex_unlock(&GC_allocate_ml); }
#      else /* !GC_ASSERTIONS */
#        if defined(NO_PTHREAD_TRYLOCK)
#          define LOCK() GC_lock()
#        else /* !defined(NO_PTHREAD_TRYLOCK) */
#          define LOCK() \
             { if (0 != pthread_mutex_trylock(&GC_allocate_ml)) GC_lock(); }
#        endif
#        define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
#      endif /* !GC_ASSERTIONS */
#   endif /* USE_PTHREAD_LOCKS */
#   define SET_LOCK_HOLDER() GC_lock_holder = pthread_self()
#   define UNSET_LOCK_HOLDER() GC_lock_holder = NO_THREAD
#   define I_HOLD_LOCK() (pthread_equal(GC_lock_holder, pthread_self()))
    extern VOLATILE GC_bool GC_collecting;
#   define ENTER_GC() GC_collecting = 1
#   define EXIT_GC() GC_collecting = 0
    extern void GC_lock(void);
    extern pthread_t GC_lock_holder;
#   ifdef GC_ASSERTIONS
      extern pthread_t GC_mark_lock_holder;
#   endif
#  endif /* GC_PTHREADS with linux_threads.c implementation */
#  if defined(GC_WIN32_THREADS)
#    if defined(GC_PTHREADS)
#      include <pthread.h>
       extern pthread_mutex_t GC_allocate_ml;
#      define LOCK()   pthread_mutex_lock(&GC_allocate_ml)
#      define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
#    else
#      include <windows.h>
       GC_API CRITICAL_SECTION GC_allocate_ml;
#      define LOCK() EnterCriticalSection(&GC_allocate_ml)
#      define UNLOCK() LeaveCriticalSection(&GC_allocate_ml)
#    endif
#  endif
#  ifndef SET_LOCK_HOLDER
#      define SET_LOCK_HOLDER()
#      define UNSET_LOCK_HOLDER()
#      define I_HOLD_LOCK() FALSE
                /* Used on platforms where locks can be reacquired,     */
                /* so it doesn't matter if we lie.                      */
#  endif
# else /* !THREADS */
#    define LOCK()
#    define UNLOCK()
# endif /* !THREADS */
# ifndef SET_LOCK_HOLDER
#   define SET_LOCK_HOLDER()
#   define UNSET_LOCK_HOLDER()
#   define I_HOLD_LOCK() FALSE
                /* Used on platforms where locks can be reacquired,     */
                /* so it doesn't matter if we lie.                      */
# endif
# ifndef ENTER_GC
#   define ENTER_GC()
#   define EXIT_GC()
# endif

# ifndef DCL_LOCK_STATE
#   define DCL_LOCK_STATE
# endif
# ifndef FASTLOCK
#   define FASTLOCK() LOCK()
#   define FASTLOCK_SUCCEEDED() TRUE
#   define FASTUNLOCK() UNLOCK()
# endif

#endif /* GC_LOCKS_H */
