/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
 *
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose,  provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#ifndef GC_LOCKS_H
#define GC_LOCKS_H

/*
 * Mutual exclusion between allocator/collector routines.
 * Needed if there is more than one allocator thread.
 * FASTLOCK() is assumed to try to acquire the lock in a cheap and
 * dirty way that is acceptable for a few instructions, e.g. by
 * inhibiting preemption.  This is assumed to have succeeded only
 * if a subsequent call to FASTLOCK_SUCCEEDED() returns TRUE.
 * FASTUNLOCK() is called whether or not FASTLOCK_SUCCEEDED().
 * If signals cannot be tolerated with the FASTLOCK held, then
 * FASTLOCK should disable signals.  The code executed under
 * FASTLOCK is otherwise immune to interruption, provided it is
 * not restarted.
 * DCL_LOCK_STATE declares any local variables needed by LOCK and UNLOCK
 * and/or DISABLE_SIGNALS and ENABLE_SIGNALS and/or FASTLOCK.
 * (There is currently no equivalent for FASTLOCK.)
 *
 * In the PARALLEL_MARK case, we also need to define a number of
 * other inline functions here:
 *   GC_bool GC_compare_and_exchange( volatile GC_word *addr,
 *                                    GC_word old, GC_word new )
 *   GC_word GC_atomic_add( volatile GC_word *addr, GC_word how_much )
 *   void GC_memory_barrier( )
 *
 */
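/* A minimal usage sketch of the protocol described above.  The function  */
/* name and the "work" placeholders are illustrative only; the macros are */
/* the ones defined (or defaulted) in this header.                        */
#if 0
  static void GC_example_critical_section(void)
  {
      DCL_LOCK_STATE;           /* locals needed by LOCK/UNLOCK/FASTLOCK   */

      FASTLOCK();               /* cheap, possibly unreliable acquire      */
      if (!FASTLOCK_SUCCEEDED()) {
          FASTUNLOCK();         /* called whether or not it succeeded      */
          LOCK();               /* fall back to the full allocator lock    */
          /* ... short critical section ... */
          UNLOCK();
          return;
      }
      /* ... short critical section under the fast lock ... */
      FASTUNLOCK();
  }
#endif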
# ifdef THREADS
   void GC_noop1 GC_PROTO((word));
#  ifdef PCR_OBSOLETE   /* Faster, but broken with multiple lwp's       */
#    include  "th/PCR_Th.h"
#    include  "th/PCR_ThCrSec.h"
     extern struct PCR_Th_MLRep GC_allocate_ml;
#    define DCL_LOCK_STATE  PCR_sigset_t GC_old_sig_mask
#    define LOCK() PCR_Th_ML_Acquire(&GC_allocate_ml)
#    define UNLOCK() PCR_Th_ML_Release(&GC_allocate_ml)
#    define FASTLOCK() PCR_ThCrSec_EnterSys()
     /* Here we cheat (a lot): */
#        define FASTLOCK_SUCCEEDED() (*(int *)(&GC_allocate_ml) == 0)
                /* TRUE if nobody currently holds the lock */
#    define FASTUNLOCK() PCR_ThCrSec_ExitSys()
#  endif
#  ifdef PCR
#    include <base/PCR_Base.h>
#    include <th/PCR_Th.h>
     extern PCR_Th_ML GC_allocate_ml;
#    define DCL_LOCK_STATE \
         PCR_ERes GC_fastLockRes; PCR_sigset_t GC_old_sig_mask
#    define LOCK() PCR_Th_ML_Acquire(&GC_allocate_ml)
#    define UNLOCK() PCR_Th_ML_Release(&GC_allocate_ml)
#    define FASTLOCK() (GC_fastLockRes = PCR_Th_ML_Try(&GC_allocate_ml))
#    define FASTLOCK_SUCCEEDED() (GC_fastLockRes == PCR_ERes_okay)
#    define FASTUNLOCK()  {\
        if( FASTLOCK_SUCCEEDED() ) PCR_Th_ML_Release(&GC_allocate_ml); }
#  endif
#  ifdef SRC_M3
     extern GC_word RT0u__inCritical;
#    define LOCK() RT0u__inCritical++
#    define UNLOCK() RT0u__inCritical--
#  endif
#  ifdef GC_SOLARIS_THREADS
#    include <thread.h>
#    include <signal.h>
     extern mutex_t GC_allocate_ml;
#    define LOCK() mutex_lock(&GC_allocate_ml);
#    define UNLOCK() mutex_unlock(&GC_allocate_ml);
#  endif

/* Try to define GC_TEST_AND_SET and a matching GC_CLEAR for spin lock  */
/* acquisition and release.  We need this for correct operation of the  */
/* incremental GC.                                                      */
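/* A minimal sketch of the spin-lock usage these primitives support     */
/* (compare the LOCK()/UNLOCK() definitions further below):             */
/* GC_test_and_set() returns the previous value, i.e. nonzero when the  */
/* lock was already held, and GC_clear() releases it.  The variable and */
/* function names here are illustrative only.                           */
#if 0
  static volatile unsigned int example_lock = 0;

  static void example_acquire(void)
  {
      while (GC_test_and_set(&example_lock)) {
          /* Lock was already held; spin.  The real slow path is the    */
          /* out-of-line GC_lock() used by LOCK() below.                */
      }
  }

  static void example_release(void)
  {
      GC_clear(&example_lock);
  }
#endif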
#  ifdef __GNUC__
#    if defined(I386)
       inline static int GC_test_and_set(volatile unsigned int *addr) {
          int oldval;
          /* Note: the "xchg" instruction does not need a "lock" prefix */
          __asm__ __volatile__("xchgl %0, %1"
                : "=r"(oldval), "=m"(*(addr))
                : "0"(1), "m"(*(addr)) : "memory");
          return oldval;
       }
#      define GC_TEST_AND_SET_DEFINED
#    endif
#    if defined(IA64)
#     include <ia64intrin.h>
       inline static int GC_test_and_set(volatile unsigned int *addr) {
          return __sync_lock_test_and_set(addr, 1);
       }
#      define GC_TEST_AND_SET_DEFINED
       inline static void GC_clear(volatile unsigned int *addr) {
          *addr = 0;
       }
#      define GC_CLEAR_DEFINED
#    endif
#    ifdef SPARC
       inline static int GC_test_and_set(volatile unsigned int *addr) {
         int oldval;

         __asm__ __volatile__("ldstub %1,%0"
         : "=r"(oldval), "=m"(*addr)
         : "m"(*addr) : "memory");
         return oldval;
       }
#      define GC_TEST_AND_SET_DEFINED
#    endif
#    ifdef M68K
       /* Contributed by Tony Mantler.  I'm not sure how well it was    */
       /* tested.                                                       */
       inline static int GC_test_and_set(volatile unsigned int *addr) {
          char oldval; /* this must be no longer than 8 bits */

          /* The return value is semi-phony. */
          /* 'tas' sets bit 7 while the return */
          /* value pretends bit 0 was set */
          __asm__ __volatile__(
                 "tas %1@; sne %0; negb %0"
                 : "=d" (oldval)
                 : "a" (addr) : "memory");
          return oldval;
       }
#      define GC_TEST_AND_SET_DEFINED
#    endif
#    if defined(POWERPC)
        inline static int GC_test_and_set(volatile unsigned int *addr) {
          int oldval;
          int temp = 1; /* locked value */

          __asm__ __volatile__(
               "1:\tlwarx %0,0,%3\n"   /* load and reserve               */
               "\tcmpwi %0, 0\n"       /* if load is                     */
               "\tbne 2f\n"            /*   non-zero, return already set */
               "\tstwcx. %2,0,%1\n"    /* else store conditional         */
               "\tbne- 1b\n"           /* retry if lost reservation      */
               "\tsync\n"              /* import barrier                 */
               "2:\t\n"                /* oldval is zero if we set       */
              : "=&r"(oldval), "=p"(addr)
              : "r"(temp), "1"(addr)
              : "cr0","memory");
          return oldval;
        }
#       define GC_TEST_AND_SET_DEFINED
        inline static void GC_clear(volatile unsigned int *addr) {
          __asm__ __volatile__("eieio" : : : "memory");
          *(addr) = 0;
        }
#       define GC_CLEAR_DEFINED
#    endif
#    if defined(ALPHA)
        inline static int GC_test_and_set(volatile unsigned int * addr)
        {
          unsigned long oldvalue;
          unsigned long temp;

          __asm__ __volatile__(
                             "1:     ldl_l %0,%1\n"
                             "       and %0,%3,%2\n"
                             "       bne %2,2f\n"
                             "       xor %0,%3,%0\n"
                             "       stl_c %0,%1\n"
#       ifdef __ELF__
                             "       beq %0,3f\n"
#       else
                             "       beq %0,1b\n"
#       endif
                             "       mb\n"
                             "2:\n"
#       ifdef __ELF__
                             ".section .text2,\"ax\"\n"
                             "3:     br 1b\n"
                             ".previous"
#       endif
                             :"=&r" (temp), "=m" (*addr), "=&r" (oldvalue)
                             :"Ir" (1), "m" (*addr)
                             :"memory");

          return oldvalue;
        }
#       define GC_TEST_AND_SET_DEFINED
        inline static void GC_clear(volatile unsigned int *addr) {
          __asm__ __volatile__("mb" : : : "memory");
          *(addr) = 0;
        }
#       define GC_CLEAR_DEFINED
#    endif /* ALPHA */
#    ifdef ARM32
        inline static int GC_test_and_set(volatile unsigned int *addr) {
          int oldval;
          /* SWP on ARM is very similar to XCHG on x86.  Doesn't lock the
           * bus because there are no SMP ARM machines.  If/when there are,
           * this code will likely need to be updated. */
          /* See linuxthreads/sysdeps/arm/pt-machine.h in glibc-2.1 */
          __asm__ __volatile__("swp %0, %1, [%2]"
                             : "=r"(oldval)
                             : "0"(1), "r"(addr)
                             : "memory");
          return oldval;
        }
#       define GC_TEST_AND_SET_DEFINED
#    endif /* ARM32 */
#    ifdef CRIS
        inline static int GC_test_and_set(volatile unsigned int *addr) {
          /* Ripped from linuxthreads/sysdeps/cris/pt-machine.h.        */
          /* Included with Hans-Peter Nilsson's permission.             */
          register unsigned long int ret;

          /* Note the use of a dummy output of *addr to expose the write.
           * The memory barrier is to stop *other* writes being moved past
           * this code.
           */
            __asm__ __volatile__("clearf\n"
                                 "0:\n\t"
                                 "movu.b [%2],%0\n\t"
                                 "ax\n\t"
                                 "move.b %3,[%2]\n\t"
                                 "bwf 0b\n\t"
                                 "clearf"
                                 : "=&r" (ret), "=m" (*addr)
                                 : "r" (addr), "r" ((int) 1), "m" (*addr)
                                 : "memory");
            return ret;
        }
#       define GC_TEST_AND_SET_DEFINED
#    endif /* CRIS */
#    ifdef S390
       inline static int GC_test_and_set(volatile unsigned int *addr) {
         int ret;
         __asm__ __volatile__ (
          "     l     %0,0(%2)\n"
          "0:   cs    %0,%1,0(%2)\n"
          "     jl    0b"
          : "=&d" (ret)
          : "d" (1), "a" (addr)
          : "cc", "memory");
         return ret;
       }
#    endif
#  endif /* __GNUC__ */
#  if (defined(ALPHA) && !defined(__GNUC__))
#    ifndef OSF1
        --> We currently assume that if gcc is not used, we are
        --> running under Tru64.
#    endif
#    include <machine/builtins.h>
#    include <c_asm.h>
#    define GC_test_and_set(addr) __ATOMIC_EXCH_LONG(addr, 1)
#    define GC_TEST_AND_SET_DEFINED
#    define GC_clear(addr) { asm("mb"); *(volatile unsigned *)addr = 0; }
#    define GC_CLEAR_DEFINED
#  endif
#  if defined(MSWIN32)
#    define GC_test_and_set(addr) InterlockedExchange((LPLONG)addr,1)
#    define GC_TEST_AND_SET_DEFINED
#  endif
#  ifdef MIPS
#    ifdef LINUX
#      include <sys/tas.h>
#      define GC_test_and_set(addr) _test_and_set((int *) addr,1)
#      define GC_TEST_AND_SET_DEFINED
#    elif __mips < 3 || !(defined (_ABIN32) || defined(_ABI64)) \
        || !defined(_COMPILER_VERSION) || _COMPILER_VERSION < 700
#        ifdef __GNUC__
#          define GC_test_and_set(addr) _test_and_set((void *)addr,1)
#        else
#          define GC_test_and_set(addr) test_and_set((void *)addr,1)
#        endif
#    else
#        define GC_test_and_set(addr) __test_and_set32((void *)addr,1)
#        define GC_clear(addr) __lock_release(addr);
#        define GC_CLEAR_DEFINED
#    endif
#    define GC_TEST_AND_SET_DEFINED
#  endif /* MIPS */
#  if defined(_AIX)
#    include <sys/atomic_op.h>
#    if (defined(_POWER) || defined(_POWERPC))
#      if defined(__GNUC__)
         inline static void GC_memsync() {
           __asm__ __volatile__ ("sync" : : : "memory");
         }
#      else
#        ifndef inline
#          define inline __inline
#        endif
#        pragma mc_func GC_memsync { \
           "7c0004ac" /* sync (same opcode used for dcs)*/ \
         }
#      endif
#    else
#    error dont know how to memsync
#    endif
     inline static int GC_test_and_set(volatile unsigned int * addr) {
          int oldvalue = 0;
          if (compare_and_swap((void *)addr, &oldvalue, 1)) {
            GC_memsync();
            return 0;
          } else return 1;
     }
#    define GC_TEST_AND_SET_DEFINED
     inline static void GC_clear(volatile unsigned int *addr) {
          GC_memsync();
          *(addr) = 0;
     }
#    define GC_CLEAR_DEFINED

#  endif
#  if 0 /* defined(HP_PA) */
     /* The official recommendation seems to be to not use ldcw from    */
     /* user mode.  Since multithreaded incremental collection doesn't  */
     /* work anyway on HP_PA, this shouldn't be a major loss.           */

     /* "set" means 0 and "clear" means 1 here.         */
#    define GC_test_and_set(addr) !GC_test_and_clear(addr);
#    define GC_TEST_AND_SET_DEFINED
#    define GC_clear(addr) GC_noop1((word)(addr)); *(volatile unsigned int *)addr = 1;
        /* The above needs a memory barrier! */
#    define GC_CLEAR_DEFINED
#  endif
#  if defined(GC_TEST_AND_SET_DEFINED) && !defined(GC_CLEAR_DEFINED)
#    ifdef __GNUC__
       inline static void GC_clear(volatile unsigned int *addr) {
         /* Try to discourage gcc from moving anything past this. */
         __asm__ __volatile__(" " : : : "memory");
         *(addr) = 0;
       }
#    else
            /* The function call in the following should prevent the    */
            /* compiler from moving assignments to below the UNLOCK.    */
#      define GC_clear(addr) GC_noop1((word)(addr)); \
                             *((volatile unsigned int *)(addr)) = 0;
#    endif
#    define GC_CLEAR_DEFINED
#  endif /* !GC_CLEAR_DEFINED */

#  if !defined(GC_TEST_AND_SET_DEFINED)
#    define USE_PTHREAD_LOCKS
#  endif

#  if defined(GC_PTHREADS) && !defined(GC_SOLARIS_THREADS) \
      && !defined(GC_IRIX_THREADS) && !defined(GC_WIN32_THREADS)
#    define NO_THREAD (pthread_t)(-1)
#    include <pthread.h>
#    if defined(PARALLEL_MARK)
      /* We need compare-and-swap to update mark bits, where it's       */
      /* performance critical.  If USE_MARK_BYTES is defined, it is     */
      /* no longer needed for this purpose.  However we use it in       */
      /* either case to implement atomic fetch-and-add, though that's   */
      /* less performance critical, and could perhaps be done with      */
      /* a lock.                                                        */
#     if defined(GENERIC_COMPARE_AND_SWAP)
        /* Probably not useful, except for debugging.   */
        /* We do use GENERIC_COMPARE_AND_SWAP on PA_RISC, but we        */
        /* minimize its use.                                            */
        extern pthread_mutex_t GC_compare_and_swap_lock;

        /* Note that if GC_word updates are not atomic, a concurrent    */
        /* reader should acquire GC_compare_and_swap_lock.  On          */
        /* currently supported platforms, such updates are atomic.      */
        extern GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                               GC_word old, GC_word new_val);
#     endif /* GENERIC_COMPARE_AND_SWAP */
#     if defined(I386)
#      if !defined(GENERIC_COMPARE_AND_SWAP)
         /* Returns TRUE if the comparison succeeded. */
         inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                                       GC_word old,
                                                       GC_word new_val)
         {
           char result;
           __asm__ __volatile__("lock; cmpxchgl %2, %0; setz %1"
                : "+m"(*(addr)), "=r"(result)
                : "r" (new_val), "a"(old) : "memory");
           return (GC_bool) result;
         }
#      endif /* !GENERIC_COMPARE_AND_SWAP */
       inline static void GC_memory_barrier()
       {
         /* We believe the processor ensures at least processor */
         /* consistent ordering.  Thus a compiler barrier       */
         /* should suffice.                                     */
         __asm__ __volatile__("" : : : "memory");
       }
#     endif /* I386 */

#     if defined(POWERPC)
#      if !defined(GENERIC_COMPARE_AND_SWAP)
        /* Returns TRUE if the comparison succeeded. */
        inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
            GC_word old, GC_word new_val)
        {
            int result, dummy;
            __asm__ __volatile__(
                "1:\tlwarx %0,0,%5\n"
                  "\tcmpw %0,%4\n"
                  "\tbne  2f\n"
                  "\tstwcx. %3,0,%2\n"
                  "\tbne- 1b\n"
                  "\tsync\n"
                  "\tli %1, 1\n"
                  "\tb 3f\n"
                "2:\tli %1, 0\n"
                "3:\t\n"
                :  "=&r" (dummy), "=r" (result), "=p" (addr)
                :  "r" (new_val), "r" (old), "2"(addr)
                : "cr0","memory");
            return (GC_bool) result;
        }
#      endif /* !GENERIC_COMPARE_AND_SWAP */
        inline static void GC_memory_barrier()
        {
            __asm__ __volatile__("sync" : : : "memory");
        }
#     endif /* POWERPC */

#     if defined(IA64)
#      if !defined(GENERIC_COMPARE_AND_SWAP)
         inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                                       GC_word old,
                                                       GC_word new_val)
         {
           return __sync_bool_compare_and_swap (addr, old, new_val);
         }
#      endif /* !GENERIC_COMPARE_AND_SWAP */
#      if 0
        /* Shouldn't be needed; we use volatile stores instead. */
        inline static void GC_memory_barrier()
        {
          __sync_synchronize ();
        }
#      endif /* 0 */
#     endif /* IA64 */
#     if defined(ALPHA)
#      if !defined(GENERIC_COMPARE_AND_SWAP)
#        if defined(__GNUC__)
           inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                                         GC_word old, GC_word new_val)
           {
             unsigned long was_equal;
             unsigned long temp;

             __asm__ __volatile__(
                             "1:     ldq_l %0,%1\n"
                             "       cmpeq %0,%4,%2\n"
                             "       mov %3,%0\n"
                             "       beq %2,2f\n"
                             "       stq_c %0,%1\n"
                             "       beq %0,1b\n"
                             "2:\n"
                             "       mb\n"
                             :"=&r" (temp), "=m" (*addr), "=&r" (was_equal)
                             : "r" (new_val), "Ir" (old)
                             :"memory");
             return was_equal;
           }
#        else /* !__GNUC__ */
           inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                                         GC_word old, GC_word new_val)
          {
            return __CMP_STORE_QUAD(addr, old, new_val, addr);
          }
#        endif /* !__GNUC__ */
#      endif /* !GENERIC_COMPARE_AND_SWAP */
#      ifdef __GNUC__
         inline static void GC_memory_barrier()
         {
           __asm__ __volatile__("mb" : : : "memory");
         }
#      else
#        define GC_memory_barrier() asm("mb")
#      endif /* !__GNUC__ */
#     endif /* ALPHA */
#     if defined(S390)
#      if !defined(GENERIC_COMPARE_AND_SWAP)
         inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                         GC_word old, GC_word new_val)
         {
           int retval;
           __asm__ __volatile__ (
#            ifndef __s390x__
               "     cs  %1,%2,0(%3)\n"
#            else
               "     csg %1,%2,0(%3)\n"
#            endif
             "     ipm %0\n"
             "     srl %0,28\n"
             : "=&d" (retval), "+d" (old)
             : "d" (new_val), "a" (addr)
             : "cc", "memory");
           return retval == 0;
         }
#      endif
#     endif
#     if !defined(GENERIC_COMPARE_AND_SWAP)
        /* Returns the original value of *addr. */
        inline static GC_word GC_atomic_add(volatile GC_word *addr,
                                            GC_word how_much)
        {
          GC_word old;
          do {
            old = *addr;
          } while (!GC_compare_and_exchange(addr, old, old+how_much));
          return old;
        }
#     else /* GENERIC_COMPARE_AND_SWAP */
        /* So long as a GC_word can be atomically updated, it should    */
        /* be OK to read *addr without a lock.                          */
        extern GC_word GC_atomic_add(volatile GC_word *addr, GC_word how_much);
#     endif /* GENERIC_COMPARE_AND_SWAP */
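      /* A minimal usage sketch for the two primitives above:              */
      /* GC_atomic_add() returns the value *addr held before the addition, */
      /* and GC_compare_and_exchange() returns TRUE only if *addr still    */
      /* held the expected old value and was replaced.  The counter and    */
      /* flag names are illustrative only.                                 */
#     if 0
        static volatile GC_word example_counter = 0;
        static volatile GC_word example_flag = 0;

        static void example_updates(void)
        {
          GC_word previous = GC_atomic_add(&example_counter, 1);
                /* previous holds the pre-increment value */

          if (GC_compare_and_exchange(&example_flag, 0, 1)) {
            /* this thread changed the flag from 0 to 1 */
          }
          (void) previous;
        }
#     endif /* 0 */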

#    endif /* PARALLEL_MARK */

#    if !defined(THREAD_LOCAL_ALLOC) && !defined(USE_PTHREAD_LOCKS)
      /* In the THREAD_LOCAL_ALLOC case, the allocation lock tends to   */
      /* be held for long periods, if it is held at all.  Thus spinning */
      /* and sleeping for fixed periods are likely to result in         */
      /* significant wasted time.  We thus rely mostly on queued locks. */
#     define USE_SPIN_LOCK
      extern volatile unsigned int GC_allocate_lock;
      extern void GC_lock(void);
        /* Allocation lock holder.  Only set if acquired by client through */
        /* GC_call_with_alloc_lock.                                        */
#     ifdef GC_ASSERTIONS
#        define LOCK() \
                { if (GC_test_and_set(&GC_allocate_lock)) GC_lock(); \
                  SET_LOCK_HOLDER(); }
#        define UNLOCK() \
                { GC_ASSERT(I_HOLD_LOCK()); UNSET_LOCK_HOLDER(); \
                  GC_clear(&GC_allocate_lock); }
#     else
#        define LOCK() \
                { if (GC_test_and_set(&GC_allocate_lock)) GC_lock(); }
#        define UNLOCK() \
                GC_clear(&GC_allocate_lock)
#     endif /* !GC_ASSERTIONS */
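      /* A small sketch of how the assertion-checking variants above are   */
      /* typically exercised: a helper that must only run with the         */
      /* allocation lock held can verify this via I_HOLD_LOCK().  The      */
      /* helper names are illustrative; GC_ASSERT is assumed to come from  */
      /* the enclosing GC headers, and the check is only meaningful when   */
      /* GC_ASSERTIONS is defined.                                         */
#     if 0
        static void example_needs_lock(void)
        {
          GC_ASSERT(I_HOLD_LOCK());
          /* ... touch state that LOCK()/UNLOCK() protects ... */
        }

        static void example_caller(void)
        {
          LOCK();       /* tries GC_test_and_set, GC_lock() on contention */
          example_needs_lock();
          UNLOCK();     /* GC_clear() releases the spin lock              */
        }
#     endif /* 0 */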
#     if 0
        /* Another alternative for OSF1 might be:               */
#       include <sys/mman.h>
        extern msemaphore GC_allocate_semaphore;
#       define LOCK() { if (msem_lock(&GC_allocate_semaphore, MSEM_IF_NOWAIT) \
                            != 0) GC_lock(); else GC_allocate_lock = 1; }
        /* The following is INCORRECT, since the memory model is too weak. */
        /* Is this true?  Presumably msem_unlock has the right semantics?  */
        /*              - HB                                               */
#       define UNLOCK() { GC_allocate_lock = 0; \
                          msem_unlock(&GC_allocate_semaphore, 0); }
#     endif /* 0 */
#    else /* THREAD_LOCAL_ALLOC  || USE_PTHREAD_LOCKS */
#      ifndef USE_PTHREAD_LOCKS
#        define USE_PTHREAD_LOCKS
#      endif
#    endif /* THREAD_LOCAL_ALLOC */
#   ifdef USE_PTHREAD_LOCKS
#      include <pthread.h>
       extern pthread_mutex_t GC_allocate_ml;
#      ifdef GC_ASSERTIONS
#        define LOCK() \
                { GC_lock(); \
                  SET_LOCK_HOLDER(); }
#        define UNLOCK() \
                { GC_ASSERT(I_HOLD_LOCK()); UNSET_LOCK_HOLDER(); \
                  pthread_mutex_unlock(&GC_allocate_ml); }
#      else /* !GC_ASSERTIONS */
#        if defined(NO_PTHREAD_TRYLOCK)
#          define LOCK() GC_lock();
#        else /* !defined(NO_PTHREAD_TRYLOCK) */
#        define LOCK() \
           { if (0 != pthread_mutex_trylock(&GC_allocate_ml)) GC_lock(); }
#        endif
#        define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
#      endif /* !GC_ASSERTIONS */
#   endif /* USE_PTHREAD_LOCKS */
#   define SET_LOCK_HOLDER() GC_lock_holder = pthread_self()
#   define UNSET_LOCK_HOLDER() GC_lock_holder = NO_THREAD
#   define I_HOLD_LOCK() (pthread_equal(GC_lock_holder, pthread_self()))
    extern VOLATILE GC_bool GC_collecting;
#   define ENTER_GC() GC_collecting = 1;
#   define EXIT_GC() GC_collecting = 0;
    extern void GC_lock(void);
    extern pthread_t GC_lock_holder;
#   ifdef GC_ASSERTIONS
      extern pthread_t GC_mark_lock_holder;
#   endif
#  endif /* GC_PTHREADS with linux_threads.c implementation */
#  if defined(GC_IRIX_THREADS)
#    include <pthread.h>
     /* This probably should never be included, but I can't test        */
     /* on Irix anymore.                                                */
#    include <mutex.h>

     extern volatile unsigned int GC_allocate_lock;
        /* This is not a mutex because mutexes that obey the (optional)         */
        /* POSIX scheduling rules are subject to convoys in high contention     */
        /* applications.  This is basically a spin lock.                        */
     extern pthread_t GC_lock_holder;
     extern void GC_lock(void);
        /* Allocation lock holder.  Only set if acquired by client through */
        /* GC_call_with_alloc_lock.                                        */
#    define SET_LOCK_HOLDER() GC_lock_holder = pthread_self()
#    define NO_THREAD (pthread_t)(-1)
#    define UNSET_LOCK_HOLDER() GC_lock_holder = NO_THREAD
#    define I_HOLD_LOCK() (pthread_equal(GC_lock_holder, pthread_self()))
#    define LOCK() { if (GC_test_and_set(&GC_allocate_lock)) GC_lock(); }
#    define UNLOCK() GC_clear(&GC_allocate_lock);
     extern VOLATILE GC_bool GC_collecting;
#    define ENTER_GC() \
                { \
                    GC_collecting = 1; \
                }
#    define EXIT_GC() GC_collecting = 0;
#  endif /* GC_IRIX_THREADS */
#  if defined(GC_WIN32_THREADS)
#    if defined(GC_PTHREADS)
#      include <pthread.h>
       extern pthread_mutex_t GC_allocate_ml;
#      define LOCK()   pthread_mutex_lock(&GC_allocate_ml)
#      define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
#    else
#      include <windows.h>
       GC_API CRITICAL_SECTION GC_allocate_ml;
#      define LOCK() EnterCriticalSection(&GC_allocate_ml);
#      define UNLOCK() LeaveCriticalSection(&GC_allocate_ml);
#    endif
#  endif
#  ifndef SET_LOCK_HOLDER
#      define SET_LOCK_HOLDER()
#      define UNSET_LOCK_HOLDER()
#      define I_HOLD_LOCK() FALSE
                /* Used on platforms where locks can be reacquired,     */
                /* so it doesn't matter if we lie.                      */
#  endif
# else /* !THREADS */
#    define LOCK()
#    define UNLOCK()
# endif /* !THREADS */
# ifndef SET_LOCK_HOLDER
#   define SET_LOCK_HOLDER()
#   define UNSET_LOCK_HOLDER()
#   define I_HOLD_LOCK() FALSE
                /* Used on platforms where locks can be reacquired,     */
                /* so it doesn't matter if we lie.                      */
# endif
# ifndef ENTER_GC
#   define ENTER_GC()
#   define EXIT_GC()
# endif

# ifndef DCL_LOCK_STATE
#   define DCL_LOCK_STATE
# endif
# ifndef FASTLOCK
#   define FASTLOCK() LOCK()
#   define FASTLOCK_SUCCEEDED() TRUE
#   define FASTUNLOCK() UNLOCK()
# endif

#endif /* GC_LOCKS_H */
