newlib-1.17.0/newlib/libc/sys/linux/bits/libc-lock.h  (OpenCores Subversion repository openrisc_me, trunk, rev 438)
/* libc-internal interface for mutex locks.  LinuxThreads version.
   Copyright (C) 1996,1997,1998,1999,2000,2001,2002,2003
        Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If not,
   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

#ifndef _BITS_LIBC_LOCK_H
#define _BITS_LIBC_LOCK_H 1

#include <pthread.h>

#if defined _LIBC && !defined NOT_IN_libc
#include <linuxthreads/internals.h>
#endif

#define _IO_MTSAFE_IO  /* add this as we always want this in newlib */

/* Mutex type.  */
#if defined(_LIBC) || defined(_IO_MTSAFE_IO)
typedef pthread_mutex_t __libc_lock_t;
typedef struct { pthread_mutex_t mutex; } __libc_lock_recursive_t;
# ifdef __USE_UNIX98
typedef pthread_rwlock_t __libc_rwlock_t;
# else
typedef struct __libc_rwlock_opaque__ __libc_rwlock_t;
# endif
typedef __libc_lock_recursive_t __rtld_lock_recursive_t;
#else
typedef struct __libc_lock_opaque__ __libc_lock_t;
typedef struct __libc_lock_recursive_opaque__ __libc_lock_recursive_t;
typedef struct __libc_rwlock_opaque__ __libc_rwlock_t;
#endif

/* Type for key to thread-specific data.  */
typedef pthread_key_t __libc_key_t;

/* Define a lock variable NAME with storage class CLASS.  The lock must be
   initialized with __libc_lock_init before it can be used (or define it
   with __libc_lock_define_initialized, below).  Use `extern' for CLASS to
   declare a lock defined in another module.  In public structure
   definitions you must use a pointer to the lock structure (i.e., NAME
   begins with a `*'), because its storage size will not be known outside
   of libc.  */
#define __libc_lock_define(CLASS,NAME) \
  CLASS __libc_lock_t NAME;
#define __libc_rwlock_define(CLASS,NAME) \
  CLASS __libc_rwlock_t NAME;
#define __libc_lock_define_recursive(CLASS,NAME) \
  CLASS __libc_lock_recursive_t NAME;
#define __rtld_lock_define_recursive(CLASS,NAME) \
  CLASS __rtld_lock_recursive_t NAME;
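
/* Usage sketch (added for illustration; not part of the upstream header,
   and the names `__foo_lock' and `foo_init' are hypothetical).  A module
   typically defines the lock in one translation unit and initializes it
   at run time before first use, as described in the comment above.  */
#if 0   /* never compiled -- example only */
__libc_lock_define (static, __foo_lock)

static void
foo_init (void)
{
  __libc_lock_init (__foo_lock);
}
#endif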

/* Define an initialized lock variable NAME with storage class CLASS.

   For the C library we take a deeper look at the initializer.  For
   this implementation all fields are initialized to zero.  Therefore
   we don't initialize the variable which allows putting it into the
   BSS section.  (Except on PA-RISC and other odd architectures, where
   initialized locks must be set to one due to the lack of normal
   atomic operations.) */

#if __LT_SPINLOCK_INIT == 0
#  define __libc_lock_define_initialized(CLASS,NAME) \
  CLASS __libc_lock_t NAME;
#else
#  define __libc_lock_define_initialized(CLASS,NAME) \
  CLASS __libc_lock_t NAME = PTHREAD_MUTEX_INITIALIZER;
#endif

#define __libc_rwlock_define_initialized(CLASS,NAME) \
  CLASS __libc_rwlock_t NAME = PTHREAD_RWLOCK_INITIALIZER;

/* Define an initialized recursive lock variable NAME with storage
   class CLASS.  */
#define __libc_lock_define_initialized_recursive(CLASS,NAME) \
  CLASS __libc_lock_recursive_t NAME = _LIBC_LOCK_RECURSIVE_INITIALIZER;
#define _LIBC_LOCK_RECURSIVE_INITIALIZER \
  {PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP}

#define __rtld_lock_define_initialized_recursive(CLASS,NAME) \
  CLASS __rtld_lock_recursive_t NAME = _RTLD_LOCK_RECURSIVE_INITIALIZER;
#define _RTLD_LOCK_RECURSIVE_INITIALIZER \
  {PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP}
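
/* Usage sketch (added for illustration; not part of the upstream header,
   and the names are hypothetical).  Statically initialized locks need no
   run-time __libc_lock_init call; the recursive variant may be re-locked
   by the thread that already holds it.  */
#if 0   /* never compiled -- example only */
__libc_lock_define_initialized (static, __bar_lock)
__libc_lock_define_initialized_recursive (static, __bar_recursive_lock)
#endif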

#if defined _LIBC && defined IS_IN_libpthread
# define __libc_maybe_call(FUNC, ARGS, ELSE) FUNC ARGS
#else
# if defined __PIC__ || (defined _LIBC && defined SHARED)
#  define __libc_maybe_call(FUNC, ARGS, ELSE) \
  (__extension__ ({ __typeof (FUNC) *_fn = (FUNC); \
                    _fn != NULL ? (*_fn) ARGS : ELSE; }))
# else
#  define __libc_maybe_call(FUNC, ARGS, ELSE) \
  (FUNC != NULL ? FUNC ARGS : ELSE)
# endif
#endif
#if defined _LIBC && !defined NOT_IN_libc && defined SHARED
# define __libc_maybe_call2(FUNC, ARGS, ELSE) \
  ({__builtin_expect (__libc_pthread_functions.ptr_##FUNC != NULL, 0)         \
    ? __libc_pthread_functions.ptr_##FUNC ARGS : ELSE; })
#else
# define __libc_maybe_call2(FUNC, ARGS, ELSE) __libc_maybe_call (__##FUNC, ARGS, ELSE)
#endif
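
/* Explanatory note (added; not in the upstream header).  Because the
   pthread functions are declared weak below, __libc_maybe_call only
   invokes FUNC when libpthread is actually linked in; otherwise it
   yields ELSE.  Roughly,

     __libc_maybe_call (__pthread_mutex_lock, (&m), 0)

   behaves like

     (__pthread_mutex_lock != NULL ? __pthread_mutex_lock (&m) : 0)

   so single-threaded programs skip the locking cost entirely.  */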

/* Initialize the named lock variable, leaving it in a consistent, unlocked
   state.  */
#if defined _LIBC && !defined NOT_IN_libc && defined SHARED
#define __libc_lock_init(NAME) \
  ({                                                                          \
    (NAME).__m_count = 0;                                                     \
    (NAME).__m_owner = NULL;                                                  \
    (NAME).__m_kind = PTHREAD_MUTEX_TIMED_NP;                                 \
    (NAME).__m_lock.__status = 0;                                             \
    (NAME).__m_lock.__spinlock = __LT_SPINLOCK_INIT;                          \
    0; })
#else
#define __libc_lock_init(NAME) \
  (__libc_maybe_call2 (pthread_mutex_init, (&(NAME), NULL), 0))
#endif
#define __libc_rwlock_init(NAME) \
  (__libc_maybe_call (__pthread_rwlock_init, (&(NAME), NULL), 0));

/* Same as last but this time we initialize a recursive mutex.  */
#if defined _LIBC && !defined NOT_IN_libc && defined SHARED
#define __libc_lock_init_recursive(NAME) \
  ({                                                                          \
    (NAME).mutex.__m_count = 0;                                               \
    (NAME).mutex.__m_owner = NULL;                                            \
    (NAME).mutex.__m_kind = PTHREAD_MUTEX_RECURSIVE_NP;                       \
    (NAME).mutex.__m_lock.__status = 0;                                       \
    (NAME).mutex.__m_lock.__spinlock = __LT_SPINLOCK_INIT;                    \
    0; })
#else
#define __libc_lock_init_recursive(NAME) \
  do {                                                                        \
    if (__pthread_mutex_init != NULL)                                         \
      {                                                                       \
        pthread_mutexattr_t __attr;                                           \
        __pthread_mutexattr_init (&__attr);                                   \
        __pthread_mutexattr_settype (&__attr, PTHREAD_MUTEX_RECURSIVE_NP);    \
        __pthread_mutex_init (&(NAME).mutex, &__attr);                        \
        __pthread_mutexattr_destroy (&__attr);                                \
      }                                                                       \
  } while (0);
#endif
#define __rtld_lock_init_recursive(NAME) \
  __libc_lock_init_recursive (NAME)
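
/* Usage sketch (added for illustration; not part of the upstream header,
   and the names are hypothetical).  A reader/writer lock is set up the
   same way as a plain lock:  */
#if 0   /* never compiled -- example only */
__libc_rwlock_define (static, __table_rwlock)

static void
table_setup (void)
{
  __libc_rwlock_init (__table_rwlock);
}
#endif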

/* Finalize the named lock variable, which must be locked.  It cannot be
   used again until __libc_lock_init is called again on it.  This must be
   called on a lock variable before the containing storage is reused.  */
#define __libc_lock_fini(NAME) \
  (__libc_maybe_call2 (pthread_mutex_destroy, (&(NAME)), 0));
#define __libc_rwlock_fini(NAME) \
  (__libc_maybe_call (__pthread_rwlock_destroy, (&(NAME)), 0));

/* Finalize recursive named lock.  */
#define __libc_lock_fini_recursive(NAME) __libc_lock_fini ((NAME).mutex)
#define __rtld_lock_fini_recursive(NAME) __libc_lock_fini_recursive (NAME)

/* Lock the named lock variable.  */
#define __libc_lock_lock(NAME) \
  (__libc_maybe_call2 (pthread_mutex_lock, (&(NAME)), 0));
#define __libc_rwlock_rdlock(NAME) \
  (__libc_maybe_call (__pthread_rwlock_rdlock, (&(NAME)), 0));
#define __libc_rwlock_wrlock(NAME) \
  (__libc_maybe_call (__pthread_rwlock_wrlock, (&(NAME)), 0));

/* Lock the recursive named lock variable.  */
#define __libc_lock_lock_recursive(NAME) __libc_lock_lock ((NAME).mutex)

/* Try to lock the named lock variable.  */
#define __libc_lock_trylock(NAME) \
  (__libc_maybe_call2 (pthread_mutex_trylock, (&(NAME)), 0))
#define __libc_rwlock_tryrdlock(NAME) \
  (__libc_maybe_call (__pthread_rwlock_tryrdlock, (&(NAME)), 0))
#define __libc_rwlock_trywrlock(NAME) \
  (__libc_maybe_call (__pthread_rwlock_trywrlock, (&(NAME)), 0))

/* Try to lock the recursive named lock variable.  */
#define __libc_lock_trylock_recursive(NAME) __libc_lock_trylock ((NAME).mutex)
#define __rtld_lock_trylock_recursive(NAME) \
  __libc_lock_trylock_recursive (NAME)

/* Unlock the named lock variable.  */
#define __libc_lock_unlock(NAME) \
  (__libc_maybe_call2 (pthread_mutex_unlock, (&(NAME)), 0));
#define __libc_rwlock_unlock(NAME) \
  (__libc_maybe_call (__pthread_rwlock_unlock, (&(NAME)), 0));

/* Unlock the recursive named lock variable.  */
#define __libc_lock_unlock_recursive(NAME) __libc_lock_unlock ((NAME).mutex)
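
/* Usage sketch (added for illustration; not part of the upstream header,
   and the names are hypothetical).  A critical section brackets access to
   shared state between the lock and unlock macros:  */
#if 0   /* never compiled -- example only */
__libc_lock_define_initialized (static, __counter_lock)
static int __shared_counter;

static int
bump_counter (void)
{
  int __val;
  __libc_lock_lock (__counter_lock);
  __val = ++__shared_counter;
  __libc_lock_unlock (__counter_lock);
  return __val;
}
#endif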

#if defined _LIBC && defined SHARED
# define __rtld_lock_default_lock_recursive(lock) \
  ++((pthread_mutex_t *)(lock))->__m_count;

# define __rtld_lock_default_unlock_recursive(lock) \
  --((pthread_mutex_t *)(lock))->__m_count;

# define __rtld_lock_lock_recursive(NAME) \
  GL(dl_rtld_lock_recursive) (&(NAME).mutex)

# define __rtld_lock_unlock_recursive(NAME) \
  GL(dl_rtld_unlock_recursive) (&(NAME).mutex)
#else
#define __rtld_lock_lock_recursive(NAME) __libc_lock_lock_recursive (NAME)
#define __rtld_lock_unlock_recursive(NAME) __libc_lock_unlock_recursive (NAME)
#endif

/* Define once control variable.  */
#if PTHREAD_ONCE_INIT == 0
/* Special case for static variables where we can avoid the initialization
   if it is zero.  */
# define __libc_once_define(CLASS, NAME) \
  CLASS pthread_once_t NAME
#else
# define __libc_once_define(CLASS, NAME) \
  CLASS pthread_once_t NAME = PTHREAD_ONCE_INIT
#endif

/* Call handler iff the first call.  */
#define __libc_once(ONCE_CONTROL, INIT_FUNCTION) \
  do {                                                                        \
    if (__pthread_once != NULL)                                               \
      __pthread_once (&(ONCE_CONTROL), (INIT_FUNCTION));                      \
    else if ((ONCE_CONTROL) == PTHREAD_ONCE_INIT) {                           \
      INIT_FUNCTION ();                                                       \
      (ONCE_CONTROL) = 2;                                                     \
    }                                                                         \
  } while (0)
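
/* Usage sketch (added for illustration; not part of the upstream header,
   and the names are hypothetical).  __libc_once runs the init routine at
   most once; when libpthread is not linked in it degrades to a plain
   flag check on the once-control variable:  */
#if 0   /* never compiled -- example only */
__libc_once_define (static, __table_once);

static void init_table (void);

static void
get_table (void)
{
  __libc_once (__table_once, init_table);
}
#endif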


/* Start critical region with cleanup.  */
#define __libc_cleanup_region_start(DOIT, FCT, ARG) \
  { struct _pthread_cleanup_buffer _buffer;                                   \
    int _avail = (DOIT) && _pthread_cleanup_push_defer != NULL;               \
    if (_avail) {                                                             \
      _pthread_cleanup_push_defer (&_buffer, (FCT), (ARG));                   \
    }

/* End critical region with cleanup.  */
#define __libc_cleanup_region_end(DOIT) \
    if (_avail) {                                                             \
      _pthread_cleanup_pop_restore (&_buffer, (DOIT));                        \
    }                                                                         \
  }

/* Sometimes we have to exit the block in the middle.  */
#define __libc_cleanup_end(DOIT) \
    if (_avail) {                                                             \
      _pthread_cleanup_pop_restore (&_buffer, (DOIT));                        \
    }

#define __libc_cleanup_push(fct, arg) \
    { struct _pthread_cleanup_buffer _buffer;                                 \
    __libc_maybe_call (_pthread_cleanup_push, (&_buffer, (fct), (arg)), 0)

#define __libc_cleanup_pop(execute) \
    __libc_maybe_call (_pthread_cleanup_pop, (&_buffer, execute), 0);         \
    }
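
/* Usage sketch (added for illustration; not part of the upstream header,
   and the names are hypothetical).  The region macros open and close a
   brace scope, so start/end must pair up lexically in the same function;
   the handler runs if the region is left via cancellation, and the end
   macro's DOIT argument says whether to run it on normal exit too:  */
#if 0   /* never compiled -- example only */
static void unlock_handler (void *__arg);

static void
guarded_read (void)
{
  __libc_cleanup_region_start (1, unlock_handler, NULL);
  /* ... blocking operation that may be a cancellation point ... */
  __libc_cleanup_region_end (0);
}
#endif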

/* Create thread-specific key.  */
#define __libc_key_create(KEY, DESTRUCTOR) \
  (__libc_maybe_call (__pthread_key_create, (KEY, DESTRUCTOR), 1))

/* Get thread-specific data.  */
#define __libc_getspecific(KEY) \
  (__libc_maybe_call (__pthread_getspecific, (KEY), NULL))

/* Set thread-specific data.  */
#define __libc_setspecific(KEY, VALUE) \
  (__libc_maybe_call (__pthread_setspecific, (KEY, VALUE), 0))
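
/* Usage sketch (added for illustration; not part of the upstream header;
   the names are hypothetical and <stdlib.h> is assumed for malloc/free).
   Thread-specific data keeps one slot per thread; the destructor runs
   when a thread exits with a non-NULL value stored:  */
#if 0   /* never compiled -- example only */
static __libc_key_t __buf_key;

static void
free_buffer (void *__ptr)
{
  free (__ptr);
}

static void
setup_key (void)
{
  __libc_key_create (&__buf_key, free_buffer);
}

static void *
get_buffer (void)
{
  void *__buf = __libc_getspecific (__buf_key);
  if (__buf == NULL)
    {
      __buf = malloc (100);
      __libc_setspecific (__buf_key, __buf);
    }
  return __buf;
}
#endif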


/* Register handlers to execute before and after `fork'.  */
#define __libc_atfork(PREPARE, PARENT, CHILD) \
  (__libc_maybe_call (__pthread_atfork, (PREPARE, PARENT, CHILD), 0))

/* Functions that are used by this file and are internal to the GNU C
   library.  */

extern int __pthread_mutex_init (pthread_mutex_t *__mutex,
                                 __const pthread_mutexattr_t *__mutex_attr);

extern int __pthread_mutex_destroy (pthread_mutex_t *__mutex);

extern int __pthread_mutex_trylock (pthread_mutex_t *__mutex);

extern int __pthread_mutex_lock (pthread_mutex_t *__mutex);

extern int __pthread_mutex_unlock (pthread_mutex_t *__mutex);

extern int __pthread_mutexattr_init (pthread_mutexattr_t *__attr);

extern int __pthread_mutexattr_destroy (pthread_mutexattr_t *__attr);

extern int __pthread_mutexattr_settype (pthread_mutexattr_t *__attr,
                                        int __kind);

#ifdef __USE_UNIX98
extern int __pthread_rwlock_init (pthread_rwlock_t *__rwlock,
                                  __const pthread_rwlockattr_t *__attr);

extern int __pthread_rwlock_destroy (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_rdlock (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_tryrdlock (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_wrlock (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_trywrlock (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_unlock (pthread_rwlock_t *__rwlock);
#endif

extern int __pthread_key_create (pthread_key_t *__key,
                                 void (*__destr_function) (void *));

extern int __pthread_setspecific (pthread_key_t __key,
                                  __const void *__pointer);

extern void *__pthread_getspecific (pthread_key_t __key);

extern int __pthread_once (pthread_once_t *__once_control,
                           void (*__init_routine) (void));

extern int __pthread_atfork (void (*__prepare) (void),
                             void (*__parent) (void),
                             void (*__child) (void));


/* Make the pthread functions weak so that we can elide them from
   single-threaded processes.  */
#ifndef __NO_WEAK_PTHREAD_ALIASES
#  pragma weak __pthread_mutex_init
#  pragma weak __pthread_mutex_destroy
#  pragma weak __pthread_mutex_lock
#  pragma weak __pthread_mutex_trylock
#  pragma weak __pthread_mutex_unlock
#  pragma weak __pthread_mutexattr_init
#  pragma weak __pthread_mutexattr_destroy
#  pragma weak __pthread_mutexattr_settype
#  pragma weak __pthread_rwlock_destroy
#  pragma weak __pthread_rwlock_rdlock
#  pragma weak __pthread_rwlock_tryrdlock
#  pragma weak __pthread_rwlock_wrlock
#  pragma weak __pthread_rwlock_trywrlock
#  pragma weak __pthread_rwlock_unlock
#  pragma weak __pthread_key_create
#  pragma weak __pthread_setspecific
#  pragma weak __pthread_getspecific
#  pragma weak __pthread_once
#  pragma weak __pthread_initialize
#  pragma weak __pthread_atfork
#  pragma weak _pthread_cleanup_push_defer
#  pragma weak _pthread_cleanup_pop_restore
#  pragma weak _pthread_cleanup_push
#  pragma weak _pthread_cleanup_pop
#endif

/* We need portable names for some functions.  E.g., when they are
   used as argument to __libc_cleanup_region_start.  */
#define __libc_mutex_unlock __pthread_mutex_unlock

#endif  /* bits/libc-lock.h */
