OpenCores Subversion repository: openrisc_me
URL: https://opencores.org/ocsvn/openrisc_me/openrisc_me/trunk
File: /openrisc/trunk/gnu-src/newlib-1.18.0/newlib/libc/sys/linux/linuxthreads/pthread.c - Blame information for rev 301


Line No. Rev Author Line
1 207 jeremybenn
 
2
/* Linuxthreads - a simple clone()-based implementation of Posix        */
3
/* threads for Linux.                                                   */
4
/* Copyright (C) 1996 Xavier Leroy (Xavier.Leroy@inria.fr)              */
5
/*                                                                      */
6
/* This program is free software; you can redistribute it and/or        */
7
/* modify it under the terms of the GNU Library General Public License  */
8
/* as published by the Free Software Foundation; either version 2       */
9
/* of the License, or (at your option) any later version.               */
10
/*                                                                      */
11
/* This program is distributed in the hope that it will be useful,      */
12
/* but WITHOUT ANY WARRANTY; without even the implied warranty of       */
13
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the        */
14
/* GNU Library General Public License for more details.                 */
15
 
16
/* Thread creation, initialization, and basic low-level routines */
17
 
18
#include <errno.h>
19
#include <stddef.h>
20
#include <stdio.h>
21
#include <stdlib.h>
22
#include <string.h>
23
#include <unistd.h>
24
#include <fcntl.h>
25
#include <sys/wait.h>
26
#include <sys/resource.h>
27
#include <sys/sysctl.h>
28
#include <shlib-compat.h>
29
#include "pthread.h"
30
#include "internals.h"
31
#include "spinlock.h"
32
#include "restart.h"
33
#include <machine/syscall.h>
34
 
35
/* For threading we use processes, so we require a few EL/IX level 2 and
36
   level 3 syscalls.  We only allow this file to see them to preserve
37
   the interface. */
38
#if defined(_ELIX_LEVEL) && _ELIX_LEVEL < 3
39
static _syscall1_base(int,pipe,int *,filedes)
40
#endif /* _ELIX_LEVEL < 3 */
41
 
42
#if defined(_ELIX_LEVEL) && _ELIX_LEVEL < 2
43
static _syscall2_base(int,setrlimit,int,resource,const struct rlimit *,rlp)
44
int on_exit (void (*fn)(int, void *), void *arg)
45
{
46
  register struct _atexit *p;
47
  void (*x)(void) = (void (*)(void))fn;
48
 
49
/* _REENT_SMALL on_exit() doesn't allow more than the required 32 entries.  */
50
#ifndef _REENT_SMALL
51
  if ((p = _REENT->_atexit) == NULL)
52
    _REENT->_atexit = p = &_REENT->_atexit0;
53
  if (p->_ind >= _ATEXIT_SIZE)
54
    {
55
      if ((p = (struct _atexit *) malloc (sizeof *p)) == NULL)
56
        return -1;
57
      p->_ind = 0;
58
      p->_fntypes = 0;
59
      p->_next = _REENT->_atexit;
60
      _REENT->_atexit = p;
61
    }
62
#else
63
  p = &_REENT->_atexit;
64
  if (p->_ind >= _ATEXIT_SIZE)
65
    return -1;
66
#endif
67
  p->_fntypes |= (1 << p->_ind);
68
  p->_fnargs[p->_ind] = arg;
69
  p->_fns[p->_ind++] = x;
70
  return 0;
71
}
72
 
73
#endif /* _ELIX_LEVEL < 2 */
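For reference, on_exit() (the GNU/newlib extension implemented above for low EL/IX levels and used later in this file to register pthread_onexit_process) passes both the exit status and a caller-supplied argument to the handler. A minimal, self-contained sketch of that contract (illustrative only, not part of this file):

#include <stdio.h>
#include <stdlib.h>

static void report (int status, void *arg)
{
  /* Handlers registered with on_exit() receive the exit status and the
     argument that was supplied at registration time.  */
  fprintf (stderr, "%s: exiting with status %d\n", (const char *) arg, status);
}

int main (void)
{
  on_exit (report, "demo");   /* handlers run in reverse registration order */
  exit (42);                  /* report() runs during exit processing */
}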
74
 
75
/* We need the global/static resolver state here.  */
76
#include <resolv.h>
77
#undef _res
78
 
79
/* FIXME: for now, set up _res here */
80
struct __res_state _res;
81
 
82
/* Sanity check.  */
83
#if __ASSUME_REALTIME_SIGNALS && !defined __SIGRTMIN
84
# error "This must not happen; new kernel assumed but old headers"
85
#endif
86
 
87
/* These variables are used by the setup code.  */
88
 
89
/* Descriptor of the initial thread */
90
 
91
struct _pthread_descr_struct __pthread_initial_thread = {
92
  {
93
    {
94
      &__pthread_initial_thread /* pthread_descr self */
95
    }
96
  },
97
  &__pthread_initial_thread,  /* pthread_descr p_nextlive */
98
  &__pthread_initial_thread,  /* pthread_descr p_prevlive */
99
  NULL,                       /* pthread_descr p_nextwaiting */
100
  NULL,                       /* pthread_descr p_nextlock */
101
  PTHREAD_THREADS_MAX,        /* pthread_t p_tid */
102
  0,                          /* int p_pid */
103
  0,                          /* int p_priority */
104
  &__pthread_handles[0].h_lock, /* struct _pthread_fastlock * p_lock */
105
  0,                          /* int p_signal */
106
  NULL,                       /* sigjmp_buf * p_signal_jmp */
107
  NULL,                       /* sigjmp_buf * p_cancel_jmp */
108
  0,                          /* char p_terminated */
109
  0,                          /* char p_detached */
110
  0,                          /* char p_exited */
111
  NULL,                       /* void * p_retval */
112
  0,                          /* int p_retcode */
113
  NULL,                       /* pthread_descr p_joining */
114
  NULL,                       /* struct _pthread_cleanup_buffer * p_cleanup */
115
  0,                          /* char p_cancelstate */
116
  0,                          /* char p_canceltype */
117
  0,                          /* char p_canceled */
118
  &__pthread_initial_thread.p_reent, /* struct _reent *p_reentp */
119
  _REENT_INIT(__pthread_initial_thread.p_reent),  /* struct _reent p_reent */
120
  NULL,                       /* int *p_h_errnop */
121
  0,                          /* int p_h_errno */
122
  NULL,                       /* char * p_in_sighandler */
123
  0,                          /* char p_sigwaiting */
124
  PTHREAD_START_ARGS_INITIALIZER(NULL),
125
                              /* struct pthread_start_args p_start_args */
126
  {NULL},                     /* void ** p_specific[PTHREAD_KEY_1STLEVEL_SIZE] */
127
  {NULL},                     /* void * p_libc_specific[_LIBC_TSD_KEY_N] */
128
  1,                          /* int p_userstack */
129
  NULL,                       /* void * p_guardaddr */
130
  0,                          /* size_t p_guardsize */
131
  0,                          /* Always index 0 */
132
  0,                          /* int p_report_events */
133
  {{{0, }}, 0, NULL},         /* td_eventbuf_t p_eventbuf */
134
  __ATOMIC_INITIALIZER,       /* struct pthread_atomic p_resume_count */
135
  0,                          /* char p_woken_by_cancel */
136
  0,                          /* char p_condvar_avail */
137
  0,                          /* char p_sem_avail */
138
  NULL,                       /* struct pthread_extricate_if *p_extricate */
139
  NULL,                       /* pthread_readlock_info *p_readlock_list; */
140
  NULL,                       /* pthread_readlock_info *p_readlock_free; */
141
 
142
};
143
 
144
/* Descriptor of the manager thread; none of this is used but the error
145
   variables, the p_pid and p_priority fields,
146
   and the address for identification.  */
147
 
148
struct _pthread_descr_struct __pthread_manager_thread = {
149
  {
150
    {
151
      &__pthread_manager_thread /* pthread_descr self */
152
    }
153
  },
154
  NULL,                       /* pthread_descr p_nextlive */
155
  NULL,                       /* pthread_descr p_prevlive */
156
  NULL,                       /* pthread_descr p_nextwaiting */
157
  NULL,                       /* pthread_descr p_nextlock */
158
  0,                          /* int p_tid */
159
  0,                          /* int p_pid */
160
  0,                          /* int p_priority */
161
  &__pthread_handles[1].h_lock, /* struct _pthread_fastlock * p_lock */
162
  0,                          /* int p_signal */
163
  NULL,                       /* sigjmp_buf * p_signal_jmp */
164
  NULL,                       /* sigjmp_buf * p_cancel_jmp */
165
  0,                          /* char p_terminated */
166
  0,                          /* char p_detached */
167
  0,                          /* char p_exited */
168
  NULL,                       /* void * p_retval */
169
  0,                          /* int p_retcode */
170
  NULL,                       /* pthread_descr p_joining */
171
  NULL,                       /* struct _pthread_cleanup_buffer * p_cleanup */
172
  0,                          /* char p_cancelstate */
173
  0,                          /* char p_canceltype */
174
  0,                          /* char p_canceled */
175
  &__pthread_manager_thread.p_reent, /* struct _reent *p_reentp */
176
  _REENT_INIT(__pthread_manager_thread.p_reent), /* struct _reent p_reent */
177
  NULL,                       /* int *p_h_errnop */
178
  0,                          /* int p_h_errno */
179
  NULL,                       /* char * p_in_sighandler */
180
  0,                          /* char p_sigwaiting */
181
  PTHREAD_START_ARGS_INITIALIZER(__pthread_manager),
182
                              /* struct pthread_start_args p_start_args */
183
  {NULL},                     /* void ** p_specific[PTHREAD_KEY_1STLEVEL_SIZE] */
184
  {NULL},                     /* void * p_libc_specific[_LIBC_TSD_KEY_N] */
185
  0,                          /* int p_userstack */
186
  NULL,                       /* void * p_guardaddr */
187
  0,                          /* size_t p_guardsize */
188
  1,                          /* Always index 1 */
189
  0,                          /* int p_report_events */
190
  {{{0, }}, 0, NULL},         /* td_eventbuf_t p_eventbuf */
191
  __ATOMIC_INITIALIZER,       /* struct pthread_atomic p_resume_count */
192
  0,                          /* char p_woken_by_cancel */
193
  0,                          /* char p_condvar_avail */
194
  0,                          /* char p_sem_avail */
195
  NULL,                       /* struct pthread_extricate_if *p_extricate */
196
  NULL,                       /* pthread_readlock_info *p_readlock_list; */
197
  NULL,                       /* pthread_readlock_info *p_readlock_free; */
198
 
199
};
200
 
201
/* Pointer to the main thread (the father of the thread manager thread) */
202
/* Originally, this is the initial thread, but this changes after fork() */
203
 
204
pthread_descr __pthread_main_thread = &__pthread_initial_thread;
205
 
206
/* Limit between the stack of the initial thread (above) and the
207
   stacks of other threads (below). Aligned on a STACK_SIZE boundary. */
208
 
209
char *__pthread_initial_thread_bos;
210
 
211
/* File descriptor for sending requests to the thread manager. */
212
/* Initially -1, meaning that the thread manager is not running. */
213
 
214
int __pthread_manager_request = -1;
215
 
216
/* Other end of the pipe for sending requests to the thread manager. */
217
 
218
int __pthread_manager_reader;
219
 
220
/* Limits of the thread manager stack */
221
 
222
char *__pthread_manager_thread_bos;
223
char *__pthread_manager_thread_tos;
224
 
225
/* For process-wide exit() */
226
 
227
int __pthread_exit_requested;
228
int __pthread_exit_code;
229
 
230
/* Maximum stack size.  */
231
size_t __pthread_max_stacksize;
232
 
233
/* Nonzero if the machine has more than one processor.  */
234
int __pthread_smp_kernel;
235
 
236
 
237
#if !__ASSUME_REALTIME_SIGNALS
238
/* Pointers that select new or old suspend/resume functions
239
   based on availability of rt signals. */
240
 
241
void (*__pthread_restart)(pthread_descr) = __pthread_restart_old;
242
void (*__pthread_suspend)(pthread_descr) = __pthread_suspend_old;
243
int (*__pthread_timedsuspend)(pthread_descr, const struct timespec *) = __pthread_timedsuspend_old;
244
#endif  /* __ASSUME_REALTIME_SIGNALS */
245
 
246
/* Communicate relevant LinuxThreads constants to gdb */
247
 
248
const int __pthread_threads_max = PTHREAD_THREADS_MAX;
249
const int __pthread_sizeof_handle = sizeof(struct pthread_handle_struct);
250
const int __pthread_offsetof_descr = offsetof(struct pthread_handle_struct,
251
                                              h_descr);
252
const int __pthread_offsetof_pid = offsetof(struct _pthread_descr_struct,
253
                                            p_pid);
254
const int __linuxthreads_pthread_sizeof_descr
255
  = sizeof(struct _pthread_descr_struct);
256
 
257
/* Forward declarations */
258
 
259
static void pthread_onexit_process(int retcode, void *arg);
260
#ifndef HAVE_Z_NODELETE
261
static void pthread_atexit_process(void *arg, int retcode);
262
static void pthread_atexit_retcode(void *arg, int retcode);
263
#endif
264
static void pthread_handle_sigcancel(int sig);
265
static void pthread_handle_sigrestart(int sig);
266
static void pthread_handle_sigdebug(int sig);
267
 
268
/* CPU clock handling.  */
269
#if HP_TIMING_AVAIL
270
extern hp_timing_t _dl_cpuclock_offset;
271
#endif
272
 
273
/* Signal numbers used for the communication.
274
   In these variables we keep track of the used variables.  If the
275
   platform does not support any real-time signals we will define the
276
   values to some unreasonable value which will signal failing of all
277
   the functions below.  */
278
#ifndef __SIGRTMIN
279
static int current_rtmin = -1;
280
static int current_rtmax = -1;
281
int __pthread_sig_restart = SIGUSR1;
282
int __pthread_sig_cancel = SIGUSR2;
283
int __pthread_sig_debug;
284
#else
285
static int current_rtmin;
286
static int current_rtmax;
287
 
288
#if __SIGRTMAX - __SIGRTMIN >= 3
289
int __pthread_sig_restart = __SIGRTMIN;
290
int __pthread_sig_cancel = __SIGRTMIN + 1;
291
int __pthread_sig_debug = __SIGRTMIN + 2;
292
#else
293
int __pthread_sig_restart = SIGUSR1;
294
int __pthread_sig_cancel = SIGUSR2;
295
int __pthread_sig_debug;
296
#endif
297
 
298
static int rtsigs_initialized;
299
 
300
#if !__ASSUME_REALTIME_SIGNALS
301
# include "testrtsig.h"
302
#endif
303
 
304
static void
305
init_rtsigs (void)
306
{
307
#if !__ASSUME_REALTIME_SIGNALS
308
  if (__builtin_expect (!kernel_has_rtsig (), 0))
309
    {
310
      current_rtmin = -1;
311
      current_rtmax = -1;
312
# if __SIGRTMAX - __SIGRTMIN >= 3
313
      __pthread_sig_restart = SIGUSR1;
314
      __pthread_sig_cancel = SIGUSR2;
315
      __pthread_sig_debug = 0;
316
# endif
317
    }
318
  else
319
#endif  /* __ASSUME_REALTIME_SIGNALS */
320
    {
321
#if __SIGRTMAX - __SIGRTMIN >= 3
322
      current_rtmin = __SIGRTMIN + 3;
323
# if !__ASSUME_REALTIME_SIGNALS
324
      __pthread_restart = __pthread_restart_new;
325
      __pthread_suspend = __pthread_wait_for_restart_signal;
326
      __pthread_timedsuspend = __pthread_timedsuspend_new;
327
# endif /* __ASSUME_REALTIME_SIGNALS */
328
#else
329
      current_rtmin = __SIGRTMIN;
330
#endif
331
 
332
      current_rtmax = __SIGRTMAX;
333
    }
334
 
335
  rtsigs_initialized = 1;
336
}
337
#endif
338
 
339
/* Return number of available real-time signal with highest priority.  */
340
int
341
__libc_current_sigrtmin (void)
342
{
343
#ifdef __SIGRTMIN
344
  if (__builtin_expect (!rtsigs_initialized, 0))
345
    init_rtsigs ();
346
#endif
347
  return current_rtmin;
348
}
349
 
350
/* Return number of available real-time signal with lowest priority.  */
351
int
352
__libc_current_sigrtmax (void)
353
{
354
#ifdef __SIGRTMIN
355
  if (__builtin_expect (!rtsigs_initialized, 0))
356
    init_rtsigs ();
357
#endif
358
  return current_rtmax;
359
}
360
 
361
/* Allocate real-time signal with highest/lowest available
362
   priority.  Please note that we don't use a lock since we assume
363
   this function to be called at program start.  */
364
int
365
__libc_allocate_rtsig (int high)
366
{
367
#ifndef __SIGRTMIN
368
  return -1;
369
#else
370
  if (__builtin_expect (!rtsigs_initialized, 0))
371
    init_rtsigs ();
372
  if (__builtin_expect (current_rtmin == -1, 0)
373
      || __builtin_expect (current_rtmin > current_rtmax, 0))
374
    /* We don't have any more signals available.  */
375
    return -1;
376
 
377
  return high ? current_rtmin++ : current_rtmax--;
378
#endif
379
}
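A hedged sketch of how a library component might consume the allocator above: reserve one real-time signal from the highest-priority end and install a handler for it, falling back when none is free. The names here are illustrative, and the declaration of __libc_allocate_rtsig is assumed to be in scope:

#include <signal.h>

static void my_handler (int sig)
{
  /* ... react to the reserved signal ... */
}

static int setup_private_signal (void)
{
  struct sigaction sa;
  int sig = __libc_allocate_rtsig (1);   /* 1 = highest available priority */

  if (sig == -1)
    return -1;                           /* no free real-time signal */

  sa.sa_handler = my_handler;
  sigemptyset (&sa.sa_mask);
  sa.sa_flags = 0;
  return sigaction (sig, &sa, NULL);
}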
380
 
381
/* The function we use to get the kernel revision.  */
382
extern int __sysctl (int *name, int nlen, void *oldval, size_t *oldlenp,
383
                     void *newval, size_t newlen);
384
 
385
/* Test whether the machine has more than one processor.  This is not the
386
   best test but good enough.  More complicated tests would require `malloc'
387
   which is not available at that time.  */
388
static int
389
is_smp_system (void)
390
{
391
  static const int sysctl_args[] = { CTL_KERN, KERN_VERSION };
392
  char buf[512];
393
  size_t reslen = sizeof (buf);
394
 
395
  /* Try reading the number using `sysctl' first.  */
396
  if (__sysctl ((int *) sysctl_args,
397
                sizeof (sysctl_args) / sizeof (sysctl_args[0]),
398
                buf, &reslen, NULL, 0) < 0)
399
    {
400
      /* This was not successful.  Now try reading the /proc filesystem.  */
401
      int fd = __open ("/proc/sys/kernel/version", O_RDONLY);
402
      if (__builtin_expect (fd, 0) == -1
403
          || (reslen = __read (fd, buf, sizeof (buf))) <= 0)
404
        /* This also didn't work.  We give up and say it's a UP machine.  */
405
        buf[0] = '\0';
406
 
407
      __close (fd);
408
    }
409
 
410
  return strstr (buf, "SMP") != NULL;
411
}
412
 
413
 
414
/* Initialize the pthread library.
415
   Initialization is split in two functions:
416
   - a constructor function that blocks the __pthread_sig_restart signal
417
     (must do this very early, since the program could capture the signal
418
      mask with e.g. sigsetjmp before creating the first thread);
419
   - a regular function called from pthread_create when needed. */
420
 
421
static void pthread_initialize(void) __attribute__((constructor));
422
 
423
#ifndef HAVE_Z_NODELETE
424
extern void *__dso_handle __attribute__ ((weak));
425
#endif
426
 
427
 
428
/* Do some minimal initialization which has to be done during the
429
   startup of the C library.  */
430
void
431
__pthread_initialize_minimal(void)
432
{
433
  /* If we have special thread_self processing, initialize that for the
434
     main thread now.  */
435
#ifdef INIT_THREAD_SELF
436
  INIT_THREAD_SELF(&__pthread_initial_thread, 0);
437
#endif
438
#if HP_TIMING_AVAIL
439
  __pthread_initial_thread.p_cpuclock_offset = _dl_cpuclock_offset;
440
#endif
441
}
442
 
443
 
444
void
445
__pthread_init_max_stacksize(void)
446
{
447
  struct rlimit limit;
448
  size_t max_stack;
449
 
450
  getrlimit(RLIMIT_STACK, &limit);
451
#ifdef FLOATING_STACKS
452
  if (limit.rlim_cur == RLIM_INFINITY)
453
    limit.rlim_cur = ARCH_STACK_MAX_SIZE;
454
# ifdef NEED_SEPARATE_REGISTER_STACK
455
  max_stack = limit.rlim_cur / 2;
456
# else
457
  max_stack = limit.rlim_cur;
458
# endif
459
#else
460
  /* Play with the stack size limit to make sure that no stack ever grows
461
     beyond STACK_SIZE minus one page (to act as a guard page). */
462
# ifdef NEED_SEPARATE_REGISTER_STACK
463
  /* STACK_SIZE bytes hold both the main stack and register backing
464
     store. The rlimit value applies to each individually.  */
465
  max_stack = STACK_SIZE/2 - __getpagesize ();
466
# else
467
  max_stack = STACK_SIZE - __getpagesize();
468
# endif
469
  if (limit.rlim_cur > max_stack) {
470
    limit.rlim_cur = max_stack;
471
    __libc_setrlimit(RLIMIT_STACK, &limit);
472
  }
473
#endif
474
  __pthread_max_stacksize = max_stack;
475
}
476
 
477
 
478
static void pthread_initialize(void)
479
{
480
  struct sigaction sa;
481
  sigset_t mask;
482
 
483
  /* If already done (e.g. by a constructor called earlier!), bail out */
484
  if (__pthread_initial_thread_bos != NULL) return;
485
#ifdef TEST_FOR_COMPARE_AND_SWAP
486
  /* Test if compare-and-swap is available */
487
  __pthread_has_cas = compare_and_swap_is_available();
488
#endif
489
#ifdef FLOATING_STACKS
490
  /* We don't need to know the bottom of the stack.  Give the pointer some
491
     value to signal that initialization happened.  */
492
  __pthread_initial_thread_bos = (void *) -1l;
493
#else
494
  /* Determine stack size limits.  */
495
  __pthread_init_max_stacksize ();
496
# ifdef _STACK_GROWS_UP
497
  /* The initial thread already has all the stack it needs */
498
  __pthread_initial_thread_bos = (char *)
499
    ((long)CURRENT_STACK_FRAME &~ (STACK_SIZE - 1));
500
# else
501
  /* For the initial stack, reserve at least STACK_SIZE bytes of stack
502
     below the current stack address, and align that on a
503
     STACK_SIZE boundary. */
504
  __pthread_initial_thread_bos =
505
    (char *)(((long)CURRENT_STACK_FRAME - 2 * STACK_SIZE) & ~(STACK_SIZE - 1));
506
# endif
507
#endif
508
  /* Update the descriptor for the initial thread. */
509
  __pthread_initial_thread.p_pid = __getpid();
510
  /* Likewise for the resolver state _res.  */
511
  __pthread_initial_thread.p_resp = &_res;
512
#ifdef __SIGRTMIN
513
  /* Initialize real-time signals. */
514
  init_rtsigs ();
515
#endif
516
  /* Setup signal handlers for the initial thread.
517
     Since signal handlers are shared between threads, these settings
518
     will be inherited by all other threads. */
519
  sa.sa_handler = pthread_handle_sigrestart;
520
  sigemptyset(&sa.sa_mask);
521
  sa.sa_flags = 0;
522
  __libc_sigaction(__pthread_sig_restart, &sa, NULL);
523
  sa.sa_handler = pthread_handle_sigcancel;
524
  // sa.sa_flags = 0;
525
  __libc_sigaction(__pthread_sig_cancel, &sa, NULL);
526
  if (__pthread_sig_debug > 0) {
527
    sa.sa_handler = pthread_handle_sigdebug;
528
    sigemptyset(&sa.sa_mask);
529
    // sa.sa_flags = 0;
530
    __libc_sigaction(__pthread_sig_debug, &sa, NULL);
531
  }
532
  /* Initially, block __pthread_sig_restart. Will be unblocked on demand. */
533
  sigemptyset(&mask);
534
  sigaddset(&mask, __pthread_sig_restart);
535
  sigprocmask(SIG_BLOCK, &mask, NULL);
536
  /* Register an exit function to kill all other threads. */
537
  /* Do it early so that user-registered atexit functions are called
538
     before pthread_*exit_process. */
539
#ifndef HAVE_Z_NODELETE
540
  if (__builtin_expect (&__dso_handle != NULL, 1))
541
    __cxa_atexit ((void (*) (void *)) pthread_atexit_process, NULL,
542
                  __dso_handle);
543
  else
544
#endif
545
    on_exit (pthread_onexit_process, NULL);
546
  /* How many processors.  */
547
  __pthread_smp_kernel = is_smp_system ();
548
}
549
 
550
void __pthread_initialize(void)
551
{
552
  pthread_initialize();
553
}
554
 
555
int __pthread_initialize_manager(void)
556
{
557
  int manager_pipe[2];
558
  int pid;
559
  struct pthread_request request;
560
 
561
#ifndef HAVE_Z_NODELETE
562
  if (__builtin_expect (&__dso_handle != NULL, 1))
563
    __cxa_atexit ((void (*) (void *)) pthread_atexit_retcode, NULL,
564
                  __dso_handle);
565
#endif
566
 
567
  if (__pthread_max_stacksize == 0)
568
    __pthread_init_max_stacksize ();
569
  /* If basic initialization not done yet (e.g. we're called from a
570
     constructor run before our constructor), do it now */
571
  if (__pthread_initial_thread_bos == NULL) pthread_initialize();
572
  /* Setup stack for thread manager */
573
  __pthread_manager_thread_bos = malloc(THREAD_MANAGER_STACK_SIZE);
574
  if (__pthread_manager_thread_bos == NULL) return -1;
575
  __pthread_manager_thread_tos =
576
    __pthread_manager_thread_bos + THREAD_MANAGER_STACK_SIZE;
577
  /* Setup pipe to communicate with thread manager */
578
  if (__libc_pipe(manager_pipe) == -1) {
579
    free(__pthread_manager_thread_bos);
580
    return -1;
581
  }
582
  /* Start the thread manager */
583
  pid = 0;
584
  if (__builtin_expect (__pthread_initial_thread.p_report_events, 0))
585
    {
586
      /* It's a bit more complicated.  We have to report the creation of
587
         the manager thread.  */
588
      int idx = __td_eventword (TD_CREATE);
589
      uint32_t mask = __td_eventmask (TD_CREATE);
590
 
591
      if ((mask & (__pthread_threads_events.event_bits[idx]
592
                   | __pthread_initial_thread.p_eventbuf.eventmask.event_bits[idx]))
593
          != 0)
594
        {
595
          __pthread_lock(__pthread_manager_thread.p_lock, NULL);
596
 
597
#ifdef NEED_SEPARATE_REGISTER_STACK
598
          pid = __clone2(__pthread_manager_event,
599
                         (void **) __pthread_manager_thread_bos,
600
                         THREAD_MANAGER_STACK_SIZE,
601
                         CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
602
                         (void *)(long)manager_pipe[0]);
603
#elif _STACK_GROWS_UP
604
          pid = __clone(__pthread_manager_event,
605
                        (void **) __pthread_manager_thread_bos,
606
                        CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
607
                        (void *)(long)manager_pipe[0]);
608
#else
609
          pid = __clone(__pthread_manager_event,
610
                        (void **) __pthread_manager_thread_tos,
611
                        CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
612
                        (void *)(long)manager_pipe[0]);
613
#endif
614
 
615
          if (pid != -1)
616
            {
617
              /* Now fill in the information about the new thread in
618
                 the newly created thread's data structure.  We cannot let
619
                 the new thread do this since we don't know whether it was
620
                 already scheduled when we send the event.  */
621
              __pthread_manager_thread.p_eventbuf.eventdata =
622
                &__pthread_manager_thread;
623
              __pthread_manager_thread.p_eventbuf.eventnum = TD_CREATE;
624
              __pthread_last_event = &__pthread_manager_thread;
625
              __pthread_manager_thread.p_tid = 2* PTHREAD_THREADS_MAX + 1;
626
              __pthread_manager_thread.p_pid = pid;
627
 
628
              /* Now call the function which signals the event.  */
629
              __linuxthreads_create_event ();
630
            }
631
 
632
          /* Now restart the thread.  */
633
          __pthread_unlock(__pthread_manager_thread.p_lock);
634
        }
635
    }
636
 
637
  if (__builtin_expect (pid, 0) == 0)
638
    {
639
#ifdef NEED_SEPARATE_REGISTER_STACK
640
      pid = __clone2(__pthread_manager, (void **) __pthread_manager_thread_bos,
641
                     THREAD_MANAGER_STACK_SIZE,
642
                     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
643
                     (void *)(long)manager_pipe[0]);
644
#elif _STACK_GROWS_UP
645
      pid = __clone(__pthread_manager, (void **) __pthread_manager_thread_bos,
646
                    CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
647
                    (void *)(long)manager_pipe[0]);
648
#else
649
      pid = __clone(__pthread_manager, (void **) __pthread_manager_thread_tos,
650
                    CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
651
                    (void *)(long)manager_pipe[0]);
652
#endif
653
    }
654
  if (__builtin_expect (pid, 0) == -1) {
655
    free(__pthread_manager_thread_bos);
656
    __libc_close(manager_pipe[0]);
657
    __libc_close(manager_pipe[1]);
658
    return -1;
659
  }
660
  __pthread_manager_request = manager_pipe[1]; /* writing end */
661
  __pthread_manager_reader = manager_pipe[0]; /* reading end */
662
  __pthread_manager_thread.p_tid = 2* PTHREAD_THREADS_MAX + 1;
663
  __pthread_manager_thread.p_pid = pid;
664
  /* Make gdb aware of new thread manager */
665
  if (__builtin_expect (__pthread_threads_debug, 0) && __pthread_sig_debug > 0)
666
    {
667
      raise(__pthread_sig_debug);
668
      /* We suspend ourselves and gdb will wake us up when it is
669
         ready to handle us. */
670
      __pthread_wait_for_restart_signal(thread_self());
671
    }
672
  /* Synchronize debugging of the thread manager */
673
  request.req_kind = REQ_DEBUG;
674
  TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
675
                                  (char *) &request, sizeof(request)));
676
  return 0;
677
}
678
 
679
/* Thread creation */
680
 
681
int __pthread_create_2_1(pthread_t *thread, const pthread_attr_t *attr,
682
                         void * (*start_routine)(void *), void *arg)
683
{
684
  pthread_descr self = thread_self();
685
  struct pthread_request request;
686
  int retval;
687
  if (__builtin_expect (__pthread_manager_request, 0) < 0) {
688
    if (__pthread_initialize_manager() < 0) return EAGAIN;
689
  }
690
  request.req_thread = self;
691
  request.req_kind = REQ_CREATE;
692
  request.req_args.create.attr = attr;
693
  request.req_args.create.fn = start_routine;
694
  request.req_args.create.arg = arg;
695
  sigprocmask(SIG_SETMASK, (const sigset_t *) NULL,
696
              &request.req_args.create.mask);
697
  TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
698
                                  (char *) &request, sizeof(request)));
699
  suspend(self);
700
  retval = THREAD_GETMEM(self, p_retcode);
701
  if (__builtin_expect (retval, 0) == 0)
702
    *thread = (pthread_t) THREAD_GETMEM(self, p_retval);
703
  return retval;
704
}
705
 
706
versioned_symbol (libpthread, __pthread_create_2_1, pthread_create, GLIBC_2_1);
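For orientation, the caller-facing side of the request/suspend handshake above is the standard POSIX thread-creation API; the first call reaches __pthread_initialize_manager() and EAGAIN is returned if the manager cannot be started. A minimal usage sketch (not part of this file):

#include <pthread.h>
#include <stdio.h>

static void *worker (void *arg)
{
  printf ("worker received %d\n", *(int *) arg);
  return arg;                       /* retrieved by pthread_join() below */
}

int main (void)
{
  pthread_t tid;
  int value = 7;
  void *result;

  if (pthread_create (&tid, NULL, worker, &value) != 0)
    return 1;                       /* e.g. EAGAIN: manager did not start */
  pthread_join (tid, &result);
  printf ("worker returned %d\n", *(int *) result);
  return 0;
}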
707
 
708
#if SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_1)
709
 
710
int __pthread_create_2_0(pthread_t *thread, const pthread_attr_t *attr,
711
                         void * (*start_routine)(void *), void *arg)
712
{
713
  /* The ATTR attribute is not really of type `pthread_attr_t *'.  It has
714
     the old size and access to the new members might crash the program.
715
     We convert the struct now.  */
716
  pthread_attr_t new_attr;
717
 
718
  if (attr != NULL)
719
    {
720
      size_t ps = __getpagesize ();
721
 
722
      memcpy (&new_attr, attr,
723
              (size_t) &(((pthread_attr_t*)NULL)->__guardsize));
724
      new_attr.__guardsize = ps;
725
      new_attr.__stackaddr_set = 0;
726
      new_attr.__stackaddr = NULL;
727
      new_attr.__stacksize = STACK_SIZE - ps;
728
      attr = &new_attr;
729
    }
730
  return __pthread_create_2_1 (thread, attr, start_routine, arg);
731
}
732
compat_symbol (libpthread, __pthread_create_2_0, pthread_create, GLIBC_2_0);
733
#endif
734
 
735
/* Simple operations on thread identifiers */
736
 
737
pthread_t pthread_self(void)
738
{
739
  pthread_descr self = thread_self();
740
  return THREAD_GETMEM(self, p_tid);
741
}
742
 
743
int pthread_equal(pthread_t thread1, pthread_t thread2)
744
{
745
  return thread1 == thread2;
746
}
747
 
748
/* Helper function for thread_self in the case of user-provided stacks */
749
 
750
#ifndef THREAD_SELF
751
 
752
pthread_descr __pthread_find_self(void)
753
{
754
  char * sp = CURRENT_STACK_FRAME;
755
  pthread_handle h;
756
 
757
  /* __pthread_handles[0] is the initial thread, __pthread_handles[1] is
758
     the manager thread; both are handled specially in thread_self(), so start at 2 */
759
  h = __pthread_handles + 2;
760
  while (! (sp <= (char *) h->h_descr && sp >= h->h_bottom)) h++;
761
  return h->h_descr;
762
}
763
 
764
#else
765
 
766
static pthread_descr thread_self_stack(void)
767
{
768
  char *sp = CURRENT_STACK_FRAME;
769
  pthread_handle h;
770
 
771
  if (sp >= __pthread_manager_thread_bos && sp < __pthread_manager_thread_tos)
772
    return &__pthread_manager_thread;
773
  h = __pthread_handles + 2;
774
  while (! (sp <= (char *) h->h_descr && sp >= h->h_bottom))
775
    h++;
776
  return h->h_descr;
777
}
778
 
779
#endif
780
 
781
/* Thread scheduling */
782
 
783
int pthread_setschedparam(pthread_t thread, int policy,
784
                          const struct sched_param *param)
785
{
786
  pthread_handle handle = thread_handle(thread);
787
  pthread_descr th;
788
 
789
  __pthread_lock(&handle->h_lock, NULL);
790
  if (__builtin_expect (invalid_handle(handle, thread), 0)) {
791
    __pthread_unlock(&handle->h_lock);
792
    return ESRCH;
793
  }
794
  th = handle->h_descr;
795
  if (__builtin_expect (__sched_setscheduler(th->p_pid, policy, param) == -1,
796
                        0)) {
797
    __pthread_unlock(&handle->h_lock);
798
    return errno;
799
  }
800
  th->p_priority = policy == SCHED_OTHER ? 0 : param->sched_priority;
801
  __pthread_unlock(&handle->h_lock);
802
  if (__pthread_manager_request >= 0)
803
    __pthread_manager_adjust_prio(th->p_priority);
804
  return 0;
805
}
806
 
807
int pthread_getschedparam(pthread_t thread, int *policy,
808
                          struct sched_param *param)
809
{
810
  pthread_handle handle = thread_handle(thread);
811
  int pid, pol;
812
 
813
  __pthread_lock(&handle->h_lock, NULL);
814
  if (__builtin_expect (invalid_handle(handle, thread), 0)) {
815
    __pthread_unlock(&handle->h_lock);
816
    return ESRCH;
817
  }
818
  pid = handle->h_descr->p_pid;
819
  __pthread_unlock(&handle->h_lock);
820
  pol = __sched_getscheduler(pid);
821
  if (__builtin_expect (pol, 0) == -1) return errno;
822
  if (__sched_getparam(pid, param) == -1) return errno;
823
  *policy = pol;
824
  return 0;
825
}
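A short, hedged example of the scheduling interface implemented above: promote a thread to SCHED_FIFO at a mid-range priority. The helper name is hypothetical, and on most systems the call succeeds only with sufficient privileges; as in the implementation above, the error code is returned directly rather than through errno.

#include <pthread.h>
#include <sched.h>
#include <string.h>
#include <stdio.h>

static int make_realtime (pthread_t th)
{
  struct sched_param sp;
  int err;

  memset (&sp, 0, sizeof sp);
  sp.sched_priority = (sched_get_priority_min (SCHED_FIFO)
                       + sched_get_priority_max (SCHED_FIFO)) / 2;
  err = pthread_setschedparam (th, SCHED_FIFO, &sp);
  if (err != 0)
    fprintf (stderr, "pthread_setschedparam: %s\n", strerror (err));
  return err;
}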
826
 
827
int __pthread_yield (void)
828
{
829
  /* For now this is equivalent with the POSIX call.  */
830
  return sched_yield ();
831
}
832
weak_alias (__pthread_yield, pthread_yield)
833
 
834
/* Process-wide exit() request */
835
 
836
static void pthread_onexit_process(int retcode, void *arg)
837
{
838
  if (__builtin_expect (__pthread_manager_request, 0) >= 0) {
839
    struct pthread_request request;
840
    pthread_descr self = thread_self();
841
 
842
    request.req_thread = self;
843
    request.req_kind = REQ_PROCESS_EXIT;
844
    request.req_args.exit.code = retcode;
845
    TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
846
                                    (char *) &request, sizeof(request)));
847
    suspend(self);
848
    /* Main thread should accumulate times for thread manager and its
849
       children, so that timings for main thread account for all threads. */
850
    if (self == __pthread_main_thread)
851
      {
852
        __waitpid(__pthread_manager_thread.p_pid, NULL, __WCLONE);
853
        /* Since all threads have been asynchronously terminated
854
           (possibly holding locks), free cannot be used any more.  */
855
        /*free (__pthread_manager_thread_bos);*/
856
        __pthread_manager_thread_bos = __pthread_manager_thread_tos = NULL;
857
      }
858
  }
859
}
860
 
861
#ifndef HAVE_Z_NODELETE
862
static int __pthread_atexit_retcode;
863
 
864
static void pthread_atexit_process(void *arg, int retcode)
865
{
866
  pthread_onexit_process (retcode ?: __pthread_atexit_retcode, arg);
867
}
868
 
869
static void pthread_atexit_retcode(void *arg, int retcode)
870
{
871
  __pthread_atexit_retcode = retcode;
872
}
873
#endif
874
 
875
/* The handler for the RESTART signal just records the signal received
876
   in the thread descriptor, and optionally performs a siglongjmp
877
   (for pthread_cond_timedwait). */
878
 
879
static void pthread_handle_sigrestart(int sig)
880
{
881
  pthread_descr self = thread_self();
882
  THREAD_SETMEM(self, p_signal, sig);
883
  if (THREAD_GETMEM(self, p_signal_jmp) != NULL)
884
    siglongjmp(*THREAD_GETMEM(self, p_signal_jmp), 1);
885
}
886
 
887
/* The handler for the CANCEL signal checks for cancellation
888
   (in asynchronous mode), for process-wide exit and exec requests.
889
   For the thread manager thread, redirect the signal to
890
   __pthread_manager_sighandler. */
891
 
892
static void pthread_handle_sigcancel(int sig)
893
{
894
  pthread_descr self = thread_self();
895
  sigjmp_buf * jmpbuf;
896
 
897
  if (self == &__pthread_manager_thread)
898
    {
899
#ifdef THREAD_SELF
900
      /* A new thread might get a cancel signal before it is fully
901
         initialized, so that the thread register might still point to the
902
         manager thread.  Double check that this is really the manager
903
         thread.  */
904
      pthread_descr real_self = thread_self_stack();
905
      if (real_self == &__pthread_manager_thread)
906
        {
907
          __pthread_manager_sighandler(sig);
908
          return;
909
        }
910
      /* Oops, thread_self() isn't working yet..  */
911
      self = real_self;
912
# ifdef INIT_THREAD_SELF
913
      INIT_THREAD_SELF(self, self->p_nr);
914
# endif
915
#else
916
      __pthread_manager_sighandler(sig);
917
      return;
918
#endif
919
    }
920
  if (__builtin_expect (__pthread_exit_requested, 0)) {
921
    /* Main thread should accumulate times for thread manager and its
922
       children, so that timings for main thread account for all threads. */
923
    if (self == __pthread_main_thread)
924
      __waitpid(__pthread_manager_thread.p_pid, NULL, __WCLONE);
925
    _exit(__pthread_exit_code);
926
  }
927
  if (__builtin_expect (THREAD_GETMEM(self, p_canceled), 0)
928
      && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) {
929
    if (THREAD_GETMEM(self, p_canceltype) == PTHREAD_CANCEL_ASYNCHRONOUS)
930
      __pthread_do_exit(PTHREAD_CANCELED, CURRENT_STACK_FRAME);
931
    jmpbuf = THREAD_GETMEM(self, p_cancel_jmp);
932
    if (jmpbuf != NULL) {
933
      THREAD_SETMEM(self, p_cancel_jmp, NULL);
934
      siglongjmp(*jmpbuf, 1);
935
    }
936
  }
937
}
938
 
939
/* Handler for the DEBUG signal.
940
   The debugging strategy is as follows:
941
   On reception of a REQ_DEBUG request (sent by new threads created to
942
   the thread manager under debugging mode), the thread manager throws
943
   __pthread_sig_debug to itself. The debugger (if active) intercepts
944
   this signal, takes the new threads into account and continues execution
945
   of the thread manager by propagating the signal, because it doesn't
946
   know what the signal is specifically for. In the current implementation,
947
   the thread manager simply discards it. */
948
 
949
static void pthread_handle_sigdebug(int sig)
950
{
951
  /* Nothing */
952
}
953
 
954
/* Reset the state of the thread machinery after a fork().
955
   Close the pipe used for requests and set the main thread to the forked
956
   thread.
957
   Notice that we can't free the stack segments, as the forked thread
958
   may hold pointers into them. */
959
 
960
void __pthread_reset_main_thread(void)
961
{
962
  pthread_descr self = thread_self();
963
  struct rlimit limit;
964
 
965
  if (__pthread_manager_request != -1) {
966
    /* Free the thread manager stack */
967
    free(__pthread_manager_thread_bos);
968
    __pthread_manager_thread_bos = __pthread_manager_thread_tos = NULL;
969
    /* Close the two ends of the pipe */
970
    __libc_close(__pthread_manager_request);
971
    __libc_close(__pthread_manager_reader);
972
    __pthread_manager_request = __pthread_manager_reader = -1;
973
  }
974
 
975
  /* Update the pid of the main thread */
976
  THREAD_SETMEM(self, p_pid, __getpid());
977
  /* Make the forked thread the main thread */
978
  __pthread_main_thread = self;
979
  THREAD_SETMEM(self, p_nextlive, self);
980
  THREAD_SETMEM(self, p_prevlive, self);
981
  /* Now this thread modifies the global variables.  */
982
  THREAD_SETMEM(self, p_resp, &_res);
983
 
984
  if (getrlimit (RLIMIT_STACK, &limit) == 0
985
      && limit.rlim_cur != limit.rlim_max) {
986
    limit.rlim_cur = limit.rlim_max;
987
    __libc_setrlimit(RLIMIT_STACK, &limit);
988
  }
989
}
990
 
991
/* Process-wide exec() request */
992
 
993
#if !defined(_ELIX_LEVEL) || _ELIX_LEVEL >= 2
994
 
995
void __pthread_kill_other_threads_np(void)
996
{
997
  struct sigaction sa;
998
  /* Terminate all other threads and thread manager */
999
  pthread_onexit_process(0, NULL);
1000
  /* Make current thread the main thread in case the calling thread
1001
     changes its mind, does not exec(), and creates new threads instead. */
1002
  __pthread_reset_main_thread();
1003
 
1004
  /* Reset the signal handlers behaviour for the signals the
1005
     implementation uses since this would be passed to the new
1006
     process.  */
1007
  sigemptyset(&sa.sa_mask);
1008
  sa.sa_flags = 0;
1009
  sa.sa_handler = SIG_DFL;
1010
  __libc_sigaction(__pthread_sig_restart, &sa, NULL);
1011
  __libc_sigaction(__pthread_sig_cancel, &sa, NULL);
1012
  if (__pthread_sig_debug > 0)
1013
    __libc_sigaction(__pthread_sig_debug, &sa, NULL);
1014
}
1015
weak_alias (__pthread_kill_other_threads_np, pthread_kill_other_threads_np)
1016
 
1017
#endif /* !_ELIX_LEVEL || _ELIX_LEVEL >= 2 */
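pthread_kill_other_threads_np() above exists mainly so a process can exec() safely: on LinuxThreads each thread is a separate process and would survive the exec otherwise. A hedged sketch of that pattern (on glibc the declaration requires _GNU_SOURCE; the helper name is illustrative):

#define _GNU_SOURCE              /* for pthread_kill_other_threads_np() on glibc */
#include <pthread.h>
#include <unistd.h>

static void exec_shell (void)
{
  pthread_kill_other_threads_np ();         /* terminate every other thread */
  execl ("/bin/sh", "sh", (char *) NULL);   /* replace the process image */
  _exit (127);                              /* only reached if exec fails */
}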
1018
 
1019
/* Concurrency symbol level.  */
1020
static int current_level;
1021
 
1022
int __pthread_setconcurrency(int level)
1023
{
1024
  /* We don't do anything unless we have found a useful interpretation.  */
1025
  current_level = level;
1026
  return 0;
1027
}
1028
weak_alias (__pthread_setconcurrency, pthread_setconcurrency)
1029
 
1030
int __pthread_getconcurrency(void)
1031
{
1032
  return current_level;
1033
}
1034
weak_alias (__pthread_getconcurrency, pthread_getconcurrency)
1035
 
1036
/* Primitives for controlling thread execution */
1037
 
1038
void __pthread_wait_for_restart_signal(pthread_descr self)
1039
{
1040
  sigset_t mask;
1041
 
1042
  sigprocmask(SIG_SETMASK, NULL, &mask); /* Get current signal mask */
1043
  sigdelset(&mask, __pthread_sig_restart); /* Unblock the restart signal */
1044
  THREAD_SETMEM(self, p_signal, 0);
1045
  do {
1046
    sigsuspend(&mask);                   /* Wait for signal */
1047
  } while (THREAD_GETMEM(self, p_signal) !=__pthread_sig_restart);
1048
 
1049
  READ_MEMORY_BARRIER(); /* See comment in __pthread_restart_new */
1050
}
1051
 
1052
#if !__ASSUME_REALTIME_SIGNALS
1053
/* The _old variants are for 2.0 and early 2.1 kernels which don't have RT
1054
   signals.
1055
   On these kernels, we use SIGUSR1 and SIGUSR2 for restart and cancellation.
1056
   Since the restart signal does not queue, we use an atomic counter to create
1057
   queuing semantics. This is needed to resolve a rare race condition in
1058
   pthread_cond_timedwait_relative. */
1059
 
1060
void __pthread_restart_old(pthread_descr th)
1061
{
1062
  if (atomic_increment(&th->p_resume_count) == -1)
1063
    kill(th->p_pid, __pthread_sig_restart);
1064
}
1065
 
1066
void __pthread_suspend_old(pthread_descr self)
1067
{
1068
  if (atomic_decrement(&self->p_resume_count) <= 0)
1069
    __pthread_wait_for_restart_signal(self);
1070
}
1071
 
1072
int
1073
__pthread_timedsuspend_old(pthread_descr self, const struct timespec *abstime)
1074
{
1075
  sigset_t unblock, initial_mask;
1076
  int was_signalled = 0;
1077
  sigjmp_buf jmpbuf;
1078
 
1079
  if (atomic_decrement(&self->p_resume_count) == 0) {
1080
    /* Set up a longjmp handler for the restart signal, unblock
1081
       the signal and sleep. */
1082
 
1083
    if (sigsetjmp(jmpbuf, 1) == 0) {
1084
      THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
1085
      THREAD_SETMEM(self, p_signal, 0);
1086
      /* Unblock the restart signal */
1087
      sigemptyset(&unblock);
1088
      sigaddset(&unblock, __pthread_sig_restart);
1089
      sigprocmask(SIG_UNBLOCK, &unblock, &initial_mask);
1090
 
1091
      while (1) {
1092
        struct timeval now;
1093
        struct timespec reltime;
1094
 
1095
        /* Compute a time offset relative to now.  */
1096
        __gettimeofday (&now, NULL);
1097
        reltime.tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;
1098
        reltime.tv_sec = abstime->tv_sec - now.tv_sec;
1099
        if (reltime.tv_nsec < 0) {
1100
          reltime.tv_nsec += 1000000000;
1101
          reltime.tv_sec -= 1;
1102
        }
1103
 
1104
        /* Sleep for the required duration. If woken by a signal,
1105
           resume waiting as required by Single Unix Specification.  */
1106
        if (reltime.tv_sec < 0 || __libc_nanosleep(&reltime, NULL) == 0)
1107
          break;
1108
      }
1109
 
1110
      /* Block the restart signal again */
1111
      sigprocmask(SIG_SETMASK, &initial_mask, NULL);
1112
      was_signalled = 0;
1113
    } else {
1114
      was_signalled = 1;
1115
    }
1116
    THREAD_SETMEM(self, p_signal_jmp, NULL);
1117
  }
1118
 
1119
  /* Now was_signalled is true if we exited the above code
1120
     due to the delivery of a restart signal.  In that case,
1121
     we know we have been dequeued and resumed and that the
1122
     resume count is balanced.  Otherwise, there are some
1123
     cases to consider. First, try to bump up the resume count
1124
     back to zero. If it goes to 1, it means restart() was
1125
     invoked on this thread. The signal must be consumed
1126
     and the count bumped down and everything is cool. We
1127
     can return a 1 to the caller.
1128
     Otherwise, no restart was delivered yet, so a potential
1129
     race exists; we return a 0 to the caller which must deal
1130
     with this race in an appropriate way; for example by
1131
     atomically removing the thread from consideration for a
1132
     wakeup---if such a thing fails, it means a restart is
1133
     being delivered. */
1134
 
1135
  if (!was_signalled) {
1136
    if (atomic_increment(&self->p_resume_count) != -1) {
1137
      __pthread_wait_for_restart_signal(self);
1138
      atomic_decrement(&self->p_resume_count); /* should be zero now! */
1139
      /* woke spontaneously and consumed restart signal */
1140
      return 1;
1141
    }
1142
    /* woke spontaneously but did not consume restart---caller must resolve */
1143
    return 0;
1144
  }
1145
  /* woken due to restart signal */
1146
  return 1;
1147
}
1148
#endif /* __ASSUME_REALTIME_SIGNALS */
1149
 
1150
void __pthread_restart_new(pthread_descr th)
1151
{
1152
  /* The barrier is probably not needed, in which case it still documents
1153
     our assumptions. The intent is to commit previous writes to shared
1154
     memory so the woken thread will have a consistent view.  Complementary
1155
     read barriers are present in the suspend functions. */
1156
  WRITE_MEMORY_BARRIER();
1157
  kill(th->p_pid, __pthread_sig_restart);
1158
}
1159
 
1160
/* There is no __pthread_suspend_new because it would just
1161
   be a wasteful wrapper for __pthread_wait_for_restart_signal */
1162
 
1163
int
1164
__pthread_timedsuspend_new(pthread_descr self, const struct timespec *abstime)
1165
{
1166
  sigset_t unblock, initial_mask;
1167
  int was_signalled = 0;
1168
  sigjmp_buf jmpbuf;
1169
 
1170
  if (sigsetjmp(jmpbuf, 1) == 0) {
1171
    THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
1172
    THREAD_SETMEM(self, p_signal, 0);
1173
    /* Unblock the restart signal */
1174
    sigemptyset(&unblock);
1175
    sigaddset(&unblock, __pthread_sig_restart);
1176
    sigprocmask(SIG_UNBLOCK, &unblock, &initial_mask);
1177
 
1178
    while (1) {
1179
      struct timeval now;
1180
      struct timespec reltime;
1181
 
1182
      /* Compute a time offset relative to now.  */
1183
      __gettimeofday (&now, NULL);
1184
      reltime.tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;
1185
      reltime.tv_sec = abstime->tv_sec - now.tv_sec;
1186
      if (reltime.tv_nsec < 0) {
1187
        reltime.tv_nsec += 1000000000;
1188
        reltime.tv_sec -= 1;
1189
      }
1190
 
1191
      /* Sleep for the required duration. If woken by a signal,
1192
         resume waiting as required by Single Unix Specification.  */
1193
      if (reltime.tv_sec < 0 || __libc_nanosleep(&reltime, NULL) == 0)
1194
        break;
1195
    }
1196
 
1197
    /* Block the restart signal again */
1198
    sigprocmask(SIG_SETMASK, &initial_mask, NULL);
1199
    was_signalled = 0;
1200
  } else {
1201
    was_signalled = 1;
1202
  }
1203
  THREAD_SETMEM(self, p_signal_jmp, NULL);
1204
 
1205
  /* Now was_signalled is true if we exited the above code
1206
     due to the delivery of a restart signal.  In that case,
1207
     everything is cool. We have been removed from whatever
1208
     we were waiting on by the other thread, and consumed its signal.
1209
 
1210
     Otherwise this thread woke up spontaneously, or due to a signal other
1211
     than restart. This is an ambiguous case that must be resolved by
1212
     the caller; the thread is still eligible for a restart wakeup
1213
     so there is a race. */
1214
 
1215
  READ_MEMORY_BARRIER(); /* See comment in __pthread_restart_new */
1216
  return was_signalled;
1217
}
1218
 
1219
 
1220
/* Debugging aid */
1221
 
1222
#ifdef DEBUG
1223
#include <stdarg.h>
1224
 
1225
void __pthread_message(char * fmt, ...)
1226
{
1227
  char buffer[1024];
1228
  va_list args;
1229
  sprintf(buffer, "%05d : ", __getpid());
1230
  va_start(args, fmt);
1231
  vsnprintf(buffer + 8, sizeof(buffer) - 8, fmt, args);
1232
  va_end(args);
1233
  TEMP_FAILURE_RETRY(__libc_write(2, buffer, strlen(buffer)));
1234
}
1235
 
1236
#endif
1237
 
1238
 
1239
#ifndef SHARED
1240
/* We need a hook to force the cancelation wrappers and file locking
1241
   to be linked in when static libpthread is used.  */
1242
extern const int __pthread_provide_wrappers;
1243
static const int *const __pthread_require_wrappers =
1244
  &__pthread_provide_wrappers;
1245
extern const int __pthread_provide_lockfile;
1246
static const int *const __pthread_require_lockfile =
1247
  &__pthread_provide_lockfile;
1248
#endif
