OpenCores Subversion repository: openrisc_2011-10-31
URL: https://opencores.org/ocsvn/openrisc_2011-10-31/openrisc_2011-10-31/trunk

openrisc/tags/gnu-src/newlib-1.18.0/newlib-1.18.0-or32-1.0rc1/newlib/libc/sys/linux/linuxthreads/manager.c (rev 207, jeremybenn)
/* Linuxthreads - a simple clone()-based implementation of Posix        */
/* threads for Linux.                                                   */
/* Copyright (C) 1996 Xavier Leroy (Xavier.Leroy@inria.fr)              */
/*                                                                      */
/* This program is free software; you can redistribute it and/or        */
/* modify it under the terms of the GNU Library General Public License  */
/* as published by the Free Software Foundation; either version 2       */
/* of the License, or (at your option) any later version.               */
/*                                                                      */
/* This program is distributed in the hope that it will be useful,      */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of       */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the        */
/* GNU Library General Public License for more details.                 */

/* The "thread manager" thread: manages creation and termination of threads */

#include <errno.h>
#define __USE_MISC
#include <sched.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/poll.h>           /* for poll */
#include <sys/mman.h>           /* for mmap */
#include <sys/param.h>
#include <sys/time.h>
#include <sys/wait.h>           /* for waitpid macros */

#include "pthread.h"
#include "internals.h"
#include "spinlock.h"
#include "restart.h"
#include "semaphore.h"

/* Array of active threads. Entry 0 is reserved for the initial thread. */
struct pthread_handle_struct __pthread_handles[PTHREAD_THREADS_MAX] =
{ { __LOCK_INITIALIZER, &__pthread_initial_thread, 0},
  { __LOCK_INITIALIZER, &__pthread_manager_thread, 0}, /* All NULLs */ };

/* For debugging purposes put the maximum number of threads in a variable.  */
const int __linuxthreads_pthread_threads_max = PTHREAD_THREADS_MAX;

#ifndef THREAD_SELF
/* Indicate whether at least one thread has a user-defined stack (if 1),
   or if all threads have stacks supplied by LinuxThreads (if 0). */
int __pthread_nonstandard_stacks;
#endif

/* Number of active entries in __pthread_handles (used by gdb) */
volatile int __pthread_handles_num = 2;

/* Whether to use debugger additional actions for thread creation
   (set to 1 by gdb) */
volatile int __pthread_threads_debug;

/* Globally enabled events.  */
volatile td_thr_events_t __pthread_threads_events;

/* Pointer to thread descriptor with last event.  */
volatile pthread_descr __pthread_last_event;

/* Mapping from stack segment to thread descriptor. */
/* Stack segment numbers are also indices into the __pthread_handles array. */
/* Stack segment number 0 is reserved for the initial thread. */

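/* With fixed (non-floating) stacks, segment seg occupies the
   STACK_SIZE-sized slot ending at THREAD_STACK_START_ADDRESS
   - (seg - 1) * STACK_SIZE, and the thread descriptor is the
   pthread_descr placed just below that upper boundary, which is
   what the "- 1" computes. */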
#if FLOATING_STACKS
# define thread_segment(seq) NULL
#else
static inline pthread_descr thread_segment(int seg)
{
  return (pthread_descr)(THREAD_STACK_START_ADDRESS - (seg - 1) * STACK_SIZE)
         - 1;
}
#endif

/* Flag set in signal handler to record child termination */

static volatile int terminated_children;

/* Flag set when the initial thread is blocked on pthread_exit waiting
   for all other threads to terminate */

static int main_thread_exiting;

/* Counter used to generate unique thread identifier.
   Thread identifier is pthread_threads_counter + segment. */

static pthread_t pthread_threads_counter;

/* Forward declarations */

static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr,
                                 void * (*start_routine)(void *), void *arg,
                                 sigset_t *mask, int father_pid,
                                 int report_events,
                                 td_thr_events_t *event_maskp);
static void pthread_handle_free(pthread_t th_id);
static void pthread_handle_exit(pthread_descr issuing_thread, int exitcode)
     __attribute__ ((noreturn));
static void pthread_reap_children(void);
static void pthread_kill_all_threads(int sig, int main_thread_also);
static void pthread_for_each_thread(void *arg,
    void (*fn)(void *, pthread_descr));

/* The server thread managing requests for thread creation and termination */

int
__attribute__ ((noreturn))
__pthread_manager(void *arg)
{
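  /* The argument is the read end of the request pipe the library set up
     when it spawned the manager with clone(); requests from user threads
     arrive as fixed-size structures on this descriptor. */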
  int reqfd = (int) (long int) arg;
  struct pollfd ufd;
  sigset_t manager_mask;
  int n;
  struct pthread_request request;

  /* If we have special thread_self processing, initialize it.  */
#ifdef INIT_THREAD_SELF
  INIT_THREAD_SELF(&__pthread_manager_thread, 1);
#endif
  /* Set the error variable.  */
  __pthread_manager_thread.p_reentp = &__pthread_manager_thread.p_reent;
  __pthread_manager_thread.p_h_errnop = &__pthread_manager_thread.p_h_errno;
  /* Block all signals except __pthread_sig_cancel and SIGTRAP */
  sigfillset(&manager_mask);
  sigdelset(&manager_mask, __pthread_sig_cancel); /* for thread termination */
  sigdelset(&manager_mask, SIGTRAP);            /* for debugging purposes */
  if (__pthread_threads_debug && __pthread_sig_debug > 0)
    sigdelset(&manager_mask, __pthread_sig_debug);
  sigprocmask(SIG_SETMASK, &manager_mask, NULL);
  /* Raise our priority to match that of main thread */
  __pthread_manager_adjust_prio(__pthread_main_thread->p_priority);
  /* Synchronize debugging of the thread manager */
  n = TEMP_FAILURE_RETRY(__libc_read(reqfd, (char *)&request,
                                     sizeof(request)));
  ASSERT(n == sizeof(request) && request.req_kind == REQ_DEBUG);
  ufd.fd = reqfd;
  ufd.events = POLLIN;
  /* Enter server loop */
  while(1) {
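    /* Wake up at least every two seconds even when no request is
       pending, so the liveness and child-reaping checks below still
       run; REQ_KICK exists to cut this latency during shutdown. */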
    n = __poll(&ufd, 1, 2000);

    /* Check for termination of the main thread */
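    /* If the initial process died, this clone task was reparented to
       init (pid 1), so the whole program must go down. */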
    if (getppid() == 1) {
      pthread_kill_all_threads(SIGKILL, 0);
      _exit(0);
    }
    /* Check for dead children */
    if (terminated_children) {
      terminated_children = 0;
      pthread_reap_children();
    }
    /* Read and execute request */
    if (n == 1 && (ufd.revents & POLLIN)) {
      n = TEMP_FAILURE_RETRY(__libc_read(reqfd, (char *)&request,
                                         sizeof(request)));
#ifdef DEBUG
      if (n < 0) {
        char d[64];
        write(STDERR_FILENO, d, snprintf(d, sizeof(d), "*** read err %m\n"));
      } else if (n != sizeof(request)) {
        write(STDERR_FILENO, "*** short read in manager\n", 26);
      }
#endif

      switch(request.req_kind) {
      case REQ_CREATE:
        request.req_thread->p_retcode =
          pthread_handle_create((pthread_t *) &request.req_thread->p_retval,
                                request.req_args.create.attr,
                                request.req_args.create.fn,
                                request.req_args.create.arg,
                                &request.req_args.create.mask,
                                request.req_thread->p_pid,
                                request.req_thread->p_report_events,
                                &request.req_thread->p_eventbuf.eventmask);
        restart(request.req_thread);
        break;
      case REQ_FREE:
        pthread_handle_free(request.req_args.free.thread_id);
        break;
      case REQ_PROCESS_EXIT:
        pthread_handle_exit(request.req_thread,
                            request.req_args.exit.code);
        /* NOTREACHED */
        break;
      case REQ_MAIN_THREAD_EXIT:
        main_thread_exiting = 1;
        /* Reap children in case all other threads died and the signal handler
           went off before we set main_thread_exiting to 1, and therefore did
           not do REQ_KICK. */
        pthread_reap_children();

        if (__pthread_main_thread->p_nextlive == __pthread_main_thread) {
          restart(__pthread_main_thread);
          /* The main thread will now call exit() which will trigger an
             __on_exit handler, which in turn will send REQ_PROCESS_EXIT
             to the thread manager. In case you are wondering how the
             manager terminates from its loop here. */
        }
        break;
      case REQ_POST:
        __new_sem_post(request.req_args.post);
        break;
      case REQ_DEBUG:
        /* Make gdb aware of new thread and gdb will restart the
           new thread when it is ready to handle the new thread. */
        if (__pthread_threads_debug && __pthread_sig_debug > 0)
          raise(__pthread_sig_debug);
        break;
      case REQ_KICK:
        /* This is just a prod to get the manager to reap some
           threads right away, avoiding a potential delay at shutdown. */
        break;
      case REQ_FOR_EACH_THREAD:
        pthread_for_each_thread(request.req_args.for_each.arg,
                                request.req_args.for_each.fn);
        restart(request.req_thread);
        break;
      }
    }
  }
}

int __pthread_manager_event(void *arg)
{
  /* If we have special thread_self processing, initialize it.  */
#ifdef INIT_THREAD_SELF
  INIT_THREAD_SELF(&__pthread_manager_thread, 1);
#endif

  /* Get the lock the manager will free once all is correctly set up.  */
  __pthread_lock (THREAD_GETMEM((&__pthread_manager_thread), p_lock), NULL);
  /* Free it immediately.  */
  __pthread_unlock (THREAD_GETMEM((&__pthread_manager_thread), p_lock));

  return __pthread_manager(arg);
}

/* Process creation */

static int
__attribute__ ((noreturn))
pthread_start_thread(void *arg)
{
  pthread_descr self = (pthread_descr) arg;
  struct pthread_request request;
  void * outcome;
#if HP_TIMING_AVAIL
  hp_timing_t tmpclock;
#endif
  /* Initialize special thread_self processing, if any.  */
#ifdef INIT_THREAD_SELF
  INIT_THREAD_SELF(self, self->p_nr);
#endif
#if HP_TIMING_AVAIL
  HP_TIMING_NOW (tmpclock);
  THREAD_SETMEM (self, p_cpuclock_offset, tmpclock);
#endif
  /* Make sure our pid field is initialized, just in case we get there
     before our father has initialized it. */
  THREAD_SETMEM(self, p_pid, __getpid());
  /* Initial signal mask is that of the creating thread. (Otherwise,
     we'd just inherit the mask of the thread manager.) */
  sigprocmask(SIG_SETMASK, &self->p_start_args.mask, NULL);
  /* Set the scheduling policy and priority for the new thread, if needed */
  if (THREAD_GETMEM(self, p_start_args.schedpolicy) >= 0)
    /* Explicit scheduling attributes were provided: apply them */
    __sched_setscheduler(THREAD_GETMEM(self, p_pid),
                         THREAD_GETMEM(self, p_start_args.schedpolicy),
                         &self->p_start_args.schedparam);
  else if (__pthread_manager_thread.p_priority > 0)
    /* Default scheduling required, but thread manager runs in realtime
       scheduling: switch new thread to SCHED_OTHER policy */
    {
      struct sched_param default_params;
      default_params.sched_priority = 0;
      __sched_setscheduler(THREAD_GETMEM(self, p_pid),
                           SCHED_OTHER, &default_params);
    }
  /* Make gdb aware of new thread */
  if (__pthread_threads_debug && __pthread_sig_debug > 0) {
    request.req_thread = self;
    request.req_kind = REQ_DEBUG;
    TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
                                    (char *) &request, sizeof(request)));
    suspend(self);
  }
  /* Run the thread code */
  outcome = self->p_start_args.start_routine(THREAD_GETMEM(self,
                                                           p_start_args.arg));
  /* Exit with the given return value */
  __pthread_do_exit(outcome, CURRENT_STACK_FRAME);
}

static int
__attribute__ ((noreturn))
pthread_start_thread_event(void *arg)
{
  pthread_descr self = (pthread_descr) arg;

#ifdef INIT_THREAD_SELF
  INIT_THREAD_SELF(self, self->p_nr);
#endif
  /* Make sure our pid field is initialized, just in case we get there
     before our father has initialized it. */
  THREAD_SETMEM(self, p_pid, __getpid());
  /* Get the lock the manager will free once all is correctly set up.  */
  __pthread_lock (THREAD_GETMEM(self, p_lock), NULL);
  /* Free it immediately.  */
  __pthread_unlock (THREAD_GETMEM(self, p_lock));

  /* Continue with the real function.  */
  pthread_start_thread (arg);
}

static int pthread_allocate_stack(const pthread_attr_t *attr,
                                  pthread_descr default_new_thread,
                                  int pagesize,
                                  pthread_descr * out_new_thread,
                                  char ** out_new_thread_bottom,
                                  char ** out_guardaddr,
                                  size_t * out_guardsize)
{
  pthread_descr new_thread;
  char * new_thread_bottom;
  char * guardaddr;
  size_t stacksize, guardsize;

  if (attr != NULL && attr->__stackaddr_set)
    {
#ifdef _STACK_GROWS_UP
      /* The user provided a stack. */
      new_thread = (pthread_descr) attr->__stackaddr;
      new_thread_bottom = (char *) (new_thread + 1);
      guardaddr = attr->__stackaddr + attr->__stacksize;
      guardsize = 0;
#else
      /* The user provided a stack.  For now we interpret the supplied
         address as 1 + the highest addr. in the stack segment.  If a
         separate register stack is needed, we place it at the low end
         of the segment, relying on the associated stacksize to
         determine the low end of the segment.  This differs from many
         (but not all) other pthreads implementations.  The intent is
         that on machines with a single stack growing toward higher
         addresses, stackaddr would be the lowest address in the stack
         segment, so that it is consistently close to the initial sp
         value. */
      new_thread =
        (pthread_descr) ((long)(attr->__stackaddr) & -sizeof(void *)) - 1;
      new_thread_bottom = (char *) attr->__stackaddr - attr->__stacksize;
      guardaddr = new_thread_bottom;
      guardsize = 0;
#endif
#ifndef THREAD_SELF
      __pthread_nonstandard_stacks = 1;
#endif
      /* Clear the thread data structure.  */
      memset (new_thread, '\0', sizeof (*new_thread));
    }
  else
    {
#ifdef NEED_SEPARATE_REGISTER_STACK
      size_t granularity = 2 * pagesize;
      /* Try to make stacksize/2 a multiple of pagesize */
#else
      size_t granularity = pagesize;
#endif
      void *map_addr;

      /* Allocate space for stack and thread descriptor at default address */
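      /* The mapping covers the stack plus a guard region rounded up to
         the allocation granularity; the guard pages are mprotect'ed to
         PROT_NONE below so that stack overflow faults immediately
         instead of silently overwriting a neighboring mapping. */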
#if FLOATING_STACKS
      if (attr != NULL)
        {
          guardsize = page_roundup (attr->__guardsize, granularity);
          stacksize = __pthread_max_stacksize - guardsize;
          stacksize = MIN (stacksize,
                           page_roundup (attr->__stacksize, granularity));
        }
      else
        {
          guardsize = granularity;
          stacksize = __pthread_max_stacksize - guardsize;
        }

      map_addr = mmap(NULL, stacksize + guardsize,
                      PROT_READ | PROT_WRITE | PROT_EXEC,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (map_addr == MAP_FAILED)
        /* No more memory available.  */
        return -1;

# ifdef NEED_SEPARATE_REGISTER_STACK
      guardaddr = map_addr + stacksize / 2;
      if (guardsize > 0)
        mprotect (guardaddr, guardsize, PROT_NONE);

      new_thread_bottom = (char *) map_addr;
      new_thread = ((pthread_descr) (new_thread_bottom + stacksize
                                     + guardsize)) - 1;
# elif _STACK_GROWS_DOWN
      guardaddr = map_addr;
      if (guardsize > 0)
        mprotect (guardaddr, guardsize, PROT_NONE);

      new_thread_bottom = (char *) map_addr + guardsize;
      new_thread = ((pthread_descr) (new_thread_bottom + stacksize)) - 1;
# elif _STACK_GROWS_UP
      guardaddr = map_addr + stacksize;
      if (guardsize > 0)
        mprotect (guardaddr, guardsize, PROT_NONE);

      new_thread = (pthread_descr) map_addr;
      new_thread_bottom = (char *) (new_thread + 1);
# else
#  error You must define a stack direction
# endif /* Stack direction */
#else /* !FLOATING_STACKS */
      void *res_addr;

      if (attr != NULL)
        {
          guardsize = page_roundup (attr->__guardsize, granularity);
          stacksize = STACK_SIZE - guardsize;
          stacksize = MIN (stacksize,
                           page_roundup (attr->__stacksize, granularity));
        }
      else
        {
          guardsize = granularity;
          stacksize = STACK_SIZE - granularity;
        }

# ifdef NEED_SEPARATE_REGISTER_STACK
      new_thread = default_new_thread;
      new_thread_bottom = (char *) (new_thread + 1) - stacksize - guardsize;
      /* Includes guard area, unlike the normal case.  Use the bottom
       end of the segment as backing store for the register stack.
       Needed on IA64.  In this case, we also map the entire stack at
       once.  According to David Mosberger, that's cheaper.  It also
       avoids the risk of intermittent failures due to other mappings
       in the same region.  The cost is that we might be able to map
       slightly fewer stacks.  */

      /* First the main stack: */
      map_addr = (caddr_t)((char *)(new_thread + 1) - stacksize / 2);
      res_addr = mmap(map_addr, stacksize / 2,
                      PROT_READ | PROT_WRITE | PROT_EXEC,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (res_addr != map_addr)
        {
          /* Bad luck, this segment is already mapped. */
          if (res_addr != MAP_FAILED)
            munmap(res_addr, stacksize / 2);
          return -1;
        }
      /* Then the register stack:       */
      map_addr = (caddr_t)new_thread_bottom;
      res_addr = mmap(map_addr, stacksize/2,
                      PROT_READ | PROT_WRITE | PROT_EXEC,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (res_addr != map_addr)
        {
          if (res_addr != MAP_FAILED)
            munmap(res_addr, stacksize / 2);
          munmap((caddr_t)((char *)(new_thread + 1) - stacksize/2),
                 stacksize/2);
          return -1;
        }

      guardaddr = new_thread_bottom + stacksize/2;
      /* We leave the guard area in the middle unmapped.        */
# else  /* !NEED_SEPARATE_REGISTER_STACK */
#  ifdef _STACK_GROWS_DOWN
      new_thread = default_new_thread;
      new_thread_bottom = (char *) (new_thread + 1) - stacksize;
      map_addr = new_thread_bottom - guardsize;
      res_addr = mmap(map_addr, stacksize + guardsize,
                      PROT_READ | PROT_WRITE | PROT_EXEC,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (res_addr != map_addr)
        {
          /* Bad luck, this segment is already mapped. */
          if (res_addr != MAP_FAILED)
            munmap (res_addr, stacksize + guardsize);
          return -1;
        }

      /* We manage to get a stack.  Protect the guard area pages if
         necessary.  */
      guardaddr = map_addr;
      if (guardsize > 0)
        mprotect (guardaddr, guardsize, PROT_NONE);
#  else
      /* The thread description goes at the bottom of this area, and
       * the stack starts directly above it.
       */
      new_thread = (pthread_descr)((unsigned long)default_new_thread &~ (STACK_SIZE - 1));
      map_addr = mmap(new_thread, stacksize + guardsize,
                      PROT_READ | PROT_WRITE | PROT_EXEC,
                      MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (map_addr == MAP_FAILED)
          return -1;

      new_thread_bottom = map_addr + sizeof(*new_thread);
      guardaddr = map_addr + stacksize;
      if (guardsize > 0)
          mprotect (guardaddr, guardsize, PROT_NONE);

#  endif /* stack direction */
# endif  /* !NEED_SEPARATE_REGISTER_STACK */
#endif   /* !FLOATING_STACKS */
    }
  *out_new_thread = new_thread;
  *out_new_thread_bottom = new_thread_bottom;
  *out_guardaddr = guardaddr;
  *out_guardsize = guardsize;
  return 0;
}

static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr,
                                 void * (*start_routine)(void *), void *arg,
                                 sigset_t * mask, int father_pid,
                                 int report_events,
                                 td_thr_events_t *event_maskp)
{
  size_t sseg;
  int pid;
  pthread_descr new_thread;
  char * new_thread_bottom;
  pthread_t new_thread_id;
  char *guardaddr = NULL;
  size_t guardsize = 0;
  int pagesize = __getpagesize();

  /* First check whether we have to change the policy and if yes, whether
     we can  do this.  Normally this should be done by examining the
     return value of the __sched_setscheduler call in pthread_start_thread
     but this is hard to implement.  FIXME  */
  if (attr != NULL && attr->__schedpolicy != SCHED_OTHER && geteuid () != 0)
    return EPERM;
  /* Find a free segment for the thread, and allocate a stack if needed */
  for (sseg = 2; ; sseg++)
    {
      if (sseg >= PTHREAD_THREADS_MAX)
        return EAGAIN;
      if (__pthread_handles[sseg].h_descr != NULL)
        continue;
      if (pthread_allocate_stack(attr, thread_segment(sseg),
                                 pagesize,
                                 &new_thread, &new_thread_bottom,
                                 &guardaddr, &guardsize) == 0)
        break;
    }
  __pthread_handles_num++;
  /* Allocate new thread identifier */
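  /* The counter advances in steps of PTHREAD_THREADS_MAX, so the id
     encodes the segment (id % PTHREAD_THREADS_MAX == sseg) while
     successive occupants of the same segment get distinct ids. */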
  pthread_threads_counter += PTHREAD_THREADS_MAX;
  new_thread_id = sseg + pthread_threads_counter;
  /* Initialize the thread descriptor.  Elements which have to be
     initialized to zero already have this value.  */
  new_thread->p_tid = new_thread_id;
  new_thread->p_lock = &(__pthread_handles[sseg].h_lock);
  new_thread->p_cancelstate = PTHREAD_CANCEL_ENABLE;
  new_thread->p_canceltype = PTHREAD_CANCEL_DEFERRED;
  new_thread->p_reentp = &new_thread->p_reent;
  _REENT_INIT_PTR(new_thread->p_reentp);
  new_thread->p_h_errnop = &new_thread->p_h_errno;
  new_thread->p_resp = &new_thread->p_res;
  new_thread->p_guardaddr = guardaddr;
  new_thread->p_guardsize = guardsize;
  new_thread->p_header.data.self = new_thread;
  new_thread->p_nr = sseg;
  new_thread->p_inheritsched = attr ? attr->__inheritsched : 0;
  /* Initialize the thread handle */
  __pthread_init_lock(&__pthread_handles[sseg].h_lock);
  __pthread_handles[sseg].h_descr = new_thread;
  __pthread_handles[sseg].h_bottom = new_thread_bottom;
  /* Determine scheduling parameters for the thread */
  new_thread->p_start_args.schedpolicy = -1;
  if (attr != NULL) {
    new_thread->p_detached = attr->__detachstate;
    new_thread->p_userstack = attr->__stackaddr_set;

    switch(attr->__inheritsched) {
    case PTHREAD_EXPLICIT_SCHED:
      new_thread->p_start_args.schedpolicy = attr->__schedpolicy;
      memcpy (&new_thread->p_start_args.schedparam, &attr->__schedparam,
              sizeof (struct sched_param));
      break;
    case PTHREAD_INHERIT_SCHED:
      new_thread->p_start_args.schedpolicy = __sched_getscheduler(father_pid);
      __sched_getparam(father_pid, &new_thread->p_start_args.schedparam);
      break;
    }
    new_thread->p_priority =
      new_thread->p_start_args.schedparam.__sched_priority;
  }
  /* Finish setting up arguments to pthread_start_thread */
  new_thread->p_start_args.start_routine = start_routine;
  new_thread->p_start_args.arg = arg;
  new_thread->p_start_args.mask = *mask;
  /* Make the new thread ID available already now.  If any of the later
     functions fail we return an error value and the caller must not use
     the stored thread ID.  */
  *thread = new_thread_id;
  /* Raise priority of thread manager if needed */
  __pthread_manager_adjust_prio(new_thread->p_priority);
  /* Do the cloning.  We have to use two different functions depending
     on whether we are debugging or not.  */
  pid = 0;       /* Note that the thread never can have PID zero.  */
  if (report_events)
    {
      /* See whether the TD_CREATE event bit is set in any of the
         masks.  */
      int idx = __td_eventword (TD_CREATE);
      uint32_t mask = __td_eventmask (TD_CREATE);

      if ((mask & (__pthread_threads_events.event_bits[idx]
                   | event_maskp->event_bits[idx])) != 0)
        {
          /* Lock the mutex the child will use now so that it will stop.  */
          __pthread_lock(new_thread->p_lock, NULL);

          /* We have to report this event.  */
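          /* In all the clone variants below, the low bits of the flags
             (__pthread_sig_cancel) name the signal the kernel delivers
             to this manager when the child terminates; CLONE_VM,
             CLONE_FS, CLONE_FILES and CLONE_SIGHAND share the address
             space, filesystem state, descriptors and signal handlers
             with the rest of the process. */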
#ifdef NEED_SEPARATE_REGISTER_STACK
          /* Perhaps this version should be used on all platforms. But
           this requires that __clone2 be uniformly supported
           everywhere.

           And there is some argument for changing the __clone2
           interface to pass sp and bsp instead, making it more IA64
           specific, but allowing stacks to grow outward from each
           other, to get less paging and fewer mmaps.  */
          pid = __clone2(pthread_start_thread_event,
                         (void **)new_thread_bottom,
                         (char *)new_thread - new_thread_bottom,
                         CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
                         __pthread_sig_cancel, new_thread);
#elif _STACK_GROWS_UP
          pid = __clone(pthread_start_thread_event, (void **) new_thread_bottom,
                        CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
                        __pthread_sig_cancel, new_thread);
#else
          pid = __clone(pthread_start_thread_event, (void **) new_thread,
                        CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
                        __pthread_sig_cancel, new_thread);
#endif
          if (pid != -1)
            {
              /* Now fill in the information about the new thread in
                 the newly created thread's data structure.  We cannot let
                 the new thread do this since we don't know whether it was
                 already scheduled when we send the event.  */
              new_thread->p_eventbuf.eventdata = new_thread;
              new_thread->p_eventbuf.eventnum = TD_CREATE;
              __pthread_last_event = new_thread;

              /* We have to set the PID here since the callback function
                 in the debug library will need it and we cannot guarantee
                 the child got scheduled before the debugger.  */
              new_thread->p_pid = pid;

              /* Now call the function which signals the event.  */
              __linuxthreads_create_event ();

              /* Now restart the thread.  */
              __pthread_unlock(new_thread->p_lock);
            }
        }
    }
  if (pid == 0)
    {
#ifdef NEED_SEPARATE_REGISTER_STACK
      pid = __clone2(pthread_start_thread,
                     (void **)new_thread_bottom,
                     (char *)new_thread - new_thread_bottom,
                     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
                     __pthread_sig_cancel, new_thread);
#elif _STACK_GROWS_UP
      pid = __clone(pthread_start_thread, (void **) new_thread_bottom,
                    CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
                    __pthread_sig_cancel, new_thread);
#else
      pid = __clone(pthread_start_thread, (void **) new_thread,
                    CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
                    __pthread_sig_cancel, new_thread);
#endif /* !NEED_SEPARATE_REGISTER_STACK */
    }
  /* Check if cloning succeeded */
  if (pid == -1) {
    /* Free the stack if we allocated it */
    if (attr == NULL || !attr->__stackaddr_set)
      {
#ifdef NEED_SEPARATE_REGISTER_STACK
        size_t stacksize = ((char *)(new_thread->p_guardaddr)
                            - new_thread_bottom);
        munmap((caddr_t)new_thread_bottom,
               2 * stacksize + new_thread->p_guardsize);
#elif _STACK_GROWS_UP
        size_t stacksize = guardaddr - (char *)new_thread;
        munmap(new_thread, stacksize + guardsize);
#else
        size_t stacksize = (char *)(new_thread+1) - new_thread_bottom;
        munmap(new_thread_bottom - guardsize, guardsize + stacksize);
#endif
      }
    __pthread_handles[sseg].h_descr = NULL;
    __pthread_handles[sseg].h_bottom = NULL;
    __pthread_handles_num--;
    return errno;
  }
  /* Insert new thread in doubly linked list of active threads */
  new_thread->p_prevlive = __pthread_main_thread;
  new_thread->p_nextlive = __pthread_main_thread->p_nextlive;
  __pthread_main_thread->p_nextlive->p_prevlive = new_thread;
  __pthread_main_thread->p_nextlive = new_thread;
  /* Set pid field of the new thread, in case we get there before the
     child starts. */
  new_thread->p_pid = pid;
  return 0;
}


/* Try to free the resources of a thread when requested by pthread_join
   or pthread_detach on a terminated thread. */

static void pthread_free(pthread_descr th)
{
  pthread_handle handle;
  pthread_readlock_info *iter, *next;

  ASSERT(th->p_exited);
  /* Make the handle invalid */
  handle = thread_handle(th->p_tid);
  __pthread_lock(&handle->h_lock, NULL);
  handle->h_descr = NULL;
  handle->h_bottom = (char *)(-1L);
  __pthread_unlock(&handle->h_lock);
#ifdef FREE_THREAD
  FREE_THREAD(th, th->p_nr);
#endif
  /* One fewer threads in __pthread_handles */
  __pthread_handles_num--;

  /* Destroy read lock list, and list of free read lock structures.
     If the former is not empty, it means the thread exited while
     holding read locks! */

  for (iter = th->p_readlock_list; iter != NULL; iter = next)
    {
      next = iter->pr_next;
      free(iter);
    }

  for (iter = th->p_readlock_free; iter != NULL; iter = next)
    {
      next = iter->pr_next;
      free(iter);
    }

  /* If initial thread, nothing to free */
  if (!th->p_userstack)
    {
      size_t guardsize = th->p_guardsize;
      /* Free the stack and thread descriptor area */
      char *guardaddr = th->p_guardaddr;
#ifdef _STACK_GROWS_UP
      size_t stacksize = guardaddr - (char *)th;
      guardaddr = (char *)th;
#else
      /* Guardaddr is always set, even if guardsize is 0.  This allows
         us to compute everything else.  */
      size_t stacksize = (char *)(th+1) - guardaddr - guardsize;
#ifdef NEED_SEPARATE_REGISTER_STACK
      /* Take account of the register stack, which is below guardaddr.  */
      guardaddr -= stacksize;
      stacksize *= 2;
#endif
#endif
      /* Unmap the stack.  */
      munmap(guardaddr, stacksize + guardsize);
    }
}

/* Handle threads that have exited */

static void pthread_exited(pid_t pid)
{
  pthread_descr th;
  int detached;
  /* Find thread with that pid */
  for (th = __pthread_main_thread->p_nextlive;
       th != __pthread_main_thread;
       th = th->p_nextlive) {
    if (th->p_pid == pid) {
      /* Remove thread from list of active threads */
      th->p_nextlive->p_prevlive = th->p_prevlive;
      th->p_prevlive->p_nextlive = th->p_nextlive;
      /* Mark thread as exited, and if detached, free its resources */
      __pthread_lock(th->p_lock, NULL);
      th->p_exited = 1;
      /* If we have to signal this event do it now.  */
      if (th->p_report_events)
        {
          /* See whether TD_REAP is in any of the masks.  */
          int idx = __td_eventword (TD_REAP);
          uint32_t mask = __td_eventmask (TD_REAP);

          if ((mask & (__pthread_threads_events.event_bits[idx]
                       | th->p_eventbuf.eventmask.event_bits[idx])) != 0)
            {
              /* Yep, we have to signal the reapage.  */
              th->p_eventbuf.eventnum = TD_REAP;
              th->p_eventbuf.eventdata = th;
              __pthread_last_event = th;

              /* Now call the function to signal the event.  */
              __linuxthreads_reap_event();
            }
        }
      detached = th->p_detached;
      __pthread_unlock(th->p_lock);
      if (detached)
        pthread_free(th);
      break;
    }
  }
  /* If all threads have exited and the main thread is pending on a
     pthread_exit, wake up the main thread and terminate ourselves. */
  if (main_thread_exiting &&
      __pthread_main_thread->p_nextlive == __pthread_main_thread) {
    restart(__pthread_main_thread);
    /* Same logic as REQ_MAIN_THREAD_EXIT. */
  }
}

static void pthread_reap_children(void)
{
  pid_t pid;
  int status;

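  /* __WCLONE is needed to collect clone children whose termination
     signal is not SIGCHLD (true of every thread created here), and
     WNOHANG keeps the manager from blocking once all are reaped. */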
  while ((pid = __libc___waitpid(-1, &status, WNOHANG | __WCLONE)) > 0) {
    pthread_exited(pid);
    if (WIFSIGNALED(status)) {
      /* If a thread died due to a signal, send the same signal to
         all other threads, including the main thread. */
      pthread_kill_all_threads(WTERMSIG(status), 1);
      _exit(0);
    }
  }
}

/* Try to free the resources of a thread when requested by pthread_join
   or pthread_detach on a terminated thread. */

static void pthread_handle_free(pthread_t th_id)
{
  pthread_handle handle = thread_handle(th_id);
  pthread_descr th;

  __pthread_lock(&handle->h_lock, NULL);
  if (nonexisting_handle(handle, th_id)) {
    /* pthread_reap_children has deallocated the thread already,
       nothing needs to be done */
    __pthread_unlock(&handle->h_lock);
    return;
  }
  th = handle->h_descr;
  if (th->p_exited) {
    __pthread_unlock(&handle->h_lock);
    pthread_free(th);
  } else {
    /* The Unix process of the thread is still running.
       Mark the thread as detached so that the thread manager will
       deallocate its resources when the Unix process exits. */
    th->p_detached = 1;
    __pthread_unlock(&handle->h_lock);
  }
}

/* Send a signal to all running threads */

static void pthread_kill_all_threads(int sig, int main_thread_also)
{
  pthread_descr th;
  for (th = __pthread_main_thread->p_nextlive;
       th != __pthread_main_thread;
       th = th->p_nextlive) {
    kill(th->p_pid, sig);
  }
  if (main_thread_also) {
    kill(__pthread_main_thread->p_pid, sig);
  }
}

static void pthread_for_each_thread(void *arg,
    void (*fn)(void *, pthread_descr))
{
  pthread_descr th;

  for (th = __pthread_main_thread->p_nextlive;
       th != __pthread_main_thread;
       th = th->p_nextlive) {
    fn(arg, th);
  }

  fn(arg, __pthread_main_thread);
}

/* Process-wide exit() */

static void pthread_handle_exit(pthread_descr issuing_thread, int exitcode)
{
  pthread_descr th;
  __pthread_exit_requested = 1;
  __pthread_exit_code = exitcode;
  /* A forced asynchronous cancellation follows.  Make sure we won't
     get stuck later in the main thread with a system lock being held
     by one of the cancelled threads.  Ideally one would use the same
     code as in pthread_atfork(), but we can't distinguish system and
     user handlers there.  */
  __flockfilelist();
  /* Send the CANCEL signal to all running threads, including the main
     thread, but excluding the thread from which the exit request originated
     (that thread must complete the exit, e.g. calling atexit functions
     and flushing stdio buffers). */
  for (th = issuing_thread->p_nextlive;
       th != issuing_thread;
       th = th->p_nextlive) {
    kill(th->p_pid, __pthread_sig_cancel);
  }
  /* Now, wait for all these threads, so that they don't become zombies
     and their times are properly added to the thread manager's times. */
  for (th = issuing_thread->p_nextlive;
       th != issuing_thread;
       th = th->p_nextlive) {
    __waitpid(th->p_pid, NULL, __WCLONE);
  }
  __fresetlockfiles();
  restart(issuing_thread);
  _exit(0);
}

/* Handler for __pthread_sig_cancel in thread manager thread */

void __pthread_manager_sighandler(int sig)
{
  int kick_manager = terminated_children == 0 && main_thread_exiting;
  terminated_children = 1;

  /* If the main thread is terminating, kick the thread manager loop
     each time some threads terminate. This eliminates a two second
     shutdown delay caused by the thread manager sleeping in the
     call to __poll(). Instead, the thread manager is kicked into
     action, reaps the outstanding threads and resumes the main thread
     so that it can complete the shutdown. */

  if (kick_manager) {
    struct pthread_request request;
    request.req_thread = 0;
    request.req_kind = REQ_KICK;
    TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
                                    (char *) &request, sizeof(request)));
  }
}

/* Adjust priority of thread manager so that it always runs at a priority
   higher than all threads */

void __pthread_manager_adjust_prio(int thread_prio)
{
  struct sched_param param;

  if (thread_prio <= __pthread_manager_thread.p_priority) return;
  param.sched_priority =
    thread_prio < __sched_get_priority_max(SCHED_FIFO)
    ? thread_prio + 1 : thread_prio;
  __sched_setscheduler(__pthread_manager_thread.p_pid, SCHED_FIFO, &param);
  __pthread_manager_thread.p_priority = thread_prio;
}
