OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

[/] [or1k/] [trunk/] [linux/] [uClibc/] [libpthread/] [linuxthreads/] [manager.c] - Blame information for rev 1771

Go to most recent revision | Details | Compare with Previous | View Log

Line No. Rev Author Line
1 1325 phoenix
/* Linuxthreads - a simple clone()-based implementation of Posix        */
2
/* threads for Linux.                                                   */
3
/* Copyright (C) 1996 Xavier Leroy (Xavier.Leroy@inria.fr)              */
4
/*                                                                      */
5
/* This program is free software; you can redistribute it and/or        */
6
/* modify it under the terms of the GNU Library General Public License  */
7
/* as published by the Free Software Foundation; either version 2       */
8
/* of the License, or (at your option) any later version.               */
9
/*                                                                      */
10
/* This program is distributed in the hope that it will be useful,      */
11
/* but WITHOUT ANY WARRANTY; without even the implied warranty of       */
12
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the        */
13
/* GNU Library General Public License for more details.                 */
14
 
15
/* The "thread manager" thread: manages creation and termination of threads */
16
 
17
/* mods for uClibc: getpwd and getpagesize are the syscalls */
18
#define __getpid getpid
19
#define __getpagesize getpagesize
20
 
21
#include <features.h>
22
#define __USE_GNU
23
#include <errno.h>
24
#include <sched.h>
25
#include <stddef.h>
26
#include <stdio.h>
27
#include <stdlib.h>
28
#include <string.h>
29
#include <unistd.h>
30
#include <sys/poll.h>           /* for poll */
31
#include <sys/mman.h>           /* for mmap */
32
#include <sys/param.h>
33
#include <sys/time.h>
34
#include <sys/wait.h>           /* for waitpid macros */
35
 
36
#include "pthread.h"
37
#include "internals.h"
38
#include "spinlock.h"
39
#include "restart.h"
40
#include "semaphore.h"
41
#include "debug.h" /* PDEBUG, added by StS */
42
 
43
 
44
/* poll() is not supported in kernel <= 2.0, therefore is __NR_poll is
45
 * not available, we assume an old Linux kernel is in use and we will
46
 * use select() instead. */
47
#include <sys/syscall.h>
48
#ifndef __NR_poll
49
# define USE_SELECT
50
#endif
51
 
52
 
53
/* Array of active threads. Entry 0 is reserved for the initial thread,
   entry 1 for the manager thread itself; user threads start at index 2. */
struct pthread_handle_struct __pthread_handles[PTHREAD_THREADS_MAX] =
{ { __LOCK_INITIALIZER, &__pthread_initial_thread, 0},
  { __LOCK_INITIALIZER, &__pthread_manager_thread, 0}, /* All NULLs */ };

/* For debugging purposes put the maximum number of threads in a variable.  */
const int __linuxthreads_pthread_threads_max = PTHREAD_THREADS_MAX;

/* Indicate whether at least one thread has a user-defined stack (if 1),
   or if all threads have stacks supplied by LinuxThreads (if 0). */
int __pthread_nonstandard_stacks;

/* Number of active entries in __pthread_handles (used by gdb) */
volatile int __pthread_handles_num = 2;

/* Whether to use debugger additional actions for thread creation
   (set to 1 by gdb) */
volatile int __pthread_threads_debug;

/* Globally enabled events.  */
volatile td_thr_events_t __pthread_threads_events;

/* Pointer to thread descriptor with last event.  */
volatile pthread_descr __pthread_last_event;

/* Mapping from stack segment to thread descriptor. */
/* Stack segment numbers are also indices into the __pthread_handles array. */
/* Stack segment number 0 is reserved for the initial thread. */
81
 
82
static inline pthread_descr thread_segment(int seg)
83
{
84
  return (pthread_descr)(THREAD_STACK_START_ADDRESS - (seg - 1) * STACK_SIZE)
85
         - 1;
86
}
87
 
88
/* Flag set in signal handler to record child termination; the manager
   loop checks and clears it on every iteration. */

static volatile int terminated_children = 0;

/* Flag set when the initial thread is blocked on pthread_exit waiting
   for all other threads to terminate */

static int main_thread_exiting = 0;

/* Counter used to generate unique thread identifier.
   Thread identifier is pthread_threads_counter + segment.
   Bumped by PTHREAD_THREADS_MAX per creation so ids are never reused
   for the same segment consecutively. */

static pthread_t pthread_threads_counter = 0;

/* Forward declarations of the request-servicing helpers defined below. */

static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr,
                                 void * (*start_routine)(void *), void *arg,
                                 sigset_t *mask, int father_pid,
                                 int report_events,
                                 td_thr_events_t *event_maskp);
static void pthread_handle_free(pthread_t th_id);
static void pthread_handle_exit(pthread_descr issuing_thread, int exitcode);
static void pthread_reap_children(void);
static void pthread_kill_all_threads(int sig, int main_thread_also);
113
 
114
/* The server thread managing requests for thread creation and termination */
115
 
116
int __pthread_manager(void *arg)
117
{
118
  int reqfd = (int) (long int) arg;
119
#ifdef USE_SELECT
120
  struct timeval tv;
121
  fd_set fd;
122
#else
123
  struct pollfd ufd;
124
#endif
125
  sigset_t manager_mask;
126
  int n;
127
  struct pthread_request request;
128
 
129
  /* If we have special thread_self processing, initialize it.  */
130
#ifdef INIT_THREAD_SELF
131
  INIT_THREAD_SELF(&__pthread_manager_thread, 1);
132
#endif
133
  /* Set the error variable.  */
134
  __pthread_manager_thread.p_errnop = &__pthread_manager_thread.p_errno;
135
  __pthread_manager_thread.p_h_errnop = &__pthread_manager_thread.p_h_errno;
136
 
137
#ifdef __UCLIBC_HAS_XLOCALE__
138
  /* Initialize thread's locale to the global locale. */
139
  __pthread_manager_thread.locale = __global_locale;
140
#endif /* __UCLIBC_HAS_XLOCALE__ */
141
 
142
  /* Block all signals except __pthread_sig_cancel and SIGTRAP */
143
  sigfillset(&manager_mask);
144
  sigdelset(&manager_mask, __pthread_sig_cancel); /* for thread termination */
145
  sigdelset(&manager_mask, SIGTRAP);            /* for debugging purposes */
146
  if (__pthread_threads_debug && __pthread_sig_debug > 0)
147
      sigdelset(&manager_mask, __pthread_sig_debug);
148
  sigprocmask(SIG_SETMASK, &manager_mask, NULL);
149
  /* Raise our priority to match that of main thread */
150
  __pthread_manager_adjust_prio(__pthread_main_thread->p_priority);
151
  /* Synchronize debugging of the thread manager */
152
  n = TEMP_FAILURE_RETRY(__libc_read(reqfd, (char *)&request,
153
                                     sizeof(request)));
154
  ASSERT(n == sizeof(request) && request.req_kind == REQ_DEBUG);
155
#ifndef USE_SELECT
156
  ufd.fd = reqfd;
157
  ufd.events = POLLIN;
158
#endif
159
  /* Enter server loop */
160
  while(1) {
161
#ifdef USE_SELECT
162
    tv.tv_sec = 2;
163
    tv.tv_usec = 0;
164
    FD_ZERO (&fd);
165
    FD_SET (reqfd, &fd);
166
    n = select (reqfd + 1, &fd, NULL, NULL, &tv);
167
#else
168
PDEBUG("before poll\n");
169
    n = poll(&ufd, 1, 2000);
170
PDEBUG("after poll\n");
171
#endif
172
    /* Check for termination of the main thread */
173
    if (getppid() == 1) {
174
      pthread_kill_all_threads(SIGKILL, 0);
175
      _exit(0);
176
    }
177
    /* Check for dead children */
178
    if (terminated_children) {
179
      terminated_children = 0;
180
      pthread_reap_children();
181
    }
182
    /* Read and execute request */
183
#ifdef USE_SELECT
184
    if (n == 1)
185
#else
186
    if (n == 1 && (ufd.revents & POLLIN))
187
#endif
188
    {
189
 
190
PDEBUG("before __libc_read\n");
191
      n = __libc_read(reqfd, (char *)&request, sizeof(request));
192
PDEBUG("after __libc_read, n=%d\n", n);
193
      ASSERT(n == sizeof(request));
194
      switch(request.req_kind) {
195
      case REQ_CREATE:
196
PDEBUG("got REQ_CREATE\n");
197
        request.req_thread->p_retcode =
198
          pthread_handle_create((pthread_t *) &request.req_thread->p_retval,
199
                                request.req_args.create.attr,
200
                                request.req_args.create.fn,
201
                                request.req_args.create.arg,
202
                                &request.req_args.create.mask,
203
                                request.req_thread->p_pid,
204
                                request.req_thread->p_report_events,
205
                                &request.req_thread->p_eventbuf.eventmask);
206
PDEBUG("restarting %d\n", request.req_thread);
207
        restart(request.req_thread);
208
        break;
209
      case REQ_FREE:
210
PDEBUG("got REQ_FREE\n");
211
        pthread_handle_free(request.req_args.free.thread_id);
212
        break;
213
      case REQ_PROCESS_EXIT:
214
PDEBUG("got REQ_PROCESS_EXIT from %d, exit code = %d\n",
215
                request.req_thread, request.req_args.exit.code);
216
        pthread_handle_exit(request.req_thread,
217
                            request.req_args.exit.code);
218
        break;
219
      case REQ_MAIN_THREAD_EXIT:
220
PDEBUG("got REQ_MAIN_THREAD_EXIT\n");
221
        main_thread_exiting = 1;
222
        /* Reap children in case all other threads died and the signal handler
223
           went off before we set main_thread_exiting to 1, and therefore did
224
           not do REQ_KICK. */
225
        pthread_reap_children();
226
 
227
        if (__pthread_main_thread->p_nextlive == __pthread_main_thread) {
228
          restart(__pthread_main_thread);
229
          /* The main thread will now call exit() which will trigger an
230
             __on_exit handler, which in turn will send REQ_PROCESS_EXIT
231
             to the thread manager. In case you are wondering how the
232
             manager terminates from its loop here. */
233
        }
234
        break;
235
      case REQ_POST:
236
PDEBUG("got REQ_POST\n");
237
        __new_sem_post(request.req_args.post);
238
        break;
239
      case REQ_DEBUG:
240
PDEBUG("got REQ_DEBUG\n");
241
        /* Make gdb aware of new thread and gdb will restart the
242
           new thread when it is ready to handle the new thread. */
243
        if (__pthread_threads_debug && __pthread_sig_debug > 0) {
244
PDEBUG("about to call raise(__pthread_sig_debug)\n");
245
          raise(__pthread_sig_debug);
246
        }
247
      case REQ_KICK:
248
        /* This is just a prod to get the manager to reap some
249
           threads right away, avoiding a potential delay at shutdown. */
250
        break;
251
      }
252
    }
253
  }
254
}
255
 
256
/* Manager entry point used when thread-debugging events are enabled:
   blocks on the manager's own p_lock (held by the creator until setup
   and event reporting are complete), then runs the normal manager loop. */
int __pthread_manager_event(void *arg)
{
  /* If we have special thread_self processing, initialize it.  */
#ifdef INIT_THREAD_SELF
  INIT_THREAD_SELF(&__pthread_manager_thread, 1);
#endif

  /* Get the lock the manager will free once all is correctly set up.  */
  __pthread_lock (THREAD_GETMEM((&__pthread_manager_thread), p_lock), NULL);
  /* Free it immediately.  */
  __pthread_unlock (THREAD_GETMEM((&__pthread_manager_thread), p_lock));

  return __pthread_manager(arg);
}
270
 
271
/* Process creation */
272
/* Entry point of every new thread (the function handed to clone()):
   finishes setting up the thread's pid, signal mask and scheduling,
   optionally synchronizes with gdb, runs the user's start routine and
   terminates via pthread_exit().  Never returns. */
static int
__attribute__ ((noreturn))
pthread_start_thread(void *arg)
{
  pthread_descr self = (pthread_descr) arg;
  struct pthread_request request;
  void * outcome;
  /* Initialize special thread_self processing, if any.  */
#ifdef INIT_THREAD_SELF
  INIT_THREAD_SELF(self, self->p_nr);
#endif
PDEBUG("\n");
  /* Make sure our pid field is initialized, just in case we get there
     before our father has initialized it. */
  THREAD_SETMEM(self, p_pid, __getpid());
  /* Initial signal mask is that of the creating thread. (Otherwise,
     we'd just inherit the mask of the thread manager.) */
  sigprocmask(SIG_SETMASK, &self->p_start_args.mask, NULL);
  /* Set the scheduling policy and priority for the new thread, if needed */
  if (THREAD_GETMEM(self, p_start_args.schedpolicy) >= 0)
    /* Explicit scheduling attributes were provided: apply them */
    sched_setscheduler(THREAD_GETMEM(self, p_pid),
                         THREAD_GETMEM(self, p_start_args.schedpolicy),
                         &self->p_start_args.schedparam);
  else if (__pthread_manager_thread.p_priority > 0)
    /* Default scheduling required, but thread manager runs in realtime
       scheduling: switch new thread to SCHED_OTHER policy */
    {
      struct sched_param default_params;
      default_params.sched_priority = 0;
      sched_setscheduler(THREAD_GETMEM(self, p_pid),
                           SCHED_OTHER, &default_params);
    }
  /* Make gdb aware of new thread: send REQ_DEBUG to the manager and
     suspend until it (or gdb) restarts us. */
  if (__pthread_threads_debug && __pthread_sig_debug > 0) {
    request.req_thread = self;
    request.req_kind = REQ_DEBUG;
    TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
                (char *) &request, sizeof(request)));
    suspend(self);
  }
  /* Run the thread code */
  outcome = self->p_start_args.start_routine(THREAD_GETMEM(self,
                                                           p_start_args.arg));
  /* Exit with the given return value */
  pthread_exit(outcome);
}
319
 
320
/* Alternate clone() entry point used when a TD_CREATE event must be
   reported to the debugger: waits until the manager releases this
   thread's p_lock (held while the event is delivered), then proceeds
   with the normal start code.  Never returns. */
static int
__attribute__ ((noreturn))
pthread_start_thread_event(void *arg)
{
  pthread_descr self = (pthread_descr) arg;

#ifdef INIT_THREAD_SELF
  INIT_THREAD_SELF(self, self->p_nr);
#endif
  /* Make sure our pid field is initialized, just in case we get there
     before our father has initialized it. */
  THREAD_SETMEM(self, p_pid, __getpid());
  /* Get the lock the manager will free once all is correctly set up.  */
  __pthread_lock (THREAD_GETMEM(self, p_lock), NULL);
  /* Free it immediately.  */
  __pthread_unlock (THREAD_GETMEM(self, p_lock));

  /* Continue with the real function.  */
  pthread_start_thread (arg);
}
340
 
341
/* Allocate (or adopt) the stack and thread-descriptor area for a new
   thread.  Three cases:
     - user supplied a stack in `attr`: adopt it, no guard page;
     - MMU systems: mmap the initial stack pages at the fixed segment
       address `default_new_thread`, plus an optional guard page;
     - non-MMU systems: malloc() the stack.
   On success, the outputs (descriptor pointer, stack bottom, guard
   address/size) are stored through the out_* parameters and 0 is
   returned; on failure -1 is returned. */
static int pthread_allocate_stack(const pthread_attr_t *attr,
                                  pthread_descr default_new_thread,
                                  int pagesize,
                                  pthread_descr * out_new_thread,
                                  char ** out_new_thread_bottom,
                                  char ** out_guardaddr,
                                  size_t * out_guardsize)
{
  pthread_descr new_thread;
  char * new_thread_bottom;
  char * guardaddr;
  size_t stacksize, guardsize;

  if (attr != NULL && attr->__stackaddr_set)
    {
      /* The user provided a stack.  The descriptor is placed just below
         the word-aligned stack top. */
      new_thread =
        (pthread_descr) ((long)(attr->__stackaddr) & -sizeof(void *)) - 1;
      new_thread_bottom = (char *) attr->__stackaddr - attr->__stacksize;
      guardaddr = NULL;
      guardsize = 0;
      __pthread_nonstandard_stacks = 1;
    }
  else
    {
#ifdef __UCLIBC_HAS_MMU__
      stacksize = STACK_SIZE - pagesize;
      if (attr != NULL)
        stacksize = MIN (stacksize, roundup(attr->__stacksize, pagesize));
      /* Allocate space for stack and thread descriptor at default address */
      new_thread = default_new_thread;
      new_thread_bottom = (char *) (new_thread + 1) - stacksize;
      if (mmap((caddr_t)((char *)(new_thread + 1) - INITIAL_STACK_SIZE),
               INITIAL_STACK_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC,
               MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_GROWSDOWN,
               -1, 0) == MAP_FAILED)
        /* Bad luck, this segment is already mapped. */
        return -1;
      /* We manage to get a stack.  Now see whether we need a guard
         and allocate it if necessary.  Notice that the default
         attributes (stack_size = STACK_SIZE - pagesize) do not need
         a guard page, since the RLIMIT_STACK soft limit prevents stacks
         from running into one another. */
      if (stacksize == STACK_SIZE - pagesize)
        {
          /* We don't need a guard page. */
          guardaddr = NULL;
          guardsize = 0;
        }
      else
        {
          /* Put a bad page at the bottom of the stack */
          guardsize = attr->__guardsize;
          guardaddr = (void *)new_thread_bottom - guardsize;
          if (mmap ((caddr_t) guardaddr, guardsize, 0, MAP_FIXED, -1, 0)
              == MAP_FAILED)
            {
              /* We don't make this an error.  */
              guardaddr = NULL;
              guardsize = 0;
            }
        }
#else
      /* We cannot mmap to this huge chunk of stack space when we don't have
       * an MMU. Pretend we are using a user provided stack even if there was
       * none provided by the user. Thus, we get around the mmap and reservation
       * of a huge stack segment. -StS */

      stacksize = INITIAL_STACK_SIZE;
      /* The user may want to use a non-default stacksize */
      if (attr != NULL)
        {
          stacksize = attr->__stacksize;
        }

      /* malloc a stack - memory from the bottom up */
      if ((new_thread_bottom = malloc(stacksize)) == NULL)
        {
          /* bad luck, we cannot malloc any more */
          return -1 ;
        }
      PDEBUG("malloced chunk: base=%p, size=0x%04x\n", new_thread_bottom, stacksize);

      /* Set up the pointers. new_thread marks the TOP of the stack frame and
       * the address of the pthread_descr struct at the same time. Therefore we
       * must account for its size and fit it in the malloc()'ed block. The
       * value of `new_thread' is then passed to clone() as the stack argument.
       *
       *               ^ +------------------------+
       *               | |  pthread_descr struct  |
       *               | +------------------------+  <- new_thread
       * malloc block  | |                        |
       *               | |  thread stack          |
       *               | |                        |
       *               v +------------------------+  <- new_thread_bottom
       *
       * Note: The calculated value of new_thread must be word aligned otherwise
       * the kernel chokes on a non-aligned stack frame. Choose the lower
       * available word boundary.
       */
      new_thread = ((pthread_descr) ((int)(new_thread_bottom + stacksize) & -sizeof(void*))) - 1;
      guardaddr = NULL;
      guardsize = 0;

      PDEBUG("thread stack: bos=%p, tos=%p\n", new_thread_bottom, new_thread);

      /* check the initial thread stack boundaries so they don't overlap */
      NOMMU_INITIAL_THREAD_BOUNDS((char *) new_thread, (char *) new_thread_bottom);

      PDEBUG("initial stack: bos=%p, tos=%p\n", __pthread_initial_thread_bos,
             __pthread_initial_thread_tos);

      /* on non-MMU systems we always have non-standard stack frames */
      __pthread_nonstandard_stacks = 1;

#endif /* __UCLIBC_HAS_MMU__ */
    }

  /* Clear the thread data structure.  */
  memset (new_thread, '\0', sizeof (*new_thread));
  *out_new_thread = new_thread;
  *out_new_thread_bottom = new_thread_bottom;
  *out_guardaddr = guardaddr;
  *out_guardsize = guardsize;
  return 0;
}
467
 
468
/* Service a REQ_CREATE request: find a free stack segment, allocate
   stack and descriptor, initialize the descriptor from `attr`, link the
   new thread into the live list, clone() the new process (reporting a
   TD_CREATE event to the debugger if requested), and store the new
   thread id through `thread`.  Returns 0 on success, or an errno value
   (EPERM, EAGAIN, or the clone() errno) on failure. */
static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr,
                                 void * (*start_routine)(void *), void *arg,
                                 sigset_t * mask, int father_pid,
                                 int report_events,
                                 td_thr_events_t *event_maskp)
{
  size_t sseg;
  int pid;
  pthread_descr new_thread;
  char * new_thread_bottom;
  pthread_t new_thread_id;
  char *guardaddr = NULL;
  size_t guardsize = 0;
  int pagesize = __getpagesize();
  int saved_errno = 0;

  /* First check whether we have to change the policy and if yes, whether
     we can  do this.  Normally this should be done by examining the
     return value of the sched_setscheduler call in pthread_start_thread
     but this is hard to implement.  FIXME  */
  if (attr != NULL && attr->__schedpolicy != SCHED_OTHER && geteuid () != 0)
    return EPERM;
  /* Find a free segment for the thread, and allocate a stack if needed */
  for (sseg = 2; ; sseg++)
    {
      if (sseg >= PTHREAD_THREADS_MAX)
        return EAGAIN;
      if (__pthread_handles[sseg].h_descr != NULL)
        continue;
      if (pthread_allocate_stack(attr, thread_segment(sseg), pagesize,
                                 &new_thread, &new_thread_bottom,
                                 &guardaddr, &guardsize) == 0)
        break;
    }
  __pthread_handles_num++;
  /* Allocate new thread identifier */
  pthread_threads_counter += PTHREAD_THREADS_MAX;
  new_thread_id = sseg + pthread_threads_counter;
  /* Initialize the thread descriptor.  Elements which have to be
     initialized to zero already have this value.  */
  new_thread->p_tid = new_thread_id;
  new_thread->p_lock = &(__pthread_handles[sseg].h_lock);
  new_thread->p_cancelstate = PTHREAD_CANCEL_ENABLE;
  new_thread->p_canceltype = PTHREAD_CANCEL_DEFERRED;
  new_thread->p_errnop = &new_thread->p_errno;
  new_thread->p_h_errnop = &new_thread->p_h_errno;
#ifdef __UCLIBC_HAS_XLOCALE__
  /* Initialize thread's locale to the global locale. */
  new_thread->locale = __global_locale;
#endif /* __UCLIBC_HAS_XLOCALE__ */
  new_thread->p_guardaddr = guardaddr;
  new_thread->p_guardsize = guardsize;
  new_thread->p_self = new_thread;
  new_thread->p_nr = sseg;
  /* Initialize the thread handle */
  __pthread_init_lock(&__pthread_handles[sseg].h_lock);
  __pthread_handles[sseg].h_descr = new_thread;
  __pthread_handles[sseg].h_bottom = new_thread_bottom;
  /* Determine scheduling parameters for the thread.
     schedpolicy == -1 means "inherit nothing explicit". */
  new_thread->p_start_args.schedpolicy = -1;
  if (attr != NULL) {
    new_thread->p_detached = attr->__detachstate;
    new_thread->p_userstack = attr->__stackaddr_set;

    switch(attr->__inheritsched) {
    case PTHREAD_EXPLICIT_SCHED:
      new_thread->p_start_args.schedpolicy = attr->__schedpolicy;
      memcpy (&new_thread->p_start_args.schedparam, &attr->__schedparam,
              sizeof (struct sched_param));
      break;
    case PTHREAD_INHERIT_SCHED:
      new_thread->p_start_args.schedpolicy = sched_getscheduler(father_pid);
      sched_getparam(father_pid, &new_thread->p_start_args.schedparam);
      break;
    }
    new_thread->p_priority =
      new_thread->p_start_args.schedparam.sched_priority;
  }
  /* Finish setting up arguments to pthread_start_thread */
  new_thread->p_start_args.start_routine = start_routine;
  new_thread->p_start_args.arg = arg;
  new_thread->p_start_args.mask = *mask;
  /* Raise priority of thread manager if needed */
  __pthread_manager_adjust_prio(new_thread->p_priority);
  /* Do the cloning.  We have to use two different functions depending
     on whether we are debugging or not.  */
  pid = 0;     /* Note that the thread never can have PID zero.  */


  /* ******************************************************** */
  /*  This code was moved from below to cope with running threads
   *  on uClinux systems.  See comment below...
   * Insert new thread in doubly linked list of active threads */
  new_thread->p_prevlive = __pthread_main_thread;
  new_thread->p_nextlive = __pthread_main_thread->p_nextlive;
  __pthread_main_thread->p_nextlive->p_prevlive = new_thread;
  __pthread_main_thread->p_nextlive = new_thread;
  /* ********************************************************* */

  if (report_events)
    {
      /* See whether the TD_CREATE event bit is set in any of the
         masks.  */
      int idx = __td_eventword (TD_CREATE);
      uint32_t mask = __td_eventmask (TD_CREATE);

      if ((mask & (__pthread_threads_events.event_bits[idx]
                   | event_maskp->event_bits[idx])) != 0)
        {
          /* Lock the mutex the child will use now so that it will stop.  */
          __pthread_lock(new_thread->p_lock, NULL);

          /* We have to report this event.  */
          pid = clone(pthread_start_thread_event, (void **) new_thread,
                        CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
                        __pthread_sig_cancel, new_thread);

          saved_errno = errno;
          if (pid != -1)
            {
              /* Now fill in the information about the new thread in
                 the newly created thread's data structure.  We cannot let
                 the new thread do this since we don't know whether it was
                 already scheduled when we send the event.  */
              new_thread->p_eventbuf.eventdata = new_thread;
              new_thread->p_eventbuf.eventnum = TD_CREATE;
              __pthread_last_event = new_thread;

              /* We have to set the PID here since the callback function
                 in the debug library will need it and we cannot guarantee
                 the child got scheduled before the debugger.  */
              new_thread->p_pid = pid;

              /* Now call the function which signals the event.  */
              __linuxthreads_create_event ();

              /* Now restart the thread.  */
              __pthread_unlock(new_thread->p_lock);
            }
        }
    }
  if (pid == 0)
    {
PDEBUG("cloning new_thread = %p\n", new_thread);
      pid = clone(pthread_start_thread, (void **) new_thread,
                    CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
                    __pthread_sig_cancel, new_thread);
      saved_errno = errno;
    }
  /* Check if cloning succeeded */
  if (pid == -1) {
    /********************************************************
     * Code inserted to remove the thread from our list of active
     * threads in case of failure (needed to cope with uClinux),
     * See comment below. */
    new_thread->p_nextlive->p_prevlive = new_thread->p_prevlive;
    new_thread->p_prevlive->p_nextlive = new_thread->p_nextlive;
    /********************************************************/

    /* Free the stack if we allocated it */
    if (attr == NULL || !attr->__stackaddr_set)
      {
#ifdef __UCLIBC_HAS_MMU__
        if (new_thread->p_guardsize != 0)
          munmap(new_thread->p_guardaddr, new_thread->p_guardsize);
        munmap((caddr_t)((char *)(new_thread+1) - INITIAL_STACK_SIZE),
               INITIAL_STACK_SIZE);
#else
        free(new_thread_bottom);
#endif /* __UCLIBC_HAS_MMU__ */
      }
    __pthread_handles[sseg].h_descr = NULL;
    __pthread_handles[sseg].h_bottom = NULL;
    __pthread_handles_num--;
    return errno;
  }
PDEBUG("new thread pid = %d\n", pid);

#if 0
  /* ***********************************************************
   This code has been moved before the call to clone().  In uClinux,
   the use of wait on a semaphore is dependant upon that the child so
   the child must be in the active threads list. This list is used in
   pthread_find_self() to get the pthread_descr of self. So, if the
   child calls sem_wait before this code is executed , it will hang
   forever and initial_thread will instead be posted by a sem_post
   call. */

  /* Insert new thread in doubly linked list of active threads */
  new_thread->p_prevlive = __pthread_main_thread;
  new_thread->p_nextlive = __pthread_main_thread->p_nextlive;
  __pthread_main_thread->p_nextlive->p_prevlive = new_thread;
  __pthread_main_thread->p_nextlive = new_thread;
  /************************************************************/
#endif

  /* Set pid field of the new thread, in case we get there before the
     child starts. */
  new_thread->p_pid = pid;
  /* We're all set */
  *thread = new_thread_id;
  return 0;
}
671
 
672
 
673
/* Try to free the resources of a thread when requested by pthread_join
674
   or pthread_detach on a terminated thread. */
675
 
676
/* Release all resources of an exited thread: invalidate its handle,
   free its read-lock bookkeeping, and return its stack (munmap on MMU
   systems, free() of the malloc'ed block otherwise).  The caller must
   guarantee th->p_exited is already set. */
static void pthread_free(pthread_descr th)
{
  pthread_handle handle;
  pthread_readlock_info *iter, *next;
  char *h_bottom_save;

  ASSERT(th->p_exited);
  /* Make the handle invalid */
  handle =  thread_handle(th->p_tid);
  __pthread_lock(&handle->h_lock, NULL);
  /* Save h_bottom before poisoning it: on non-MMU systems it is the
     pointer we must free(). */
  h_bottom_save = handle->h_bottom;
  handle->h_descr = NULL;
  handle->h_bottom = (char *)(-1L);
  __pthread_unlock(&handle->h_lock);
#ifdef FREE_THREAD_SELF
  FREE_THREAD_SELF(th, th->p_nr);
#endif
  /* One fewer threads in __pthread_handles */
  __pthread_handles_num--;

  /* Destroy read lock list, and list of free read lock structures.
     If the former is not empty, it means the thread exited while
     holding read locks! */

  for (iter = th->p_readlock_list; iter != NULL; iter = next)
    {
      next = iter->pr_next;
      free(iter);
    }

  for (iter = th->p_readlock_free; iter != NULL; iter = next)
    {
      next = iter->pr_next;
      free(iter);
    }

  /* If initial thread, nothing to free */
  if (th == &__pthread_initial_thread) return;
#ifdef __UCLIBC_HAS_MMU__
  if (!th->p_userstack)
    {
      /* Free the stack and thread descriptor area */
      if (th->p_guardsize != 0)
        munmap(th->p_guardaddr, th->p_guardsize);
      munmap((caddr_t) ((char *)(th+1) - STACK_SIZE), STACK_SIZE);
    }
#else
  /* For non-MMU systems we always malloc the stack, so free it here. -StS */
  if (!th->p_userstack) {
      free(h_bottom_save);
  }
#endif /* __UCLIBC_HAS_MMU__ */
}
729
 
730
/* Handle threads that have exited */
731
 
732
/* Called by the manager when waitpid() reports that the thread process
   `pid` has died: unlink it from the live list, mark it exited (freeing
   its resources immediately if detached), report a TD_REAP event to the
   debugger when requested, and wake the main thread if it was the last
   live thread and the main thread is blocked in pthread_exit. */
static void pthread_exited(pid_t pid)
{
  pthread_descr th;
  int detached;
  /* Find thread with that pid */
  for (th = __pthread_main_thread->p_nextlive;
       th != __pthread_main_thread;
       th = th->p_nextlive) {
    if (th->p_pid == pid) {
      /* Remove thread from list of active threads */
      th->p_nextlive->p_prevlive = th->p_prevlive;
      th->p_prevlive->p_nextlive = th->p_nextlive;
      /* Mark thread as exited, and if detached, free its resources */
      __pthread_lock(th->p_lock, NULL);
      th->p_exited = 1;
      /* If we have to signal this event do it now.  */
      if (th->p_report_events)
        {
          /* See whether TD_REAP is in any of the mask.  */
          int idx = __td_eventword (TD_REAP);
          uint32_t mask = __td_eventmask (TD_REAP);

          if ((mask & (__pthread_threads_events.event_bits[idx]
                       | th->p_eventbuf.eventmask.event_bits[idx])) != 0)
            {
              /* Yep, we have to signal the reapage.  */
              th->p_eventbuf.eventnum = TD_REAP;
              th->p_eventbuf.eventdata = th;
              __pthread_last_event = th;

              /* Now call the function to signal the event.  */
              __linuxthreads_reap_event();
            }
        }
      /* Read p_detached under the lock; pthread_free must be called
         after the lock is dropped. */
      detached = th->p_detached;
      __pthread_unlock(th->p_lock);
      if (detached)
        pthread_free(th);
      break;
    }
  }
  /* If all threads have exited and the main thread is pending on a
     pthread_exit, wake up the main thread and terminate ourselves. */
  if (main_thread_exiting &&
      __pthread_main_thread->p_nextlive == __pthread_main_thread) {
    restart(__pthread_main_thread);
    /* Same logic as REQ_MAIN_THREAD_EXIT. */
  }
}
781
 
782
/* Reap every clone child that is currently waitable, without
   blocking.  A child that died from a signal causes that signal to be
   propagated to all other threads, after which the process exits. */
static void pthread_reap_children(void)
{
  int status;
  pid_t child;

PDEBUG("\n");

  for (;;) {
    child = __libc_waitpid(-1, &status, WNOHANG | __WCLONE);
    if (child <= 0)
      break;
    /* Release the manager's bookkeeping for this thread. */
    pthread_exited(child);
    if (WIFSIGNALED(status)) {
      /* If a thread died due to a signal, send the same signal to
         all other threads, including the main thread. */
      pthread_kill_all_threads(WTERMSIG(status), 1);
      _exit(0);
    }
  }
}
798
 
799
/* Try to free the resources of a thread when requested by pthread_join
800
   or pthread_detach on a terminated thread. */
801
 
802
static void pthread_handle_free(pthread_t th_id)
803
{
804
  pthread_handle handle = thread_handle(th_id);
805
  pthread_descr th;
806
 
807
  __pthread_lock(&handle->h_lock, NULL);
808
  if (invalid_handle(handle, th_id)) {
809
    /* pthread_reap_children has deallocated the thread already,
810
       nothing needs to be done */
811
    __pthread_unlock(&handle->h_lock);
812
    return;
813
  }
814
  th = handle->h_descr;
815
  if (th->p_exited) {
816
    __pthread_unlock(&handle->h_lock);
817
    pthread_free(th);
818
  } else {
819
    /* The Unix process of the thread is still running.
820
       Mark the thread as detached so that the thread manager will
821
       deallocate its resources when the Unix process exits. */
822
    th->p_detached = 1;
823
    __pthread_unlock(&handle->h_lock);
824
  }
825
}
826
 
827
/* Send a signal to all running threads */
828
 
829
static void pthread_kill_all_threads(int sig, int main_thread_also)
830
{
831
  pthread_descr th;
832
  for (th = __pthread_main_thread->p_nextlive;
833
       th != __pthread_main_thread;
834
       th = th->p_nextlive) {
835
    kill(th->p_pid, sig);
836
  }
837
  if (main_thread_also) {
838
    kill(__pthread_main_thread->p_pid, sig);
839
  }
840
}
841
 
842
/* Process-wide exit() */
843
 
844
static void pthread_handle_exit(pthread_descr issuing_thread, int exitcode)
845
{
846
  pthread_descr th;
847
  __pthread_exit_requested = 1;
848
  __pthread_exit_code = exitcode;
849
  /* Send the CANCEL signal to all running threads, including the main
850
     thread, but excluding the thread from which the exit request originated
851
     (that thread must complete the exit, e.g. calling atexit functions
852
     and flushing stdio buffers). */
853
  for (th = issuing_thread->p_nextlive;
854
       th != issuing_thread;
855
       th = th->p_nextlive) {
856
    kill(th->p_pid, __pthread_sig_cancel);
857
  }
858
  /* Now, wait for all these threads, so that they don't become zombies
859
     and their times are properly added to the thread manager's times. */
860
  for (th = issuing_thread->p_nextlive;
861
       th != issuing_thread;
862
       th = th->p_nextlive) {
863
    waitpid(th->p_pid, NULL, __WCLONE);
864
  }
865
  restart(issuing_thread);
866
  _exit(0);
867
}
868
 
869
/* Handler for __pthread_sig_cancel in thread manager thread */
870
 
871
void __pthread_manager_sighandler(int sig)
872
{
873
    int kick_manager = terminated_children == 0 && main_thread_exiting;
874
    terminated_children = 1;
875
 
876
    /* If the main thread is terminating, kick the thread manager loop
877
       each time some threads terminate. This eliminates a two second
878
       shutdown delay caused by the thread manager sleeping in the
879
       call to __poll(). Instead, the thread manager is kicked into
880
       action, reaps the outstanding threads and resumes the main thread
881
       so that it can complete the shutdown. */
882
 
883
    if (kick_manager) {
884
        struct pthread_request request;
885
        request.req_thread = 0;
886
        request.req_kind = REQ_KICK;
887
        TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
888
                    (char *) &request, sizeof(request)));
889
    }
890
}
891
 
892
/* Adjust priority of thread manager so that it always run at a priority
893
   higher than all threads */
894
 
895
void __pthread_manager_adjust_prio(int thread_prio)
896
{
897
  struct sched_param param;
898
 
899
  if (thread_prio <= __pthread_manager_thread.p_priority) return;
900
  param.sched_priority =
901
    thread_prio < sched_get_priority_max(SCHED_FIFO)
902
    ? thread_prio + 1 : thread_prio;
903
  sched_setscheduler(__pthread_manager_thread.p_pid, SCHED_FIFO, &param);
904
  __pthread_manager_thread.p_priority = thread_prio;
905
}

powered by: WebSVN 2.1.0

© copyright 1999-2025 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.