OpenCores
URL https://opencores.org/ocsvn/scarts/scarts/trunk

Subversion Repositories scarts

scarts/trunk/toolchain/scarts-newlib/newlib-1.17.0/newlib/libc/sys/linux/linuxthreads/timer_routines.c (rev 9)

/* Helper code for POSIX timer implementation on LinuxThreads.
   Copyright (C) 2000, 2001 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Kaz Kylheku <kaz@ashi.footprints.net>.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If not,
   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <sysdep.h>
#include <time.h>
#include <unistd.h>
#include <sys/syscall.h>

#include "posix-timer.h"


/* Number of threads used.  */
#define THREAD_MAXNODES 16

/* Array containing the descriptors for the used threads.  */
static struct thread_node thread_array[THREAD_MAXNODES];

/* Static array with the structures for all the timers.  */
struct timer_node __timer_array[TIMER_MAX];

/* Global lock to protect operation on the lists.  */
pthread_mutex_t __timer_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Variable to protect initialization.  */
pthread_once_t __timer_init_once_control = PTHREAD_ONCE_INIT;

/* Nonzero if initialization of timer implementation failed.  */
int __timer_init_failed;

/* Node for the thread used to deliver signals.  */
struct thread_node __timer_signal_thread_rclk;
#ifdef _POSIX_CPUTIME
struct thread_node __timer_signal_thread_pclk;
#endif
#ifdef _POSIX_THREAD_CPUTIME
struct thread_node __timer_signal_thread_tclk;
#endif

/* Lists to keep free and used timers and threads.  */
struct list_links timer_free_list;
struct list_links thread_free_list;
struct list_links thread_active_list;


#ifdef __NR_rt_sigqueueinfo
extern int __syscall_rt_sigqueueinfo (int, int, siginfo_t *);
#endif

/* List handling functions.  */
static inline void
list_init (struct list_links *list)
{
  list->next = list->prev = list;
}

static inline void
list_append (struct list_links *list, struct list_links *newp)
{
  newp->prev = list->prev;
  newp->next = list;
  list->prev->next = newp;
  list->prev = newp;
}

static inline void
list_insbefore (struct list_links *list, struct list_links *newp)
{
  list_append (list, newp);
}

/*
 * Like list_unlink_ip, except that calling it on a node that
 * is already unlinked is disastrous rather than a noop.
 */

static inline void
list_unlink (struct list_links *list)
{
  struct list_links *lnext = list->next, *lprev = list->prev;

  lnext->prev = lprev;
  lprev->next = lnext;
}

static inline struct list_links *
list_first (struct list_links *list)
{
  return list->next;
}

static inline struct list_links *
list_null (struct list_links *list)
{
  return list;
}

static inline struct list_links *
list_next (struct list_links *list)
{
  return list->next;
}

static inline int
list_isempty (struct list_links *list)
{
  return list->next == list;
}

/* Functions built on top of the list functions.  */
static inline struct thread_node *
thread_links2ptr (struct list_links *list)
{
  return (struct thread_node *) ((char *) list
                                 - offsetof (struct thread_node, links));
}

static inline struct timer_node *
timer_links2ptr (struct list_links *list)
{
  return (struct timer_node *) ((char *) list
                                - offsetof (struct timer_node, links));
}
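
/* These helpers recover the enclosing node from a pointer to its embedded
   list_links member (the usual "container of" idiom via offsetof).  An
   illustrative sketch only, not code used by this module:

     struct thread_node *t = &thread_array[0];
     struct list_links *l = &t->links;
     assert (thread_links2ptr (l) == t);  */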


/* Initialize a newly allocated thread structure.  */
static void
thread_init (struct thread_node *thread, const pthread_attr_t *attr, clockid_t clock_id)
{
  if (attr != NULL)
    thread->attr = *attr;
  else
    {
      pthread_attr_init (&thread->attr);
      pthread_attr_setdetachstate (&thread->attr, PTHREAD_CREATE_DETACHED);
    }

  thread->exists = 0;
  list_init (&thread->timer_queue);
  pthread_cond_init (&thread->cond, 0);
  thread->current_timer = 0;
  thread->captured = pthread_self ();
  thread->clock_id = clock_id;
}


/* Initialize the global lists, and acquire global resources.  Error
   reporting is done by storing a non-zero value to the global variable
   __timer_init_failed.  */
static void
init_module (void)
{
  int i;

  list_init (&timer_free_list);
  list_init (&thread_free_list);
  list_init (&thread_active_list);

  for (i = 0; i < TIMER_MAX; ++i)
    {
      list_append (&timer_free_list, &__timer_array[i].links);
      __timer_array[i].inuse = TIMER_FREE;
    }

  for (i = 0; i < THREAD_MAXNODES; ++i)
    list_append (&thread_free_list, &thread_array[i].links);

  thread_init (&__timer_signal_thread_rclk, 0, CLOCK_REALTIME);
#ifdef _POSIX_CPUTIME
  thread_init (&__timer_signal_thread_pclk, 0, CLOCK_PROCESS_CPUTIME_ID);
#endif
#ifdef _POSIX_THREAD_CPUTIME
  thread_init (&__timer_signal_thread_tclk, 0, CLOCK_THREAD_CPUTIME_ID);
#endif
}


/* This is a handler executed in a child process after a fork()
   occurs.  It reinitializes the module, resetting all of the data
   structures to their initial state.  The mutex is initialized in
   case it was locked in the parent process.  */
static void
reinit_after_fork (void)
{
  init_module ();
  pthread_mutex_init (&__timer_mutex, 0);
}

/* Called once from pthread_once in timer_init. This initializes the
   module and ensures that reinit_after_fork will be executed in any
   child process.  */
void
__timer_init_once (void)
{
  init_module ();
#if !defined(_ELIX_LEVEL) || _ELIX_LEVEL >= 3
  pthread_atfork (0, 0, reinit_after_fork);
#endif
}


/* Deinitialize a thread that is about to be deallocated.  */
static void
thread_deinit (struct thread_node *thread)
{
  assert (list_isempty (&thread->timer_queue));
  pthread_cond_destroy (&thread->cond);
}


/* Allocate a thread structure from the global free list.  Global
   mutex lock must be held by caller.  The thread is moved to
   the active list. */
struct thread_node *
__timer_thread_alloc (const pthread_attr_t *desired_attr, clockid_t clock_id)
{
  struct list_links *node = list_first (&thread_free_list);

  if (node != list_null (&thread_free_list))
    {
      struct thread_node *thread = thread_links2ptr (node);
      list_unlink (node);
      thread_init (thread, desired_attr, clock_id);
      list_append (&thread_active_list, node);
      return thread;
    }

  return 0;
}


/* Return a thread structure to the global free list.  Global lock
   must be held by caller.  */
void
__timer_thread_dealloc (struct thread_node *thread)
{
  thread_deinit (thread);
  list_unlink (&thread->links);
  list_append (&thread_free_list, &thread->links);
}


/* Each of our threads which terminates executes this cleanup
   handler. We never terminate threads ourselves; if a thread gets here
   it means that the evil application has killed it.  If the thread has
   timers, these require servicing and so we must hire a replacement
   thread right away.  We must also unblock another thread that may
   have been waiting for this thread to finish servicing a timer (see
   timer_delete()).  */

static void
thread_cleanup (void *val)
{
  if (val != NULL)
    {
      struct thread_node *thread = val;

      /* How did the signal thread get killed?  */
      assert (thread != &__timer_signal_thread_rclk);
#ifdef _POSIX_CPUTIME
      assert (thread != &__timer_signal_thread_pclk);
#endif
#ifdef _POSIX_THREAD_CPUTIME
      assert (thread != &__timer_signal_thread_tclk);
#endif

      pthread_mutex_lock (&__timer_mutex);

      thread->exists = 0;

      /* We are no longer processing a timer event.  */
      thread->current_timer = 0;

      if (list_isempty (&thread->timer_queue))
          __timer_thread_dealloc (thread);
      else
        (void) __timer_thread_start (thread);

      pthread_mutex_unlock (&__timer_mutex);

      /* Unblock potentially blocked timer_delete().  */
      pthread_cond_broadcast (&thread->cond);
    }
}


/* Handle a timer which is supposed to go off now.  */
static void
thread_expire_timer (struct thread_node *self, struct timer_node *timer)
{
  self->current_timer = timer; /* Lets timer_delete know timer is running. */

  pthread_mutex_unlock (&__timer_mutex);

  switch (__builtin_expect (timer->event.sigev_notify, SIGEV_SIGNAL))
    {
    case SIGEV_NONE:
      assert (! "timer_create should never have created such a timer");
      break;

    case SIGEV_SIGNAL:
#ifdef __NR_rt_sigqueueinfo
      {
        siginfo_t info;

        /* First, clear the siginfo_t structure, so that we don't pass our
           stack content to other tasks.  */
        memset (&info, 0, sizeof (siginfo_t));
        /* We must pass the information about the data in a siginfo_t
           value.  */
        info.si_signo = timer->event.sigev_signo;
        info.si_code = SI_TIMER;
        info.si_pid = timer->creator_pid;
        info.si_uid = getuid ();
        info.si_value = timer->event.sigev_value;

        INLINE_SYSCALL (rt_sigqueueinfo, 3, info.si_pid, info.si_signo, &info);
      }
#else
      if (pthread_kill (self->captured, timer->event.sigev_signo) != 0)
        {
          if (pthread_kill (self->id, timer->event.sigev_signo) != 0)
            abort ();
        }
#endif
      break;

    case SIGEV_THREAD:
      timer->event.sigev_notify_function (timer->event.sigev_value);
      break;

    default:
      assert (! "unknown event");
      break;
    }

  pthread_mutex_lock (&__timer_mutex);

  self->current_timer = 0;

  pthread_cond_broadcast (&self->cond);
}


/* Thread function; executed by each timer thread. The job of this
   function is to wait on the thread's timer queue and expire the
   timers in chronological order as close to their scheduled time as
   possible.  */
static void *
__attribute__ ((noreturn))
thread_func (void *arg)
{
  struct thread_node *self = arg;

  /* Register cleanup handler, in case rogue application terminates
     this thread.  (This cannot happen to __timer_signal_thread, which
     doesn't invoke application callbacks). */

  pthread_cleanup_push (thread_cleanup, self);

  pthread_mutex_lock (&__timer_mutex);

  while (1)
    {
      struct list_links *first;
      struct timer_node *timer = NULL;

      /* While the timer queue is not empty, inspect the first node.  */
      first = list_first (&self->timer_queue);
      if (first != list_null (&self->timer_queue))
        {
          struct timespec now;

          timer = timer_links2ptr (first);

          /* This assumes that the elements of the list of one thread
             are all for the same clock.  */
          clock_gettime (timer->clock, &now);

          while (1)
            {
              /* If the timer is due or overdue, remove it from the queue.
                 If it's a periodic timer, re-compute its new time and
                 requeue it.  Either way, perform the timer expiry. */
              if (timespec_compare (&now, &timer->expirytime) < 0)
                break;

              list_unlink_ip (first);

              if (__builtin_expect (timer->value.it_interval.tv_sec, 0) != 0
                  || timer->value.it_interval.tv_nsec != 0)
                {
                  timespec_add (&timer->expirytime, &now,
                                &timer->value.it_interval);
                  __timer_thread_queue_timer (self, timer);
                }

              thread_expire_timer (self, timer);

              first = list_first (&self->timer_queue);
              if (first == list_null (&self->timer_queue))
                break;

              timer = timer_links2ptr (first);
            }
        }

      /* If the queue is not empty, wait until the expiry time of the
         first node.  Otherwise wait indefinitely.  Insertions at the
         head of the queue must wake up the thread by broadcasting
         this condition variable.  */
      if (timer != NULL)
        pthread_cond_timedwait (&self->cond, &__timer_mutex,
                                &timer->expirytime);
      else
        pthread_cond_wait (&self->cond, &__timer_mutex);
    }
  /* This macro will never be executed since the while loop loops
     forever - but we have to add it for proper nesting.  */
  pthread_cleanup_pop (1);

}


/* Enqueue a timer in wakeup order in the thread's timer queue.
   Returns 1 if the timer was inserted at the head of the queue,
   causing the queue's next wakeup time to change. */

int
__timer_thread_queue_timer (struct thread_node *thread,
                            struct timer_node *insert)
{
  struct list_links *iter;
  int athead = 1;

  for (iter = list_first (&thread->timer_queue);
       iter != list_null (&thread->timer_queue);
        iter = list_next (iter))
    {
      struct timer_node *timer = timer_links2ptr (iter);

      if (timespec_compare (&insert->expirytime, &timer->expirytime) < 0)
          break;
      athead = 0;
    }

  list_insbefore (iter, &insert->links);
  return athead;
}
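
/* Callers are expected to hold __timer_mutex around the queue update and,
   when the return value says the new timer landed at the head of the queue,
   to wake the servicing thread so it recomputes its wakeup time.  An
   illustrative sketch of a hypothetical caller only ('thread' and 'timer'
   are placeholder variables, not part of this module):

     pthread_mutex_lock (&__timer_mutex);
     if (__timer_thread_queue_timer (thread, timer))
       __timer_thread_wakeup (thread);
     pthread_mutex_unlock (&__timer_mutex);
*/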


/* Start a thread and associate it with the given thread node.  Global
   lock must be held by caller.  */
int
__timer_thread_start (struct thread_node *thread)
{
  int retval = 1;

  assert (!thread->exists);
  thread->exists = 1;

  if (pthread_create (&thread->id, &thread->attr, thread_func, thread) != 0)
    {
      thread->exists = 0;
      retval = -1;
    }

  return retval;
}
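
/* Returns 1 on success and -1 if pthread_create fails.  A caller that has
   just allocated the node would typically return it to the free list on
   failure.  Illustrative sketch of a hypothetical caller only ('attr' and
   'clock' are placeholders):

     struct thread_node *t = __timer_thread_alloc (attr, clock);
     if (t != NULL && __timer_thread_start (t) < 0)
       __timer_thread_dealloc (t);
*/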


/* Wake up a timer thread so that it re-examines its timer queue.  */
void
__timer_thread_wakeup (struct thread_node *thread)
{
  pthread_cond_broadcast (&thread->cond);
}


/* Compare two pthread_attr_t thread attributes for exact equality.
   Returns 1 if they are equal, and zero if they are not equal or
   contain illegal values.  This version is LinuxThreads-specific for
   performance reasons.  One could use the access functions to get the
   values of all the fields of the attribute structure.  */
static int
thread_attr_compare (const pthread_attr_t *left, const pthread_attr_t *right)
{
  return (left->__detachstate == right->__detachstate
          && left->__schedpolicy == right->__schedpolicy
          && (left->__schedparam.sched_priority
              == right->__schedparam.sched_priority)
          && left->__inheritsched == right->__inheritsched
          && left->__scope == right->__scope);
}


/* Search the list of active threads and find one which has matching
   attributes.  Global mutex lock must be held by caller.  */
struct thread_node *
__timer_thread_find_matching (const pthread_attr_t *desired_attr,
                              clockid_t desired_clock_id)
{
  struct list_links *iter = list_first (&thread_active_list);

  while (iter != list_null (&thread_active_list))
    {
      struct thread_node *candidate = thread_links2ptr (iter);

      if (thread_attr_compare (desired_attr, &candidate->attr)
          && desired_clock_id == candidate->clock_id)
        {
          list_unlink (iter);
          return candidate;
        }

      iter = list_next (iter);
    }

  return NULL;
}


/* Grab a free timer structure from the global free list.  The global
   lock must be held by the caller.  */
struct timer_node *
__timer_alloc (void)
{
  struct list_links *node = list_first (&timer_free_list);

  if (node != list_null (&timer_free_list))
    {
      struct timer_node *timer = timer_links2ptr (node);
      list_unlink_ip (node);
      timer->inuse = TIMER_INUSE;
      timer->refcount = 1;
      return timer;
    }

  return NULL;
}


/* Return a timer structure to the global free list.  The global lock
   must be held by the caller.  */
void
__timer_dealloc (struct timer_node *timer)
{
  assert (timer->refcount == 0);
  timer->thread = NULL; /* Break association between timer and thread.  */
  timer->inuse = TIMER_FREE;
  list_append (&timer_free_list, &timer->links);
}


/* Thread cancellation handler which unlocks a mutex.  */
void
__timer_mutex_cancel_handler (void *arg)
{
  pthread_mutex_unlock (arg);
}
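
/* Typical use: code that can be cancelled while holding the global lock
   registers this handler so the mutex is released if cancellation hits a
   cancellation point.  An illustrative sketch only, not code from this
   module:

     pthread_mutex_lock (&__timer_mutex);
     pthread_cleanup_push (__timer_mutex_cancel_handler, &__timer_mutex);
     ... a cancellation point, e.g. pthread_cond_wait ...
     pthread_cleanup_pop (0);
     pthread_mutex_unlock (&__timer_mutex);
*/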
