OpenCores Subversion repository openrisc - https://opencores.org/ocsvn/openrisc/openrisc/trunk

openrisc/trunk/rtos/ecos-2.0/packages/compat/posix/v2_0/src/pthread.cxx - Blame information for rev 27


Line No. Rev Author Line
1 27 unneback
//==========================================================================
2
//
3
//      pthread.cxx
4
//
5
//      POSIX pthreads implementation
6
//
7
//==========================================================================
8
//####ECOSGPLCOPYRIGHTBEGIN####
9
// -------------------------------------------
10
// This file is part of eCos, the Embedded Configurable Operating System.
11
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc.
12
//
13
// eCos is free software; you can redistribute it and/or modify it under
14
// the terms of the GNU General Public License as published by the Free
15
// Software Foundation; either version 2 or (at your option) any later version.
16
//
17
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY
18
// WARRANTY; without even the implied warranty of MERCHANTABILITY or
19
// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
20
// for more details.
21
//
22
// You should have received a copy of the GNU General Public License along
23
// with eCos; if not, write to the Free Software Foundation, Inc.,
24
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
25
//
26
// As a special exception, if other files instantiate templates or use macros
27
// or inline functions from this file, or you compile this file and link it
28
// with other works to produce a work based on this file, this file does not
29
// by itself cause the resulting work to be covered by the GNU General Public
30
// License. However the source code for this file must still be made available
31
// in accordance with section (3) of the GNU General Public License.
32
//
33
// This exception does not invalidate any other reasons why a work based on
34
// this file might be covered by the GNU General Public License.
35
//
36
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc.
37
// at http://sources.redhat.com/ecos/ecos-license/
38
// -------------------------------------------
39
//####ECOSGPLCOPYRIGHTEND####
40
//==========================================================================
41
//#####DESCRIPTIONBEGIN####
42
//
43
// Author(s):           nickg
44
// Contributors:        nickg, jlarmour
45
// Date:                2000-03-27
46
// Purpose:             POSIX pthread implementation
47
// Description:         This file contains the implementation of the POSIX pthread
48
//                      functions.
49
//              
50
//              
51
//
52
//####DESCRIPTIONEND####
53
//
54
//==========================================================================
55
 
56
#include <pkgconf/hal.h>
57
#include <pkgconf/kernel.h>
58
#include <pkgconf/posix.h>
59
#include <pkgconf/isoinfra.h>
60
#include <pkgconf/libc_startup.h>
61
 
62
#include <cyg/kernel/ktypes.h>         // base kernel types
63
#include <cyg/infra/cyg_trac.h>        // tracing macros
64
#include <cyg/infra/cyg_ass.h>         // assertion macros
65
 
66
#include "pprivate.h"                   // POSIX private header
67
 
68
#include <stdlib.h>                     // malloc(), free()
69
 
70
#include <cyg/kernel/sched.hxx>        // scheduler definitions
71
#include <cyg/kernel/thread.hxx>       // thread definitions
72
#include <cyg/kernel/clock.hxx>        // clock definitions
73
 
74
#include <cyg/kernel/sched.inl>        // scheduler inlines
75
 
76
//-----------------------------------------------------------------------------
77
// First check that the configuration contains the elements we need
78
 
79
#ifndef CYGPKG_KERNEL
80
#error POSIX pthreads need eCos kernel
81
#endif
82
 
83
#ifndef CYGSEM_KERNEL_SCHED_MLQUEUE
84
#error POSIX pthreads need MLQ scheduler
85
#endif
86
 
87
#ifndef CYGSEM_KERNEL_SCHED_TIMESLICE
88
#error POSIX pthreads need timeslicing
89
#endif
90
 
91
#ifndef CYGVAR_KERNEL_THREADS_DATA
92
#error POSIX pthreads need per-thread data
93
#endif
94
 
95
//=============================================================================
96
// Internal data structures
97
 
98
// Mutex for controlling access to shared data structures
99
Cyg_Mutex pthread_mutex CYGBLD_POSIX_INIT;
100
 
101
// Array of pthread control structures. A pthread_t object is
102
// "just" an index into this array.
103
static pthread_info *thread_table[CYGNUM_POSIX_PTHREAD_THREADS_MAX];
104
 
105
// Count of number of threads in table.
106
static int pthread_count = 0;
107
 
108
// Count of number of threads that have exited and not been reaped.
109
static int pthreads_exited;
110
 
111
// Count of number of threads that are waiting to be joined
112
static int pthreads_tobejoined;
113
 
114
// Per-thread key allocation. This key map has a 1 bit set for each
115
// key that is free, zero if it is allocated.
116
#define KEY_MAP_TYPE cyg_uint32
117
#define KEY_MAP_TYPE_SIZE (sizeof(KEY_MAP_TYPE)*8) // in BITS!
118
static KEY_MAP_TYPE thread_key[PTHREAD_KEYS_MAX/KEY_MAP_TYPE_SIZE];
119
static void (*key_destructor[PTHREAD_KEYS_MAX]) (void *);
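// For example, with KEY_MAP_TYPE_SIZE == 32, key 37 is tracked by bit
// 37%32 == 5 of word 37/32 == 1 of thread_key[]: pthread_key_create()
// clears that bit to mark the key allocated, and pthread_key_delete()
// sets it again to mark the key free.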
120
 
121
// Index of next pthread_info to allocate from thread_table array.
122
static int thread_info_next = 0;
123
 
124
// This is used to make pthread_t values unique even when reusing
125
// a table slot. This allows CYGNUM_POSIX_PTHREAD_THREADS_MAX to range
126
// up to 1024.
127
#define THREAD_ID_COOKIE_INC 0x00000400
128
#define THREAD_ID_COOKIE_MASK (THREAD_ID_COOKIE_INC-1)
129
static pthread_t thread_id_cookie = THREAD_ID_COOKIE_INC;
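// Worked example: if slot 5 of thread_table[] is allocated while the cookie
// is 0x00000800, the new thread gets id 0x00000805. pthread_info_id()
// recovers the slot with (id & THREAD_ID_COOKIE_MASK) == 5, and a stale id
// minted for an earlier occupant of that slot (say 0x00000405) is rejected
// because it no longer matches the stored info->id.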
130
 
131
//-----------------------------------------------------------------------------
132
// Main thread.
133
 
134
#define MAIN_DEFAULT_STACK_SIZE \
135
  (CYGNUM_LIBC_MAIN_DEFAULT_STACK_SIZE < PTHREAD_STACK_MIN \
136
              ? PTHREAD_STACK_MIN : CYGNUM_LIBC_MAIN_DEFAULT_STACK_SIZE)
137
 
138
static char main_stack[MAIN_DEFAULT_STACK_SIZE];
139
 
140
// Thread ID of main thread.
141
static pthread_t main_thread;
142
 
143
//=============================================================================
144
// Exported variables
145
 
146
int pthread_canceled_dummy_var;           // pointed to by PTHREAD_CANCELED
147
 
148
//=============================================================================
149
// Internal functions
150
 
151
//-----------------------------------------------------------------------------
152
// Private version of pthread_self() that returns a pointer to our internal
153
// control structure.
154
 
155
pthread_info *pthread_self_info(void)
156
{
157
    Cyg_Thread *thread = Cyg_Thread::self();
158
 
159
    CYG_CHECK_DATA_PTR(thread, "Illegal current thread");
160
 
161
    pthread_info *info = (pthread_info *)thread->get_data(CYGNUM_KERNEL_THREADS_DATA_POSIX);
162
 
163
//    CYG_CHECK_DATA_PTR(info, "Not a POSIX thread!!!");
164
 
165
    return info;
166
}
167
 
168
externC pthread_info *pthread_info_id( pthread_t id )
169
{
170
    pthread_t index = id & THREAD_ID_COOKIE_MASK;
171
 
172
    pthread_info *info = thread_table[index];
173
 
174
    // Check for a valid entry
175
    if( info == NULL )
176
        return NULL;
177
 
178
    // Check that this is a valid entry
179
    if ( info->state == PTHREAD_STATE_FREE ||
180
         info->state == PTHREAD_STATE_EXITED )
181
        return NULL;
182
 
183
    // Check that the entry matches the id
184
    if( info->id != id ) return NULL;
185
 
186
    // Return the pointer
187
    return info;
188
}
189
 
190
//-----------------------------------------------------------------------------
191
// new operator to allow us to invoke the Cyg_Thread constructor on the
192
// pthread_info.thread_obj array.
193
 
194
inline void *operator new(size_t size, cyg_uint8 *ptr) { return (void *)ptr; }
195
 
196
//-----------------------------------------------------------------------------
197
// Optional memory allocation functions for pthread stacks.
198
// If there is an implementation of malloc() available, define pthread_malloc()
199
// and pthread_free() to use it. Otherwise define them to do nothing.
200
// In the future we may want to add configuration here to permit thread stacks
201
// to be allocated in a nominated memory pool separate from the standard malloc()
202
// pool. Hence the (currently redundant) encapsulation of these functions.
203
 
204
#if CYGINT_ISO_MALLOC
205
 
206
static __inline__ CYG_ADDRWORD pthread_malloc( CYG_ADDRWORD size )
207
{
208
    return (CYG_ADDRWORD)malloc( size );
209
}
210
 
211
static __inline__ void pthread_free( CYG_ADDRWORD m )
212
{
213
    free( (void *)m );
214
}
215
 
216
#define PTHREAD_MALLOC
217
 
218
#else
219
 
220
#define pthread_malloc(_x_) (0)
221
 
222
#define pthread_free(_x_)
223
 
224
#endif
225
 
226
//-----------------------------------------------------------------------------
227
// pthread entry function.
228
// does some housekeeping and then calls the user's start routine.
229
 
230
static void pthread_entry(CYG_ADDRWORD data)
231
{
232
    pthread_info *self = (pthread_info *)data;
233
 
234
    void *retval = self->start_routine(self->start_arg);
235
 
236
    pthread_exit( retval );
237
}
238
 
239
//-----------------------------------------------------------------------------
240
// Main entry function.
241
// This is set as the start_routine of the main thread. It invokes main()
242
// and if it returns, shuts down the system.
243
 
244
externC void cyg_libc_invoke_main( void );
245
 
246
static void *call_main( void * )
247
{
248
    cyg_libc_invoke_main();
249
    return NULL; // placate compiler
250
}
251
 
252
//-----------------------------------------------------------------------------
253
// Check whether there is a cancel pending and if so, whether
254
// cancellations are enabled. We do it in this order to reduce the
255
// number of tests in the common case - when no cancellations are
256
// pending.
257
// We make this inline so it can be called directly below for speed
258
 
259
static __inline__ int
260
checkforcancel( void )
261
{
262
     pthread_info *self = pthread_self_info();
263
 
264
    if( self != NULL &&
265
        self->cancelpending &&
266
        self->cancelstate == PTHREAD_CANCEL_ENABLE )
267
        return 1;
268
    else
269
        return 0;
270
}
271
 
272
 
273
//-----------------------------------------------------------------------------
274
// POSIX ASR
275
// This is installed as the ASR for all POSIX threads.
276
 
277
static void posix_asr( CYG_ADDRWORD data )
278
{
279
    pthread_info *self = (pthread_info *)data;
280
 
281
#ifdef CYGPKG_POSIX_TIMERS
282
    // Call into timer subsystem to deliver any pending
283
    // timer expirations.
284
    cyg_posix_timer_asr(self);
285
#endif
286
 
287
#ifdef CYGPKG_POSIX_SIGNALS
288
    // Call signal subsystem to deliver any signals
289
    cyg_posix_signal_asr(self);
290
#endif
291
 
292
    // Check for cancellation
293
    if( self->cancelpending &&
294
        self->cancelstate == PTHREAD_CANCEL_ENABLE &&
295
        self->canceltype == PTHREAD_CANCEL_ASYNCHRONOUS )
296
    {
297
        // If we have a pending cancellation, cancellations are
298
        // enabled and we are in asynchronous mode, then we can do the
299
        // cancellation processing.  Since pthread_exit() does
300
        // everything we need to do, we just call that here.
301
 
302
        pthread_exit(PTHREAD_CANCELED);
303
    }
304
}
305
 
306
//-----------------------------------------------------------------------------
307
// The (Grim) Reaper.
308
// This function is called to tidy up and dispose of any threads that have
309
// exited. This work must be done from a thread other than the one exiting.
310
// Note: this function _must_ be called with pthread_mutex locked.
311
 
312
static void pthread_reap()
313
{
314
    int i;
315
 
316
    // Loop over the thread table looking for exited threads. The
317
    // pthreads_exited counter springs us out of this once we have
318
    // found them all (and keeps us out if there are none to do).
319
 
320
    for( i = 0; pthreads_exited && i < CYGNUM_POSIX_PTHREAD_THREADS_MAX ; i++ )
321
    {
322
        pthread_info *thread = thread_table[i];
323
 
324
        if( thread != NULL && thread->state == PTHREAD_STATE_EXITED )
325
        {
326
            // The thread has exited, so it is a candidate for being
327
            // reaped. We have to make sure that the eCos thread has
328
            // also reached EXITED state before we can tidy it up.
329
 
330
            while( thread->thread->get_state() != Cyg_Thread::EXITED )
331
            {
332
                // The eCos thread has not yet exited. This is
333
                // probably because its priority is too low to allow
334
                // it to complete.  We fix this here by raising its
335
                // priority to equal ours and then yielding. This
336
                // should eventually get it into exited state.
337
 
338
                Cyg_Thread *self = Cyg_Thread::self();
339
 
340
                // Set thread's priority to our current dispatching priority.
341
                thread->thread->set_priority( self->get_current_priority() );
342
 
343
                // Yield, yield
344
                self->yield();
345
 
346
                // and keep looping until it exits.
347
            }
348
 
349
            // At this point we have a thread that we can reap.
350
 
351
            // destroy the eCos thread
352
            thread->thread->~Cyg_Thread();
353
 
354
            // destroy the joiner condvar
355
            thread->joiner->~Cyg_Condition_Variable();
356
 
357
#ifdef CYGPKG_POSIX_SIGNALS
358
            // Destroy signal handling fields
359
            cyg_posix_thread_sigdestroy( thread );
360
#endif
361
 
362
            // Free the stack if we allocated it
363
            if( thread->freestack )
364
                pthread_free( thread->stackmem );
365
 
366
            // Finally, set the thread table entry to NULL so that it
367
            // may be reused.
368
            thread_table[i] = NULL;
369
 
370
            pthread_count--;
371
            pthreads_exited--;
372
        }
373
    }
374
}
375
 
376
//=============================================================================
377
// Functions exported to rest of POSIX subsystem.
378
 
379
//-----------------------------------------------------------------------------
380
// Create the main() thread.
381
 
382
externC void cyg_posix_pthread_start( void )
383
{
384
 
385
    // Initialize the per-thread data key map.
386
 
387
    for( cyg_ucount32 i = 0; i < (PTHREAD_KEYS_MAX/KEY_MAP_TYPE_SIZE); i++ )
388
    {
389
        thread_key[i] = ~0;
390
    }
391
 
392
    // Create the main thread
393
    pthread_attr_t attr;
394
    struct sched_param schedparam;
395
 
396
    schedparam.sched_priority = CYGNUM_POSIX_MAIN_DEFAULT_PRIORITY;
397
 
398
    pthread_attr_init( &attr );
399
    pthread_attr_setinheritsched( &attr, PTHREAD_EXPLICIT_SCHED );
400
    pthread_attr_setstackaddr( &attr, &main_stack[sizeof(main_stack)] );
401
    pthread_attr_setstacksize( &attr, sizeof(main_stack) );
402
    pthread_attr_setschedpolicy( &attr, SCHED_RR );
403
    pthread_attr_setschedparam( &attr, &schedparam );
404
 
405
    pthread_create( &main_thread, &attr, call_main, NULL );
406
}
407
 
408
#ifdef CYGPKG_POSIX_SIGNALS
409
//-----------------------------------------------------------------------------
410
// Look for a thread that can accept delivery of any of the signals in
411
// the mask and release it from any wait it is in.  Since this may be
412
// called from a DSR, it cannot use any locks internally - any locking
413
// should be done before the call.
414
 
415
externC void cyg_posix_pthread_release_thread( sigset_t *mask )
416
{
417
    int i;
418
    int count = pthread_count;
419
 
420
    // Loop over the thread table looking for a thread that has a
421
    // signal mask that does not mask all the signals in mask.
422
    // FIXME: find a more efficient way of doing this.
423
 
424
    for( i = 0; count > 0 && i < CYGNUM_POSIX_PTHREAD_THREADS_MAX ; i++ )
425
    {
426
        pthread_info *thread = thread_table[i];
427
 
428
        if( (thread != NULL) &&
429
            (thread->state <= PTHREAD_STATE_RUNNING) &&
430
            ((*mask & ~thread->sigmask) != 0) )
431
        {
432
            // This thread can service at least one of the signals in
433
            // *mask. Knock it out of its wait and make its ASR pending.
434
 
435
            thread->thread->set_asr_pending();
436
            thread->thread->release();
437
            break;
438
        }
439
 
440
        // Decrement count for each valid thread we find.
441
        if( thread != NULL && thread->state != PTHREAD_STATE_FREE )
442
            count--;
443
    }
444
}
445
#endif
446
 
447
//=============================================================================
448
// General thread operations
449
 
450
//-----------------------------------------------------------------------------
451
// Thread creation and management.
452
 
453
// Create a thread.
454
externC int pthread_create ( pthread_t *thread,
455
                             const pthread_attr_t *attr,
456
                             void *(*start_routine) (void *),
457
                             void *arg)
458
{
459
    PTHREAD_ENTRY();
460
 
461
    PTHREAD_CHECK(thread);
462
    PTHREAD_CHECK(start_routine);
463
 
464
    pthread_info *self = pthread_self_info();
465
 
466
    pthread_attr_t use_attr;
467
 
468
    // Set use_attr to the set of attributes we are going to
469
    // actually use. Either those passed in, or the default set.
470
 
471
    if( attr == NULL )
472
        pthread_attr_init( &use_attr );
473
    else use_attr = *attr;
474
 
475
    // Adjust the attributes to cope with the setting of inheritsched.
476
 
477
    if( use_attr.inheritsched == PTHREAD_INHERIT_SCHED )
478
    {
479
        use_attr.schedpolicy = self->attr.schedpolicy;
480
        use_attr.schedparam  = self->attr.schedparam;
481
    }
482
 
483
    CYG_ADDRWORD stackbase, stacksize;
484
    cyg_bool freestack = false;
485
    CYG_ADDRWORD stackmem = 0;
486
 
487
    // If no stack size has been supplied, fall back to the minimum,
488
    // PTHREAD_STACK_MIN bytes.
489
 
490
    if( use_attr.stacksize_valid )
491
        stacksize = use_attr.stacksize;
492
    else stacksize = PTHREAD_STACK_MIN;
493
 
494
    if( use_attr.stackaddr_valid )
495
    {
496
        // Set up stack base and size from supplied arguments.
497
 
498
        // Calculate stack base from address and size.
499
        // FIXME: Falling stack assumed in pthread_create().
500
        stackmem = stackbase = (CYG_ADDRWORD)use_attr.stackaddr-stacksize;
501
    }
502
    else
503
    {
504
#ifdef PTHREAD_MALLOC
505
 
506
        stackmem = stackbase = pthread_malloc( stacksize );
507
 
508
        if( stackmem == 0 )
509
            PTHREAD_RETURN( EAGAIN );
510
 
511
        freestack = true;
512
#else        
513
        PTHREAD_RETURN(EINVAL);
514
#endif        
515
 
516
    }
517
 
518
    // Get sole access to data structures
519
 
520
    pthread_mutex.lock();
521
 
522
    // Dispose of any dead threads
523
    pthread_reap();
524
 
525
    // Find a free slot in the thread table
526
 
527
    pthread_info *nthread;
528
    int thread_next = thread_info_next;
529
 
530
    while( thread_table[thread_next] != NULL )
531
    {
532
        thread_next++;
533
        if( thread_next >= CYGNUM_POSIX_PTHREAD_THREADS_MAX )
534
            thread_next = 0;
535
 
536
        // check for wrap, and return error if no slots left
537
        if( thread_next == thread_info_next )
538
        {
539
            pthread_mutex.unlock();
540
            if( freestack )
541
                pthread_free( stackmem );
542
            PTHREAD_RETURN(ENOMEM);
543
        }
544
    }
545
 
546
    nthread = (pthread_info *)stackbase;
547
 
548
    stackbase += sizeof(pthread_info);
549
    stacksize -= sizeof(pthread_info);
550
 
551
    thread_table[thread_next] = nthread;
552
 
553
    // Set new next index
554
    thread_info_next = thread_next;
555
 
556
    // step the cookie
557
    thread_id_cookie += THREAD_ID_COOKIE_INC;
558
 
559
    // Initialize the table entry
560
    nthread->state              = use_attr.detachstate == PTHREAD_CREATE_JOINABLE ?
561
                                  PTHREAD_STATE_RUNNING : PTHREAD_STATE_DETACHED;
562
    nthread->id                 = thread_next+thread_id_cookie;
563
    nthread->attr               = use_attr;
564
    nthread->retval             = 0;
565
    nthread->start_routine      = start_routine;
566
    nthread->start_arg          = arg;
567
 
568
    nthread->freestack          = freestack;
569
    nthread->stackmem           = stackmem;
570
 
571
    nthread->cancelstate        = PTHREAD_CANCEL_ENABLE;
572
    nthread->canceltype         = PTHREAD_CANCEL_DEFERRED;
573
    nthread->cancelbuffer       = NULL;
574
    nthread->cancelpending      = false;
575
 
576
    nthread->thread_data        = NULL;
577
 
578
#ifdef CYGVAR_KERNEL_THREADS_NAME    
579
    // generate a name for this thread
580
 
581
    char *name = nthread->name;
582
    static char *name_template = "pthread.00000000";
583
    pthread_t id = nthread->id;
584
 
585
    for( int i = 0; name_template[i]; i++ ) name[i] = name_template[i];
586
 
587
    // dump the id, in hex, into the name.
588
    for( int i = 15; i >= 8; i-- )
589
    {
590
        name[i] = "0123456789ABCDEF"[id&0xF];
591
        id >>= 4;
592
    }
593
 
594
#endif
595
 
596
    // Initialize the joiner condition variable
597
 
598
    nthread->joiner = new(nthread->joiner_obj) Cyg_Condition_Variable( pthread_mutex );
599
 
600
#ifdef CYGPKG_POSIX_SIGNALS
601
    // Initialize signal specific fields.
602
    cyg_posix_thread_siginit( nthread, self );
603
#endif
604
 
605
    // create the underlying eCos thread
606
 
607
    nthread->thread = new(&nthread->thread_obj[0])
608
        Cyg_Thread ( PTHREAD_ECOS_PRIORITY(use_attr.schedparam.sched_priority),
609
                     pthread_entry,
610
                     (CYG_ADDRWORD)nthread,
611
                     name,
612
                     stackbase,
613
                     stacksize);
614
 
615
    // Put pointer to pthread_info into eCos thread's per-thread data.
616
    nthread->thread->set_data( CYGNUM_KERNEL_THREADS_DATA_POSIX, (CYG_ADDRWORD)nthread );
617
 
618
    // Set timeslice enable according to scheduling policy.
619
    if( use_attr.schedpolicy == SCHED_FIFO )
620
         nthread->thread->timeslice_disable();
621
    else nthread->thread->timeslice_enable();
622
 
623
    // set up ASR and data
624
    nthread->thread->set_asr( posix_asr, (CYG_ADDRWORD)nthread, NULL, NULL );
625
 
626
    // return thread ID
627
    *thread = nthread->id;
628
 
629
    pthread_count++;
630
 
631
    pthread_mutex.unlock();
632
 
633
    // finally, set the thread going
634
    nthread->thread->resume();
635
 
636
    PTHREAD_RETURN(0);
637
}
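//-----------------------------------------------------------------------------
// Usage sketch (illustrative only, not part of the original eCos sources):
// creating a joinable thread with an explicit stack size and explicit
// scheduling attributes against the implementation above. worker() and the
// numeric values are assumptions chosen for the example.
//
//     static void *worker( void *arg )
//     {
//         // ... do some work on arg ...
//         return arg;
//     }
//
//     static int spawn_worker( pthread_t *t, void *arg )
//     {
//         pthread_attr_t attr;
//         struct sched_param sp;
//
//         pthread_attr_init( &attr );
//         pthread_attr_setstacksize( &attr, PTHREAD_STACK_MIN + 4096 );
//         pthread_attr_setinheritsched( &attr, PTHREAD_EXPLICIT_SCHED );
//         pthread_attr_setschedpolicy( &attr, SCHED_RR );
//         sp.sched_priority = 10;                    // assumed valid priority
//         pthread_attr_setschedparam( &attr, &sp );
//
//         int err = pthread_create( t, &attr, worker, arg );
//
//         pthread_attr_destroy( &attr );
//         return err;        // 0 on success; EAGAIN, ENOMEM or EINVAL on failure
//     }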
638
 
639
//-----------------------------------------------------------------------------
640
// Get current thread id.
641
 
642
externC pthread_t pthread_self ( void )
643
{
644
    PTHREAD_ENTRY();
645
 
646
    pthread_info *info = pthread_self_info();
647
 
648
    CYG_CHECK_DATA_PTR(info, "Not a POSIX thread!!!");
649
 
650
    return info->id;
651
}
652
 
653
//-----------------------------------------------------------------------------
654
// Compare two thread identifiers.
655
 
656
externC int pthread_equal (pthread_t thread1, pthread_t thread2)
657
{
658
    PTHREAD_ENTRY();
659
 
660
    return thread1 == thread2;
661
}
662
 
663
//-----------------------------------------------------------------------------
664
// Terminate current thread.
665
 
666
externC void exit(int) CYGBLD_ATTRIB_NORET;
667
 
668
externC void pthread_exit (void *retval)
669
{
670
    PTHREAD_ENTRY();
671
 
672
    pthread_info *self = pthread_self_info();
673
 
674
    // Call cancellation handlers. We eat up the buffers as we go in
675
    // case any of the routines calls pthread_exit() itself.
676
    while( self->cancelbuffer != NULL )
677
    {
678
        struct pthread_cleanup_buffer *buffer = self->cancelbuffer;
679
 
680
        self->cancelbuffer = buffer->prev;
681
 
682
        buffer->routine(buffer->arg);
683
    }
684
 
685
    if( self->thread_data != NULL )
686
    {
687
        // Call per-thread key destructors.
688
        // The specification of this is that we must continue to call the
689
        // destructor functions until all the per-thread data values are NULL or
690
        // we have done it PTHREAD_DESTRUCTOR_ITERATIONS times.
691
 
692
        cyg_bool destructors_called;
693
        int destructor_iterations = 0;
694
 
695
        do
696
        {
697
            destructors_called = false;
698
 
699
            for( cyg_ucount32 key = 0; key < PTHREAD_KEYS_MAX; key++ )
700
            {
701
                // Skip unallocated keys
702
                if( thread_key[key/KEY_MAP_TYPE_SIZE] & 1<<(key%KEY_MAP_TYPE_SIZE) )
703
                    continue;
704
 
705
                // Skip NULL destructors
706
                if( key_destructor[key] == NULL ) continue;
707
 
708
                // Skip NULL data values
709
                if( self->thread_data[key] == NULL ) continue;
710
 
711
                // If it passes all that, call the destructor.
712
                // Note that NULLing the data value here is new
713
                // behaviour in the 2001 POSIX standard.
714
                {
715
                    void* value = self->thread_data[key];
716
                    self->thread_data[key] = NULL;
717
                    key_destructor[key](value);
718
                }
719
 
720
                // Record that we called a destructor
721
                destructors_called = true;
722
            }
723
 
724
            // Count the iteration
725
            destructor_iterations++;
726
 
727
        } while( destructors_called &&
728
                 (destructor_iterations <= PTHREAD_DESTRUCTOR_ITERATIONS));
729
 
730
    }
731
 
732
    pthread_mutex.lock();
733
 
734
    // Set the retval for any joiner
735
    self->retval = retval;
736
 
737
    // If we are already detached, go to EXITED state, otherwise
738
    // go into JOIN state.
739
 
740
    if ( PTHREAD_STATE_DETACHED == self->state ) {
741
        self->state = PTHREAD_STATE_EXITED;
742
        pthreads_exited++;
743
    } else {
744
        self->state = PTHREAD_STATE_JOIN;
745
        pthreads_tobejoined++;
746
    }
747
 
748
    // Kick any waiting joiners
749
    self->joiner->broadcast();
750
 
751
    cyg_bool call_exit=false;
752
 
753
    // if this is the last thread (other than threads waiting to be joined)
754
    // then we need to call exit() later
755
    if ( pthreads_exited + pthreads_tobejoined == pthread_count )
756
        call_exit=true;
757
 
758
    pthread_mutex.unlock();
759
 
760
    // Finally, call the exit function; this will not return.
761
    if ( call_exit )
762
        ::exit(0);
763
    else
764
        self->thread->exit();
765
 
766
    // This loop keeps some compilers happy. pthread_exit() is marked
767
    // with the noreturn attribute, and without this they generate a
768
    // call to abort() here in case Cyg_Thread::exit() returns. 
769
 
770
    for(;;) continue;
771
}
772
 
773
//-----------------------------------------------------------------------------
774
// Wait for the thread to terminate. If thread_return is not NULL then
775
// the retval from the thread's call to pthread_exit() is stored at
776
// *thread_return.
777
 
778
externC int pthread_join (pthread_t thread, void **thread_return)
779
{
780
    int err = 0;
781
 
782
    PTHREAD_ENTRY();
783
 
784
    // check for cancellation first.
785
    pthread_testcancel();
786
 
787
    pthread_mutex.lock();
788
 
789
    // Dispose of any dead threads
790
    pthread_reap();
791
 
792
    pthread_info *self = pthread_self_info();
793
    pthread_info *joinee = pthread_info_id( thread );
794
 
795
    if( joinee == NULL )
796
    {
797
        err = ESRCH;
798
    }
799
 
800
    if( !err && joinee == self )
801
    {
802
        err = EDEADLK;
803
    }
804
 
805
    if ( !err ) {
806
        switch ( joinee->state )
807
        {
808
        case PTHREAD_STATE_RUNNING:
809
            // The thread is still running, we must wait for it.
810
        while( joinee->state == PTHREAD_STATE_RUNNING ) {
811
            if ( !joinee->joiner->wait() )
812
                // check if we were woken because we were being cancelled
813
                if ( checkforcancel() ) {
814
                    err = EAGAIN;  // value unimportant, just some error
815
                    break;
816
                }
817
        }
818
 
819
        // check that the thread is still joinable
820
        if( joinee->state == PTHREAD_STATE_JOIN )
821
            break;
822
 
823
        // The thread has become unjoinable while we waited, so we
824
        // fall through to complain.
825
 
826
        case PTHREAD_STATE_FREE:
827
        case PTHREAD_STATE_DETACHED:
828
        case PTHREAD_STATE_EXITED:
829
        // None of these may be joined.
830
            err = EINVAL;
831
            break;
832
 
833
        case PTHREAD_STATE_JOIN:
834
            break;
835
        }
836
    }
837
 
838
    if ( !err ) {
839
 
840
        // here, we know that joinee is a thread that has exited and is
841
        // ready to be joined.
842
 
843
        // Get the retval
844
        if( thread_return != NULL )
845
            *thread_return = joinee->retval;
846
 
847
        // set state to exited.
848
        joinee->state = PTHREAD_STATE_EXITED;
849
        pthreads_exited++;
850
        pthreads_tobejoined--;
851
 
852
        // Dispose of any dead threads
853
        pthread_reap();
854
    }
855
 
856
    pthread_mutex.unlock();
857
 
858
    // check for cancellation before returning
859
    pthread_testcancel();
860
 
861
    PTHREAD_RETURN(err);
862
}
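//-----------------------------------------------------------------------------
// Usage sketch (illustrative only): propagating a result from a thread to its
// joiner. The names are invented for the example.
//
//     static int answer;
//
//     static void *compute( void *arg )
//     {
//         answer = 42;
//         return &answer;            // same effect as pthread_exit( &answer )
//     }
//
//     static int collect( pthread_t t )
//     {
//         void *result = NULL;
//         if( pthread_join( t, &result ) == 0 )
//             return *(int *)result;                 // 42
//         return -1;                 // ESRCH, EDEADLK or EINVAL was returned
//     }
//
// As implemented above, joining yourself returns EDEADLK, and joining a
// detached or already-joined thread returns EINVAL.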
863
 
864
//-----------------------------------------------------------------------------
865
// Set the detachstate of the thread to "detached". The thread then does not
866
// need to be joined and its resources will be freed when it exits.
867
 
868
externC int pthread_detach (pthread_t thread)
869
{
870
    PTHREAD_ENTRY();
871
 
872
    int ret = 0;
873
 
874
    pthread_mutex.lock();
875
 
876
    pthread_info *detachee = pthread_info_id( thread );
877
 
878
    if( detachee == NULL )
879
        ret = ESRCH;                    // No such thread
880
    else if( detachee->state == PTHREAD_STATE_DETACHED )
881
        ret = EINVAL;                   // Already detached!
882
    else
883
    {
884
        // Set state to detached and kick any joinees to
885
        // make them return.
886
        detachee->state = PTHREAD_STATE_DETACHED;
887
        detachee->joiner->broadcast();
888
    }
889
 
890
    // Dispose of any dead threads
891
    pthread_reap();
892
 
893
    pthread_mutex.unlock();
894
 
895
    PTHREAD_RETURN(ret);
896
}
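//-----------------------------------------------------------------------------
// Usage sketch (illustrative only): a fire-and-forget thread. It can be
// created detached via the attribute, or detached after creation; either way
// its resources are reclaimed by pthread_reap() once it exits, and any later
// pthread_join() on it fails with EINVAL.
//
//     static void start_background( void *(*fn)(void *), void *arg )
//     {
//         pthread_t t;
//         pthread_attr_t attr;
//
//         pthread_attr_init( &attr );
//         pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_DETACHED );
//
//         if( pthread_create( &t, &attr, fn, arg ) != 0 )
//         {
//             // creation failed; nothing to clean up
//         }
//         // Alternatively: pthread_create( &t, NULL, fn, arg ); pthread_detach( t );
//
//         pthread_attr_destroy( &attr );
//     }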
897
 
898
 
899
//-----------------------------------------------------------------------------
900
// Thread attribute handling.
901
 
902
//-----------------------------------------------------------------------------
903
// Initialize attributes object with default attributes:
904
// detachstate          == PTHREAD_CREATE_JOINABLE
905
// scope                == PTHREAD_SCOPE_SYSTEM
906
// inheritsched         == PTHREAD_INHERIT_SCHED
907
// schedpolicy          == SCHED_OTHER
908
// schedparam           == unset
909
// stackaddr            == unset
910
// stacksize            == 0
911
// 
912
 
913
externC int pthread_attr_init (pthread_attr_t *attr)
914
{
915
    PTHREAD_ENTRY();
916
 
917
    PTHREAD_CHECK(attr);
918
 
919
    attr->detachstate                 = PTHREAD_CREATE_JOINABLE;
920
    attr->scope                       = PTHREAD_SCOPE_SYSTEM;
921
    attr->inheritsched                = PTHREAD_INHERIT_SCHED;
922
    attr->schedpolicy                 = SCHED_OTHER;
923
    attr->schedparam.sched_priority   = 0;
924
    attr->stackaddr_valid             = 0;
925
    attr->stackaddr                   = NULL;
926
    attr->stacksize_valid             = 0;
927
    attr->stacksize                   = 0;
928
 
929
    PTHREAD_RETURN(0);
930
}
931
 
932
//-----------------------------------------------------------------------------
933
// Destroy thread attributes object
934
 
935
externC int pthread_attr_destroy (pthread_attr_t *attr)
936
{
937
    PTHREAD_ENTRY();
938
 
939
    PTHREAD_CHECK(attr);
940
 
941
    // Nothing to do here...
942
 
943
    PTHREAD_RETURN(0);
944
}
945
 
946
//-----------------------------------------------------------------------------
947
// Set the detachstate attribute
948
 
949
externC int pthread_attr_setdetachstate (pthread_attr_t *attr,
950
                                         int detachstate)
951
{
952
    PTHREAD_ENTRY();
953
 
954
    PTHREAD_CHECK(attr);
955
 
956
    if( detachstate == PTHREAD_CREATE_JOINABLE ||
957
        detachstate == PTHREAD_CREATE_DETACHED )
958
    {
959
        attr->detachstate = detachstate;
960
        PTHREAD_RETURN(0);
961
    }
962
 
963
    PTHREAD_RETURN(EINVAL);
964
}
965
 
966
//-----------------------------------------------------------------------------
967
// Get the detachstate attribute
968
externC int pthread_attr_getdetachstate (const pthread_attr_t *attr,
969
                                         int *detachstate)
970
{
971
    PTHREAD_ENTRY();
972
 
973
    PTHREAD_CHECK(attr);
974
 
975
    if( detachstate != NULL )
976
        *detachstate = attr->detachstate;
977
 
978
    PTHREAD_RETURN(0);
979
}
980
 
981
//-----------------------------------------------------------------------------
982
// Set scheduling contention scope
983
 
984
externC int pthread_attr_setscope (pthread_attr_t *attr, int scope)
985
{
986
    PTHREAD_ENTRY();
987
 
988
    PTHREAD_CHECK(attr);
989
 
990
    if( scope == PTHREAD_SCOPE_SYSTEM ||
991
        scope == PTHREAD_SCOPE_PROCESS )
992
    {
993
        if( scope == PTHREAD_SCOPE_PROCESS )
994
            PTHREAD_RETURN(ENOTSUP);
995
 
996
        attr->scope = scope;
997
 
998
        PTHREAD_RETURN(0);
999
    }
1000
 
1001
    PTHREAD_RETURN(EINVAL);
1002
}
1003
 
1004
//-----------------------------------------------------------------------------
1005
// Get scheduling contention scope
1006
 
1007
externC int pthread_attr_getscope (const pthread_attr_t *attr, int *scope)
1008
{
1009
    PTHREAD_ENTRY();
1010
 
1011
    PTHREAD_CHECK(attr);
1012
 
1013
    if( scope != NULL )
1014
        *scope = attr->scope;
1015
 
1016
    PTHREAD_RETURN(0);
1017
}
1018
 
1019
//-----------------------------------------------------------------------------
1020
// Set scheduling inheritance attribute
1021
 
1022
externC int pthread_attr_setinheritsched (pthread_attr_t *attr, int inherit)
1023
{
1024
    PTHREAD_ENTRY();
1025
 
1026
    PTHREAD_CHECK(attr);
1027
 
1028
    if( inherit == PTHREAD_INHERIT_SCHED ||
1029
        inherit == PTHREAD_EXPLICIT_SCHED )
1030
    {
1031
        attr->inheritsched = inherit;
1032
 
1033
        PTHREAD_RETURN(0);
1034
    }
1035
 
1036
    PTHREAD_RETURN(EINVAL);
1037
}
1038
 
1039
//-----------------------------------------------------------------------------
1040
// Get scheduling inheritance attribute
1041
 
1042
externC int pthread_attr_getinheritsched (const pthread_attr_t *attr,
1043
                                          int *inherit)
1044
{
1045
    PTHREAD_ENTRY();
1046
 
1047
    PTHREAD_CHECK(attr);
1048
 
1049
    if( inherit != NULL )
1050
        *inherit = attr->inheritsched;
1051
 
1052
    PTHREAD_RETURN(0);
1053
}
1054
 
1055
//-----------------------------------------------------------------------------
1056
// Set scheduling policy
1057
 
1058
externC int pthread_attr_setschedpolicy (pthread_attr_t *attr, int policy)
1059
{
1060
    PTHREAD_ENTRY();
1061
 
1062
    PTHREAD_CHECK(attr);
1063
 
1064
    if( policy == SCHED_OTHER ||
1065
        policy == SCHED_FIFO ||
1066
        policy == SCHED_RR )
1067
    {
1068
        attr->schedpolicy = policy;
1069
 
1070
        PTHREAD_RETURN(0);
1071
    }
1072
 
1073
    PTHREAD_RETURN(EINVAL);
1074
}
1075
 
1076
//-----------------------------------------------------------------------------
1077
// Get scheduling policy
1078
 
1079
externC int pthread_attr_getschedpolicy (const pthread_attr_t *attr,
1080
                                         int *policy)
1081
{
1082
    PTHREAD_ENTRY();
1083
 
1084
    PTHREAD_CHECK(attr);
1085
 
1086
    if( policy != NULL )
1087
        *policy = attr->schedpolicy;
1088
 
1089
    PTHREAD_RETURN(0);
1090
}
1091
 
1092
//-----------------------------------------------------------------------------
1093
// Set scheduling parameters
1094
externC int pthread_attr_setschedparam (pthread_attr_t *attr,
1095
                                        const struct sched_param *param)
1096
{
1097
    PTHREAD_ENTRY();
1098
 
1099
    PTHREAD_CHECK(attr);
1100
    PTHREAD_CHECK(param);
1101
 
1102
    attr->schedparam = *param;
1103
 
1104
    PTHREAD_RETURN(0);
1105
}
1106
 
1107
//-----------------------------------------------------------------------------
1108
// Get scheduling parameters
1109
 
1110
externC int pthread_attr_getschedparam (const pthread_attr_t *attr,
1111
                                        struct sched_param *param)
1112
{
1113
    PTHREAD_ENTRY();
1114
 
1115
    PTHREAD_CHECK(attr);
1116
 
1117
    if( param != NULL )
1118
        *param = attr->schedparam;
1119
 
1120
    PTHREAD_RETURN(0);
1121
}
1122
 
1123
//-----------------------------------------------------------------------------
1124
// Set starting address of stack. Whether this is at the start or end of
1125
// the memory block allocated for the stack depends on whether the stack
1126
// grows up or down.
1127
 
1128
externC int pthread_attr_setstackaddr (pthread_attr_t *attr, void *stackaddr)
1129
{
1130
    PTHREAD_ENTRY();
1131
 
1132
    PTHREAD_CHECK(attr);
1133
 
1134
    attr->stackaddr       = stackaddr;
1135
    attr->stackaddr_valid = 1;
1136
 
1137
    PTHREAD_RETURN(0);
1138
}
1139
 
1140
//-----------------------------------------------------------------------------
1141
// Get any previously set stack address.
1142
 
1143
externC int pthread_attr_getstackaddr (const pthread_attr_t *attr,
1144
                                       void **stackaddr)
1145
{
1146
    PTHREAD_ENTRY();
1147
 
1148
    PTHREAD_CHECK(attr);
1149
 
1150
    if( stackaddr != NULL )
1151
    {
1152
        if( attr->stackaddr_valid )
1153
        {
1154
            *stackaddr = attr->stackaddr;
1155
            PTHREAD_RETURN(0);
1156
        }
1157
        // Stack address not set, return EINVAL.
1158
        else PTHREAD_RETURN(EINVAL);
1159
    }
1160
 
1161
    PTHREAD_RETURN(0);
1162
}
1163
 
1164
 
1165
//-----------------------------------------------------------------------------
1166
// Set minimum creation stack size.
1167
 
1168
externC int pthread_attr_setstacksize (pthread_attr_t *attr,
1169
                                       size_t stacksize)
1170
{
1171
    PTHREAD_ENTRY();
1172
 
1173
    PTHREAD_CHECK(attr);
1174
 
1175
    CYG_ASSERT( stacksize >= PTHREAD_STACK_MIN, "Inadequate stack size supplied");
1176
 
1177
    // Reject inadequate stack sizes
1178
    if( stacksize < PTHREAD_STACK_MIN )
1179
        PTHREAD_RETURN(EINVAL);
1180
 
1181
    attr->stacksize_valid = 1;
1182
    attr->stacksize = stacksize;
1183
 
1184
    PTHREAD_RETURN(0);
1185
}
1186
 
1187
//-----------------------------------------------------------------------------
1188
// Get current minimal stack size.
1189
 
1190
externC int pthread_attr_getstacksize (const pthread_attr_t *attr,
1191
                                       size_t *stacksize)
1192
{
1193
    PTHREAD_ENTRY();
1194
 
1195
    PTHREAD_CHECK(attr);
1196
 
1197
    // Reject attempts to get a stack size when one has not been set.
1198
    if( !attr->stacksize_valid )
1199
        PTHREAD_RETURN(EINVAL);
1200
 
1201
    if( stacksize != NULL )
1202
        *stacksize = attr->stacksize;
1203
 
1204
    PTHREAD_RETURN(0);
1205
}
1206
 
1207
//-----------------------------------------------------------------------------
1208
// Thread scheduling controls
1209
 
1210
//-----------------------------------------------------------------------------
1211
// Set scheduling policy and parameters for the thread
1212
 
1213
externC int pthread_setschedparam (pthread_t thread_id,
1214
                                   int policy,
1215
                                   const struct sched_param *param)
1216
{
1217
    PTHREAD_ENTRY();
1218
 
1219
    if( policy != SCHED_OTHER &&
1220
        policy != SCHED_FIFO &&
1221
        policy != SCHED_RR )
1222
        PTHREAD_RETURN(EINVAL);
1223
 
1224
    PTHREAD_CHECK(param);
1225
 
1226
    // The parameters seem OK, change the thread...
1227
 
1228
    pthread_mutex.lock();
1229
 
1230
    pthread_info *thread = pthread_info_id( thread_id );
1231
 
1232
    if( thread == NULL )
1233
    {
1234
        pthread_mutex.unlock();
1235
        PTHREAD_RETURN(ESRCH);
1236
    }
1237
 
1238
    thread->attr.schedpolicy = policy;
1239
    thread->attr.schedparam = *param;
1240
 
1241
    if ( policy == SCHED_FIFO )
1242
         thread->thread->timeslice_disable();
1243
    else thread->thread->timeslice_enable();
1244
 
1245
    thread->thread->set_priority( PTHREAD_ECOS_PRIORITY( param->sched_priority ));
1246
 
1247
    pthread_mutex.unlock();
1248
 
1249
    PTHREAD_RETURN(0);
1250
}
1251
 
1252
//-----------------------------------------------------------------------------
1253
// Get scheduling policy and parameters for the thread
1254
 
1255
externC int pthread_getschedparam (pthread_t thread_id,
1256
                                   int *policy,
1257
                                   struct sched_param *param)
1258
{
1259
    PTHREAD_ENTRY();
1260
 
1261
    pthread_mutex.lock();
1262
 
1263
    pthread_info *thread = pthread_info_id( thread_id );
1264
 
1265
    if( thread == NULL )
1266
    {
1267
        pthread_mutex.unlock();
1268
        PTHREAD_RETURN(ESRCH);
1269
    }
1270
 
1271
    if( policy != NULL )
1272
        *policy = thread->attr.schedpolicy;
1273
 
1274
    if( param != NULL )
1275
        *param = thread->attr.schedparam;
1276
 
1277
    pthread_mutex.unlock();
1278
 
1279
    PTHREAD_RETURN(0);
1280
}
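//-----------------------------------------------------------------------------
// Usage sketch (illustrative only): changing a running thread's policy and
// priority. The priority value is an assumption; note that, as implemented
// above, SCHED_FIFO also disables timeslicing for the underlying eCos thread.
//
//     static int make_fifo( pthread_t t, int prio )
//     {
//         struct sched_param sp;
//         sp.sched_priority = prio;
//         return pthread_setschedparam( t, SCHED_FIFO, &sp );  // 0, EINVAL or ESRCH
//     }
//
//     static int current_policy( void )
//     {
//         int policy;
//         struct sched_param sp;
//         pthread_getschedparam( pthread_self(), &policy, &sp );
//         return policy;                     // SCHED_OTHER, SCHED_FIFO or SCHED_RR
//     }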
1281
 
1282
 
1283
//=============================================================================
1284
// Dynamic package initialization
1285
// Call init_routine just the once per control variable.
1286
 
1287
externC int pthread_once (pthread_once_t *once_control,
1288
                          void (*init_routine) (void))
1289
{
1290
    PTHREAD_ENTRY();
1291
 
1292
    PTHREAD_CHECK( once_control );
1293
    PTHREAD_CHECK( init_routine );
1294
 
1295
    pthread_once_t old;
1296
 
1297
    // Do a test and set on the once_control object.
1298
    pthread_mutex.lock();
1299
 
1300
    old = *once_control;
1301
    *once_control = 1;
1302
 
1303
    pthread_mutex.unlock();
1304
 
1305
    // If the once_control was zero, call the init_routine().
1306
    if( !old ) init_routine();
1307
 
1308
    PTHREAD_RETURN(0);
1309
}
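//-----------------------------------------------------------------------------
// Usage sketch (illustrative only): one-time initialization shared by several
// threads. Whichever caller gets there first runs init_io(); later callers
// return without invoking it again.
//
//     static pthread_once_t io_once = PTHREAD_ONCE_INIT;
//
//     static void init_io( void )
//     {
//         // set up shared state exactly once
//     }
//
//     static void io_init_once( void )
//     {
//         pthread_once( &io_once, init_io );
//     }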
1310
 
1311
 
1312
//=============================================================================
1313
//Thread specific data
1314
 
1315
//-----------------------------------------------------------------------------
1316
// Create a key to identify a location in the thread specific data area.
1317
// Each thread has its own distinct thread-specific data area but all are
1318
// addressed by the same keys. The destructor function is called whenever a
1319
// thread exits and the value associated with the key is non-NULL.
1320
 
1321
externC int pthread_key_create (pthread_key_t *key,
1322
                                void (*destructor) (void *))
1323
{
1324
    PTHREAD_ENTRY();
1325
 
1326
    pthread_key_t k = -1;
1327
 
1328
    pthread_mutex.lock();
1329
 
1330
    // Find a key to allocate
1331
    for( cyg_ucount32 i = 0; i < (PTHREAD_KEYS_MAX/KEY_MAP_TYPE_SIZE); i++ )
1332
    {
1333
        if( thread_key[i] != 0 )
1334
        {
1335
            // We have a table slot with space available
1336
 
1337
            // Get the index of the least significant set bit.
1338
            HAL_LSBIT_INDEX( k, thread_key[i] );
1339
 
1340
            // clear it
1341
            thread_key[i] &= ~(1<<k);
1342
 
1343
            // Add index of word
1344
            k += i * KEY_MAP_TYPE_SIZE;
1345
 
1346
            // Install destructor
1347
            key_destructor[k] = destructor;
1348
 
1349
            // break out with key found
1350
            break;
1351
        }
1352
    }
1353
 
1354
    if( k != -1 )
1355
    {
1356
        // plant a NULL in all the valid thread data slots for this
1357
        // key in case we are reusing a key we used before.
1358
 
1359
        for( cyg_ucount32 i = 0; i < CYGNUM_POSIX_PTHREAD_THREADS_MAX ; i++ )
1360
        {
1361
            pthread_info *thread = thread_table[i];
1362
 
1363
            if( thread != NULL && thread->thread_data != NULL )
1364
                thread->thread_data[k] = NULL;
1365
        }
1366
    }
1367
 
1368
    pthread_mutex.unlock();
1369
 
1370
    if( k == -1 ) PTHREAD_RETURN(EAGAIN);
1371
 
1372
    *key = k;
1373
 
1374
    PTHREAD_RETURN(0);
1375
}
1376
 
1377
//-----------------------------------------------------------------------------
1378
// Delete key.
1379
 
1380
externC int pthread_key_delete (pthread_key_t key)
1381
{
1382
    PTHREAD_ENTRY();
1383
 
1384
    pthread_mutex.lock();
1385
 
1386
    // Set the key bit to 1 to indicate it is free.
1387
    thread_key[key/KEY_MAP_TYPE_SIZE] |= 1<<(key%(KEY_MAP_TYPE_SIZE));
1388
 
1389
    pthread_mutex.unlock();
1390
 
1391
    PTHREAD_RETURN(0);
1392
}
1393
 
1394
//-----------------------------------------------------------------------------
1395
// Store the pointer value in the thread-specific data slot addressed
1396
// by the key.
1397
 
1398
externC int pthread_setspecific (pthread_key_t key, const void *pointer)
1399
{
1400
    PTHREAD_ENTRY();
1401
 
1402
    if( thread_key[key/KEY_MAP_TYPE_SIZE] & 1<<(key%KEY_MAP_TYPE_SIZE) )
1403
        PTHREAD_RETURN(EINVAL);
1404
 
1405
    pthread_info *self = pthread_self_info();
1406
 
1407
    if( self->thread_data == NULL )
1408
    {
1409
        // Allocate the per-thread data table
1410
        self->thread_data =
1411
            (void **)self->thread->increment_stack_limit(
1412
                PTHREAD_KEYS_MAX * sizeof(void *) );
1413
 
1414
        // Clear out all entries
1415
        for( int i  = 0; i < PTHREAD_KEYS_MAX; i++ )
1416
            self->thread_data[i] = NULL;
1417
    }
1418
 
1419
    self->thread_data[key] = (void *)pointer;
1420
 
1421
    PTHREAD_RETURN(0);
1422
}
1423
 
1424
//-----------------------------------------------------------------------------
1425
// Retrieve the pointer value in the thread-specific data slot addressed
1426
// by the key.
1427
 
1428
externC void *pthread_getspecific (pthread_key_t key)
1429
{
1430
    void *val;
1431
    PTHREAD_ENTRY();
1432
 
1433
    if( thread_key[key/KEY_MAP_TYPE_SIZE] & 1<<(key%KEY_MAP_TYPE_SIZE) )
1434
        PTHREAD_RETURN(NULL);
1435
 
1436
    pthread_info *self = pthread_self_info();
1437
 
1438
    if( self->thread_data == NULL )
1439
        val = NULL;
1440
    else val = self->thread_data[key];
1441
 
1442
    PTHREAD_RETURN(val);
1443
}
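//-----------------------------------------------------------------------------
// Usage sketch (illustrative only): a per-thread buffer addressed through a
// shared key. The destructor is run from pthread_exit() for every thread whose
// value is still non-NULL. Assumes malloc()/free() are available; the names
// are invented for the example.
//
//     static pthread_key_t buf_key;
//
//     static void buf_destroy( void *p ) { free( p ); }
//
//     static void buf_key_init( void )
//     {
//         pthread_key_create( &buf_key, buf_destroy );   // EAGAIN if no keys left
//     }
//
//     static char *get_my_buffer( void )
//     {
//         char *buf = (char *)pthread_getspecific( buf_key );
//         if( buf == NULL )
//         {
//             buf = (char *)malloc( 256 );
//             pthread_setspecific( buf_key, buf );
//         }
//         return buf;
//     }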
1444
 
1445
//=============================================================================
1446
// Thread Cancellation Functions
1447
 
1448
//-----------------------------------------------------------------------------
1449
// Set cancel state of current thread to ENABLE or DISABLE.
1450
// Returns old state in *oldstate.
1451
 
1452
externC int pthread_setcancelstate (int state, int *oldstate)
1453
{
1454
    PTHREAD_ENTRY();
1455
 
1456
    if( state != PTHREAD_CANCEL_ENABLE &&
1457
        state != PTHREAD_CANCEL_DISABLE )
1458
        PTHREAD_RETURN(EINVAL);
1459
 
1460
    pthread_mutex.lock();
1461
 
1462
    pthread_info *self = pthread_self_info();
1463
 
1464
    if( oldstate != NULL ) *oldstate = self->cancelstate;
1465
 
1466
    self->cancelstate = state;
1467
 
1468
    pthread_mutex.unlock();
1469
 
1470
    // Note: This function may have made it possible for a pending
1471
    // cancellation to now be delivered. However the standard does not
1472
    // list this function as a cancellation point, so for now we do
1473
    // nothing. In future we might call pthread_testcancel() here.
1474
 
1475
    PTHREAD_RETURN(0);
1476
}
1477
 
1478
//-----------------------------------------------------------------------------
1479
// Set cancel type of current thread to ASYNCHRONOUS or DEFERRED.
1480
// Returns old type in *oldtype.
1481
 
1482
externC int pthread_setcanceltype (int type, int *oldtype)
1483
{
1484
    PTHREAD_ENTRY();
1485
 
1486
    if( type != PTHREAD_CANCEL_ASYNCHRONOUS &&
1487
        type != PTHREAD_CANCEL_DEFERRED )
1488
        PTHREAD_RETURN(EINVAL);
1489
 
1490
    pthread_mutex.lock();
1491
 
1492
    pthread_info *self = pthread_self_info();
1493
 
1494
    if( oldtype != NULL ) *oldtype = self->canceltype;
1495
 
1496
    self->canceltype = type;
1497
 
1498
    pthread_mutex.unlock();
1499
 
1500
    // Note: This function may have made it possible for a pending
1501
    // cancellation to now be delivered. However the standard does not
1502
    // list this function as a cancellation point, so for now we do
1503
    // nothing. In future we might call pthread_testcancel() here.
1504
 
1505
    PTHREAD_RETURN(0);
1506
}
1507
 
1508
//-----------------------------------------------------------------------------
1509
// Cancel the thread.
1510
 
1511
externC int pthread_cancel (pthread_t thread)
1512
{
1513
    PTHREAD_ENTRY();
1514
 
1515
    pthread_mutex.lock();
1516
 
1517
    pthread_info *th = pthread_info_id(thread);
1518
 
1519
    if( th == NULL )
1520
    {
1521
        pthread_mutex.unlock();
1522
        PTHREAD_RETURN(ESRCH);
1523
    }
1524
 
1525
    th->cancelpending = true;
1526
 
1527
    if ( th->cancelstate == PTHREAD_CANCEL_ENABLE )
1528
    {
1529
        if ( th->canceltype == PTHREAD_CANCEL_ASYNCHRONOUS )
1530
        {
1531
            // If the thread has cancellation enabled, and it is in
1532
            // asynchronous mode, set the eCos thread's ASR pending to
1533
            // deal with it when the thread wakes up. We also release the
1534
            // thread out of any current wait to make it wake up.
1535
 
1536
            th->thread->set_asr_pending();
1537
            th->thread->release();
1538
        }
1539
        else if ( th->canceltype == PTHREAD_CANCEL_DEFERRED )
1540
        {
1541
            // If the thread has cancellation enabled, and it is in 
1542
            // deferred mode, wake the thread up so that cancellation
1543
            // points can test for cancellation.
1544
            th->thread->release();
1545
        }
1546
        else
1547
            CYG_FAIL("Unknown cancellation type");
1548
    }
1549
 
1550
    // Otherwise the thread has cancellation disabled, in which case
1551
    // it is up to the thread to enable cancellation
1552
 
1553
    pthread_mutex.unlock();
1554
 
1555
 
1556
    PTHREAD_RETURN(0);
1557
}
1558
 
1559
//-----------------------------------------------------------------------------
1560
// Test for a pending cancellation for the current thread and terminate
1561
// the thread if there is one.
1562
 
1563
externC void pthread_testcancel (void)
1564
{
1565
    PTHREAD_ENTRY_VOID();
1566
 
1567
    if( checkforcancel() )
1568
    {
1569
        // If we have cancellation enabled, and there is a cancellation
1570
        // pending, then go ahead and do the deed. 
1571
 
1572
        // Exit now with special retval. pthread_exit() calls the
1573
        // cancellation handlers implicitly.
1574
        pthread_exit(PTHREAD_CANCELED);
1575
    }
1576
 
1577
    PTHREAD_RETURN_VOID;
1578
}
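//-----------------------------------------------------------------------------
// Usage sketch (illustrative only): deferred cancellation, the default mode
// set up by pthread_create() above. The worker polls pthread_testcancel() at a
// safe point in its loop; the controller requests cancellation and then joins,
// receiving PTHREAD_CANCELED as the exit value.
//
//     static void *poller( void *arg )
//     {
//         for(;;)
//         {
//             // ... do one unit of work ...
//             pthread_testcancel();      // thread exits here once cancelled
//         }
//         return NULL;                   // not reached
//     }
//
//     static void stop_poller( pthread_t t )
//     {
//         void *res;
//         pthread_cancel( t );
//         pthread_join( t, &res );       // res == PTHREAD_CANCELED
//     }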
1579
 
1580
//-----------------------------------------------------------------------------
1581
// These two functions actually implement the cleanup push and pop functionality.
1582
 
1583
externC void pthread_cleanup_push_inner (struct pthread_cleanup_buffer *buffer,
1584
                                         void (*routine) (void *),
1585
                                         void *arg)
1586
{
1587
    PTHREAD_ENTRY();
1588
 
1589
    pthread_info *self = pthread_self_info();
1590
 
1591
    buffer->routine     = routine;
1592
    buffer->arg         = arg;
1593
 
1594
    buffer->prev        = self->cancelbuffer;
1595
 
1596
    self->cancelbuffer  = buffer;
1597
 
1598
    return;
1599
}
1600
 
1601
externC void pthread_cleanup_pop_inner (struct pthread_cleanup_buffer *buffer,
1602
                                        int execute)
1603
{
1604
    PTHREAD_ENTRY();
1605
 
1606
    pthread_info *self = pthread_self_info();
1607
 
1608
    CYG_ASSERT( self->cancelbuffer == buffer, "Stacking error in cleanup buffers");
1609
 
1610
    if( self->cancelbuffer == buffer )
1611
    {
1612
        // Remove the buffer from the stack
1613
        self->cancelbuffer = buffer->prev;
1614
    }
1615
    else
1616
    {
1617
        // If the top of the stack is not the buffer we expect, do not
1618
        // execute it.
1619
        execute = 0;
1620
    }
1621
 
1622
    if( execute ) buffer->routine(buffer->arg);
1623
 
1624
    return;
1625
}
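//-----------------------------------------------------------------------------
// Usage sketch (illustrative only): the pthread_cleanup_push()/pop() macros,
// which are implemented by the _inner functions above, used to guarantee that
// a lock is released even if the protected region is cancelled. Assumes the
// POSIX mutex calls provided elsewhere in this package; the names are invented
// for the example.
//
//     static void unlock_it( void *m )
//     {
//         pthread_mutex_unlock( (pthread_mutex_t *)m );
//     }
//
//     static void guarded_work( pthread_mutex_t *m )
//     {
//         pthread_mutex_lock( m );
//         pthread_cleanup_push( unlock_it, m );
//
//         // ... code that may reach a cancellation point ...
//
//         pthread_cleanup_pop( 1 );      // run unlock_it( m ) on the way out
//     }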
1626
 
1627
 
1628
// -------------------------------------------------------------------------
1629
// eCos-specific function to measure stack usage of the supplied thread
1630
 
1631
#ifdef CYGFUN_KERNEL_THREADS_STACK_MEASUREMENT
1632
externC size_t pthread_measure_stack_usage (pthread_t thread)
1633
{
1634
    pthread_info *th = pthread_info_id(thread);
1635
 
1636
    if ( NULL == th )
1637
      return (size_t)-1;
1638
 
1639
    return (size_t)th->thread->measure_stack_usage();
1640
}
1641
#endif
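//-----------------------------------------------------------------------------
// Usage sketch (illustrative only): the eCos-specific stack measurement hook.
//
//     static size_t my_stack_usage( void )
//     {
//     #ifdef CYGFUN_KERNEL_THREADS_STACK_MEASUREMENT
//         return pthread_measure_stack_usage( pthread_self() );  // (size_t)-1 on error
//     #else
//         return 0;
//     #endif
//     }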
1642
 
1643
// -------------------------------------------------------------------------
1644
// EOF pthread.cxx
