//==========================================================================
2
//
3
//      pthread.cxx
4
//
5
//      POSIX pthreads implementation
6
//
7
//==========================================================================
8
// ####ECOSGPLCOPYRIGHTBEGIN####                                            
9
// -------------------------------------------                              
10
// This file is part of eCos, the Embedded Configurable Operating System.   
11
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
12
//
13
// eCos is free software; you can redistribute it and/or modify it under    
14
// the terms of the GNU General Public License as published by the Free     
15
// Software Foundation; either version 2 or (at your option) any later      
16
// version.                                                                 
17
//
18
// eCos is distributed in the hope that it will be useful, but WITHOUT      
19
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or    
20
// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License    
21
// for more details.                                                        
22
//
23
// You should have received a copy of the GNU General Public License        
24
// along with eCos; if not, write to the Free Software Foundation, Inc.,    
25
// 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.            
26
//
27
// As a special exception, if other files instantiate templates or use      
28
// macros or inline functions from this file, or you compile this file      
29
// and link it with other works to produce a work based on this file,       
30
// this file does not by itself cause the resulting work to be covered by   
31
// the GNU General Public License. However the source code for this file    
32
// must still be made available in accordance with section (3) of the GNU   
33
// General Public License v2.                                               
34
//
35
// This exception does not invalidate any other reasons why a work based    
36
// on this file might be covered by the GNU General Public License.         
37
// -------------------------------------------                              
38
// ####ECOSGPLCOPYRIGHTEND####                                              
39
//==========================================================================
40
//#####DESCRIPTIONBEGIN####
41
//
42
// Author(s):           nickg
43
// Contributors:        nickg, jlarmour
44
// Date:                2000-03-27
45
// Purpose:             POSIX pthread implementation
46
// Description:         This file contains the implementation of the POSIX pthread
47
//                      functions.
48
//              
49
//              
50
//
51
//####DESCRIPTIONEND####
52
//
53
//==========================================================================
54
 
55
#include <pkgconf/hal.h>
56
#include <pkgconf/kernel.h>
57
#include <pkgconf/posix.h>
58
#include <pkgconf/isoinfra.h>
59
#include <pkgconf/libc_startup.h>
60
 
61
#include <cyg/kernel/ktypes.h>         // base kernel types
62
#include <cyg/infra/cyg_trac.h>        // tracing macros
63
#include <cyg/infra/cyg_ass.h>         // assertion macros
64
 
65
#include "pprivate.h"                   // POSIX private header
66
 
67
#include <stdlib.h>                     // malloc(), free()
68
 
69
#include <cyg/kernel/sched.hxx>        // scheduler definitions
70
#include <cyg/kernel/thread.hxx>       // thread definitions
71
#include <cyg/kernel/clock.hxx>        // clock definitions
72
 
73
#include <cyg/kernel/sched.inl>        // scheduler inlines
74
 
75
//-----------------------------------------------------------------------------
76
// First check that the configuration contains the elements we need
77
 
78
#ifndef CYGPKG_KERNEL
79
#error POSIX pthreads need the eCos kernel
80
#endif
81
 
82
#ifndef CYGSEM_KERNEL_SCHED_MLQUEUE
83
#error POSIX pthreads need MLQ scheduler
84
#endif
85
 
86
#ifndef CYGSEM_KERNEL_SCHED_TIMESLICE
87
#error POSIX pthreads need timeslicing
88
#endif
89
 
90
#ifndef CYGVAR_KERNEL_THREADS_DATA
91
#error POSIX pthreads need per-thread data
92
#endif
93
 
94
//=============================================================================
95
// Internal data structures
96
 
97
// Mutex for controlling access to shared data structures
98
Cyg_Mutex pthread_mutex CYGBLD_POSIX_INIT;
99
 
100
// Array of pthread control structures. A pthread_t value encodes an index
// into this array in its low-order bits, plus a reuse cookie in the upper
// bits (see THREAD_ID_COOKIE_INC below).
102
static pthread_info *thread_table[CYGNUM_POSIX_PTHREAD_THREADS_MAX];
103
 
104
// Count of number of threads in table.
105
static int pthread_count = 0;
106
 
107
// Count of number of threads that have exited and not been reaped.
108
static int pthreads_exited;
109
 
110
// Count of number of threads that are waiting to be joined
111
static int pthreads_tobejoined;
112
 
113
// Per-thread key allocation. This key map has a 1 bit set for each
114
// key that is free, zero if it is allocated.
115
#define KEY_MAP_TYPE cyg_uint32
116
#define KEY_MAP_TYPE_SIZE (sizeof(KEY_MAP_TYPE)*8) // in BITS!
117
static KEY_MAP_TYPE thread_key[PTHREAD_KEYS_MAX/KEY_MAP_TYPE_SIZE];
118
static void (*key_destructor[PTHREAD_KEYS_MAX]) (void *);
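// For illustration (not part of the original source, and assuming
// PTHREAD_KEYS_MAX is at least 64): key 37 is tracked by bit 37%32 == 5 of
// thread_key[37/32] == thread_key[1]; that bit reads 1 while the key is
// free and is cleared to 0 when pthread_key_create() hands the key out.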
119
 
120
// Index of next pthread_info to allocate from thread_table array.
121
static int thread_info_next = 0;
122
 
123
// This is used to make pthread_t values unique even when reusing
124
// a table slot. This allows CYGNUM_POSIX_PTHREAD_THREADS_MAX to range
125
// up to 1024.
126
#define THREAD_ID_COOKIE_INC 0x00000400
127
#define THREAD_ID_COOKIE_MASK (THREAD_ID_COOKIE_INC-1)
128
static pthread_t thread_id_cookie = THREAD_ID_COOKIE_INC;
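// For illustration (not part of the original source): with
// THREAD_ID_COOKIE_INC == 0x400 the low 10 bits of a pthread_t select the
// table slot and the upper bits hold the reuse cookie. A thread placed in
// slot 5 while the cookie stands at 0x1000 gets id 0x1005;
// (id & THREAD_ID_COOKIE_MASK) recovers slot 5, and the cookie part
// distinguishes it from any earlier thread that occupied the same slot.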
129
 
130
//-----------------------------------------------------------------------------
131
// Main thread.
132
 
133
#define MAIN_DEFAULT_STACK_SIZE \
134
  (CYGNUM_LIBC_MAIN_DEFAULT_STACK_SIZE < PTHREAD_STACK_MIN \
135
              ? PTHREAD_STACK_MIN : CYGNUM_LIBC_MAIN_DEFAULT_STACK_SIZE)
136
 
137
static char main_stack[MAIN_DEFAULT_STACK_SIZE];
138
 
139
// Thread ID of main thread.
140
static pthread_t main_thread;
141
 
142
//=============================================================================
143
// Exported variables
144
 
145
int pthread_canceled_dummy_var;           // pointed to by PTHREAD_CANCELED
146
 
147
//=============================================================================
148
// Internal functions
149
 
150
//-----------------------------------------------------------------------------
151
// Private version of pthread_self() that returns a pointer to our internal
152
// control structure.
153
 
154
pthread_info *pthread_self_info(void)
155
{
156
    Cyg_Thread *thread = Cyg_Thread::self();
157
 
158
    CYG_CHECK_DATA_PTR(thread, "Illegal current thread");
159
 
160
    pthread_info *info = (pthread_info *)thread->get_data(CYGNUM_KERNEL_THREADS_DATA_POSIX);
161
 
162
    // This assertion must not be enabled: this function may legitimately be
    // called in contexts where the value can be NULL, provided the caller
    // allows for that - e.g. when inheriting sigmasks while creating the
    // main() thread.
166
//    CYG_CHECK_DATA_PTR(info, "Not a POSIX thread!!!");
167
 
168
    return info;
169
}
170
 
171
externC pthread_info *pthread_info_id( pthread_t id )
172
{
173
    pthread_t index = id & THREAD_ID_COOKIE_MASK;
174
 
175
    pthread_info *info = thread_table[index];
176
 
177
    // Check for a valid entry
178
    if( info == NULL )
179
        return NULL;
180
 
181
    // Check that this is a valid entry
182
    if ( info->state == PTHREAD_STATE_FREE ||
183
         info->state == PTHREAD_STATE_EXITED )
184
        return NULL;
185
 
186
    // Check that the entry matches the id
187
    if( info->id != id ) return NULL;
188
 
189
    // Return the pointer
190
    return info;
191
}
192
 
193
//-----------------------------------------------------------------------------
194
// new operator to allow us to invoke the Cyg_Thread constructor on the
195
// pthread_info.thread_obj array.
196
 
197
inline void *operator new(size_t size,  cyg_uint8 *ptr) { return (void *)ptr; };
198
 
199
//-----------------------------------------------------------------------------
200
// Optional memory allocation functions for pthread stacks.
201
// If there is an implementation of malloc() available, define pthread_malloc()
202
// and pthread_free() to use it. Otherwise define them to do nothing.
203
// In the future we may want to add configuration here to permit thread stacks
204
// to be allocated in a nominated memory pool separate from the standard malloc()
205
// pool. Hence the (currently redundant) encapsulation of these functions.
206
 
207
#if CYGINT_ISO_MALLOC
208
 
209
static __inline__ CYG_ADDRWORD pthread_malloc( CYG_ADDRWORD size )
210
{
211
    return (CYG_ADDRWORD)malloc( size );
212
}
213
 
214
static __inline__ void pthread_free( CYG_ADDRWORD m )
215
{
216
    free( (void *)m );
217
}
218
 
219
#define PTHREAD_MALLOC
220
 
221
#else
222
 
223
#define pthread_malloc(_x_) (0)
224
 
225
#define pthread_free(_x_)
226
 
227
#endif
228
 
229
//-----------------------------------------------------------------------------
230
// pthread entry function.
231
// Does some housekeeping and then calls the user's start routine.
232
 
233
static void pthread_entry(CYG_ADDRWORD data)
234
{
235
    pthread_info *self = (pthread_info *)data;
236
 
237
    void *retval = self->start_routine(self->start_arg);
238
 
239
    pthread_exit( retval );
240
}
241
 
242
//-----------------------------------------------------------------------------
243
// Main entry function.
244
// This is set as the start_routine of the main thread. It invokes main()
245
// and if it returns, shuts down the system.
246
 
247
externC void cyg_libc_invoke_main( void );
248
 
249
static void *call_main( void * )
250
{
251
    cyg_libc_invoke_main();
252
    return NULL; // placate compiler
253
}
254
 
255
//-----------------------------------------------------------------------------
256
// Check whether there is a cancel pending and if so, whether
257
// cancellations are enabled. We do it in this order to reduce the
258
// number of tests in the common case - when no cancellations are
259
// pending.
260
// We make this inline so it can be called directly below for speed
261
 
262
static __inline__ int
263
checkforcancel( void )
264
{
265
     pthread_info *self = pthread_self_info();
266
 
267
    if( self != NULL &&
268
        self->cancelpending &&
269
        self->cancelstate == PTHREAD_CANCEL_ENABLE )
270
        return 1;
271
    else
272
        return 0;
273
}
274
 
275
 
276
//-----------------------------------------------------------------------------
277
// POSIX ASR
278
// This is installed as the ASR for all POSIX threads.
279
 
280
static void posix_asr( CYG_ADDRWORD data )
281
{
282
    pthread_info *self = (pthread_info *)data;
283
 
284
#ifdef CYGPKG_POSIX_TIMERS
285
    // Call into timer subsystem to deliver any pending
286
    // timer expirations.
287
    cyg_posix_timer_asr(self);
288
#endif
289
 
290
#ifdef CYGPKG_POSIX_SIGNALS
291
    // Call signal subsystem to deliver any signals
292
    cyg_posix_signal_asr(self);
293
#endif
294
 
295
    // Check for cancellation
296
    if( self->cancelpending &&
297
        self->cancelstate == PTHREAD_CANCEL_ENABLE &&
298
        self->canceltype == PTHREAD_CANCEL_ASYNCHRONOUS )
299
    {
300
        // If we have a pending cancellation, cancellations are
301
        // enabled and we are in asynchronous mode, then we can do the
302
        // cancellation processing.  Since pthread_exit() does
303
        // everything we need to do, we just call that here.
304
 
305
        pthread_exit(PTHREAD_CANCELED);
306
    }
307
}
308
 
309
//-----------------------------------------------------------------------------
310
// The (Grim) Reaper.
311
// This function is called to tidy up and dispose of any threads that have
312
// exited. This work must be done from a thread other than the one exiting.
313
// Note: this function _must_ be called with pthread_mutex locked.
314
 
315
static void pthread_reap()
316
{
317
    int i;
318
 
319
    // Loop over the thread table looking for exited threads. The
320
    // pthreads_exited counter springs us out of this once we have
321
    // found them all (and keeps us out if there are none to do).
322
 
323
    for( i = 0; pthreads_exited && i < CYGNUM_POSIX_PTHREAD_THREADS_MAX ; i++ )
324
    {
325
        pthread_info *thread = thread_table[i];
326
 
327
        if( thread != NULL && thread->state == PTHREAD_STATE_EXITED )
328
        {
329
            // The thread has exited, so it is a candidate for being
330
            // reaped. We have to make sure that the eCos thread has
331
            // also reached EXITED state before we can tidy it up.
332
 
333
            while( thread->thread->get_state() != Cyg_Thread::EXITED )
334
            {
335
                // The eCos thread has not yet exited. This is
336
                // probably because its priority is too low to allow
337
                // it to complete.  We fix this here by raising its
338
                // priority to equal ours and then yielding. This
339
                // should eventually get it into exited state.
340
 
341
                Cyg_Thread *self = Cyg_Thread::self();
342
 
343
                // Set thread's priority to our current dispatching priority.
344
                thread->thread->set_priority( self->get_current_priority() );
345
 
346
                // Yield, yield
347
                self->yield();
348
 
349
                // and keep looping until it exits.
350
            }
351
 
352
            // At this point we have a thread that we can reap.
353
 
354
            // destroy the eCos thread
355
            thread->thread->~Cyg_Thread();
356
 
357
            // destroy the joiner condvar
358
            thread->joiner->~Cyg_Condition_Variable();
359
 
360
#ifdef CYGPKG_POSIX_SIGNALS
361
            // Destroy signal handling fields
362
            cyg_posix_thread_sigdestroy( thread );
363
#endif
364
 
365
            // Free the stack if we allocated it
366
            if( thread->freestack )
367
                pthread_free( thread->stackmem );
368
 
369
            // Finally, set the thread table entry to NULL so that it
370
            // may be reused.
371
            thread_table[i] = NULL;
372
 
373
            pthread_count--;
374
            pthreads_exited--;
375
        }
376
    }
377
}
378
 
379
//=============================================================================
380
// Functions exported to rest of POSIX subsystem.
381
 
382
//-----------------------------------------------------------------------------
383
// Create the main() thread.
384
 
385
externC void cyg_posix_pthread_start( void )
386
{
387
 
388
    // Initialize the per-thread data key map.
389
 
390
    for( cyg_ucount32 i = 0; i < (PTHREAD_KEYS_MAX/KEY_MAP_TYPE_SIZE); i++ )
391
    {
392
        thread_key[i] = ~0;
393
    }
394
 
395
    // Create the main thread
396
    pthread_attr_t attr;
397
    struct sched_param schedparam;
398
 
399
    schedparam.sched_priority = CYGNUM_POSIX_MAIN_DEFAULT_PRIORITY;
400
 
401
    pthread_attr_init( &attr );
402
    pthread_attr_setinheritsched( &attr, PTHREAD_EXPLICIT_SCHED );
403
    pthread_attr_setstackaddr( &attr, &main_stack[sizeof(main_stack)] );
404
    pthread_attr_setstacksize( &attr, sizeof(main_stack) );
405
    pthread_attr_setschedpolicy( &attr, SCHED_RR );
406
    pthread_attr_setschedparam( &attr, &schedparam );
407
 
408
    pthread_create( &main_thread, &attr, call_main, NULL );
409
}
410
 
411
#ifdef CYGPKG_POSIX_SIGNALS
412
//-----------------------------------------------------------------------------
413
// Look for a thread that can accept delivery of any of the signals in
414
// the mask and release it from any wait it is in.  Since this may be
415
// called from a DSR, it cannot use any locks internally - any locking
416
// should be done before the call.
417
 
418
externC void cyg_posix_pthread_release_thread( sigset_t *mask )
419
{
420
    int i;
421
    int count = pthread_count;
422
 
423
    // Loop over the thread table looking for a thread that has a
424
    // signal mask that does not mask all the signals in mask.
425
    // FIXME: find a more efficient way of doing this.
426
 
427
    for( i = 0; count > 0 && i < CYGNUM_POSIX_PTHREAD_THREADS_MAX ; i++ )
428
    {
429
        pthread_info *thread = thread_table[i];
430
 
431
        if( (thread != NULL) &&
432
            (thread->state <= PTHREAD_STATE_RUNNING) &&
433
            ((*mask & ~thread->sigmask) != 0) )
434
        {
435
            // This thread can service at least one of the signals in
436
            // *mask. Knock it out of its wait and make its ASR pending.
437
 
438
            thread->thread->set_asr_pending();
439
            thread->thread->release();
440
            break;
441
        }
442
 
443
        // Decrement count for each valid thread we find.
444
        if( thread != NULL && thread->state != PTHREAD_STATE_FREE )
445
            count--;
446
    }
447
}
448
#endif
449
 
450
//=============================================================================
451
// General thread operations
452
 
453
//-----------------------------------------------------------------------------
454
// Thread creation and management.
455
 
456
// Create a thread.
457
externC int pthread_create ( pthread_t *thread,
458
                             const pthread_attr_t *attr,
459
                             void *(*start_routine) (void *),
460
                             void *arg)
461
{
462
    PTHREAD_ENTRY();
463
 
464
    PTHREAD_CHECK(thread);
465
    PTHREAD_CHECK(start_routine);
466
 
467
    pthread_info *self = pthread_self_info();
468
 
469
    pthread_attr_t use_attr;
470
 
471
    // Set use_attr to the set of attributes we are going to
472
    // actually use. Either those passed in, or the default set.
473
 
474
    if( attr == NULL )
475
        pthread_attr_init( &use_attr );
476
    else use_attr = *attr;
477
 
478
    // Adjust the attributes to cope with the setting of inheritsched.
479
 
480
    if( use_attr.inheritsched == PTHREAD_INHERIT_SCHED )
481
    {
482
        CYG_ASSERT( NULL != self,
483
                    "Attempt to inherit sched policy from non-POSIX thread" );
484
#ifdef CYGDBG_USE_ASSERTS
485
        // paranoia check
486
        int i;
487
        for (i=(sizeof(thread_table)/sizeof(*thread_table))-1; i>=0; i--) {
488
            if (thread_table[i] == self)
489
                break;
490
        }
491
        CYG_ASSERT( i>=0, "Current pthread not found in table" );
492
#endif
493
        use_attr.schedpolicy = self->attr.schedpolicy;
494
        use_attr.schedparam  = self->attr.schedparam;
495
    }
496
 
497
    CYG_ADDRWORD stackbase, stacksize;
498
    cyg_bool freestack = false;
499
    CYG_ADDRWORD stackmem = 0;
500
 
501
    // If no stack size has been supplied, fall back to the minimum,
    // PTHREAD_STACK_MIN bytes.
503
 
504
    if( use_attr.stacksize_valid )
505
        stacksize = use_attr.stacksize;
506
    else stacksize = PTHREAD_STACK_MIN;
507
 
508
    if( use_attr.stackaddr_valid )
509
    {
510
        // Set up stack base and size from supplied arguments.
511
 
512
        // Calculate stack base from address and size.
513
        // FIXME: Falling stack assumed in pthread_create().
514
        stackmem = stackbase = (CYG_ADDRWORD)use_attr.stackaddr-stacksize;
515
    }
516
    else
517
    {
518
#ifdef PTHREAD_MALLOC
519
 
520
        stackmem = stackbase = pthread_malloc( stacksize );
521
 
522
        if( stackmem == 0 )
523
            PTHREAD_RETURN( EAGAIN );
524
 
525
        freestack = true;
526
#else        
527
        PTHREAD_RETURN(EINVAL);
528
#endif        
529
 
530
    }
531
 
532
    // Get sole access to data structures
533
 
534
    pthread_mutex.lock();
535
 
536
    // Dispose of any dead threads
537
    pthread_reap();
538
 
539
    // Find a free slot in the thread table
540
 
541
    pthread_info *nthread;
542
    int thread_next = thread_info_next;
543
 
544
    while( thread_table[thread_next] != NULL )
545
    {
546
        thread_next++;
547
        if( thread_next >= CYGNUM_POSIX_PTHREAD_THREADS_MAX )
548
            thread_next = 0;
549
 
550
        // check for wrap, and return error if no slots left
551
        if( thread_next == thread_info_next )
552
        {
553
            pthread_mutex.unlock();
554
            if( freestack )
555
                pthread_free( stackmem );
556
            PTHREAD_RETURN(ENOMEM);
557
        }
558
    }
559
 
560
    nthread = (pthread_info *)stackbase;
561
 
562
    stackbase += sizeof(pthread_info);
563
    stacksize -= sizeof(pthread_info);
564
 
565
    thread_table[thread_next] = nthread;
566
 
567
    // Set new next index
568
    thread_info_next = thread_next;
569
 
570
    // step the cookie
571
    thread_id_cookie += THREAD_ID_COOKIE_INC;
572
 
573
    // Initialize the table entry
574
    nthread->state              = use_attr.detachstate == PTHREAD_CREATE_JOINABLE ?
575
                                  PTHREAD_STATE_RUNNING : PTHREAD_STATE_DETACHED;
576
    nthread->id                 = thread_next+thread_id_cookie;
577
    nthread->attr               = use_attr;
578
    nthread->retval             = 0;
579
    nthread->start_routine      = start_routine;
580
    nthread->start_arg          = arg;
581
 
582
    nthread->freestack          = freestack;
583
    nthread->stackmem           = stackmem;
584
 
585
    nthread->cancelstate        = PTHREAD_CANCEL_ENABLE;
586
    nthread->canceltype         = PTHREAD_CANCEL_DEFERRED;
587
    nthread->cancelbuffer       = NULL;
588
    nthread->cancelpending      = false;
589
 
590
    nthread->thread_data        = NULL;
591
 
592
#ifdef CYGVAR_KERNEL_THREADS_NAME    
593
    // generate a name for this thread
594
 
595
    char *name = nthread->name;
596
    static const char name_template[] = "pthread.00000000";
597
    pthread_t id = nthread->id;
598
 
599
    for( int i = 0; name_template[i]; i++ ) name[i] = name_template[i];
600
 
601
    // dump the id, in hex into the name.
602
    for( int i = 15; i >= 8; i-- )
603
    {
604
        name[i] = "0123456789ABCDEF"[id&0xF];
605
        id >>= 4;
606
    }
607
 
608
#endif
609
 
610
    // Initialize the joiner condition variable
611
 
612
    nthread->joiner = new(nthread->joiner_obj) Cyg_Condition_Variable( pthread_mutex );
613
 
614
#ifdef CYGPKG_POSIX_SIGNALS
615
    // Initialize signal specific fields.
616
    if (NULL != self) {
617
        CYG_CHECK_DATA_PTR( self,
618
                            "Attempt to inherit signal mask from bogus pthread" );
619
#ifdef CYGDBG_USE_ASSERTS
620
        // paranoia check
621
        int i;
622
        for (i=(sizeof(thread_table)/sizeof(*thread_table))-1; i>=0; i--) {
623
            if (thread_table[i] == self)
624
                break;
625
        }
626
        CYG_ASSERT( i>=0, "Current pthread not found in table" );
627
#endif
628
    }
629
    cyg_posix_thread_siginit( nthread, self );
630
#endif
631
 
632
    // create the underlying eCos thread
633
 
634
    nthread->thread = new(&nthread->thread_obj[0])
635
        Cyg_Thread ( PTHREAD_ECOS_PRIORITY(use_attr.schedparam.sched_priority),
636
                     pthread_entry,
637
                     (CYG_ADDRWORD)nthread,
638
#ifdef CYGVAR_KERNEL_THREADS_NAME
639
                     name,
640
#else
641
                     NULL,
642
#endif
643
                     stackbase,
644
                     stacksize);
645
 
646
    // Put pointer to pthread_info into eCos thread's per-thread data.
647
    nthread->thread->set_data( CYGNUM_KERNEL_THREADS_DATA_POSIX, (CYG_ADDRWORD)nthread );
648
 
649
    // Set timeslice enable according to scheduling policy.
650
    if( use_attr.schedpolicy == SCHED_FIFO )
651
         nthread->thread->timeslice_disable();
652
    else nthread->thread->timeslice_enable();
653
 
654
    // set up ASR and data
655
    nthread->thread->set_asr( posix_asr, (CYG_ADDRWORD)nthread, NULL, NULL );
656
 
657
    // return thread ID
658
    *thread = nthread->id;
659
 
660
    pthread_count++;
661
 
662
    pthread_mutex.unlock();
663
 
664
    // finally, set the thread going
665
    nthread->thread->resume();
666
 
667
    PTHREAD_RETURN(0);
668
}
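// Usage sketch (illustrative, not part of this file): creating a joinable
// SCHED_RR thread with explicitly chosen scheduling, in the same style as
// cyg_posix_pthread_start() above. The worker() function, spawn_worker()
// and the priority value 10 are assumptions made for the example.
#if 0
#include <pthread.h>
#include <sched.h>

static void *worker( void *arg )
{
    // ... do work ...
    return arg;
}

static int spawn_worker( pthread_t *t )
{
    pthread_attr_t attr;
    struct sched_param schedparam;

    pthread_attr_init( &attr );
    pthread_attr_setinheritsched( &attr, PTHREAD_EXPLICIT_SCHED );
    pthread_attr_setschedpolicy( &attr, SCHED_RR );
    schedparam.sched_priority = 10;
    pthread_attr_setschedparam( &attr, &schedparam );

    // With no stack supplied, pthread_create() allocates one with
    // pthread_malloc(), or fails with EINVAL if no malloc() is available.
    return pthread_create( t, &attr, worker, NULL );
}
#endif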
669
 
670
//-----------------------------------------------------------------------------
671
// Get current thread id.
672
 
673
externC pthread_t pthread_self ( void )
674
{
675
    PTHREAD_ENTRY();
676
 
677
    pthread_info *info = pthread_self_info();
678
 
679
    CYG_CHECK_DATA_PTR(info, "Not a POSIX thread!!!");
680
 
681
    return info->id;
682
}
683
 
684
//-----------------------------------------------------------------------------
685
// Compare two thread identifiers.
686
 
687
externC int pthread_equal (pthread_t thread1, pthread_t thread2)
688
{
689
    PTHREAD_ENTRY();
690
 
691
    return thread1 == thread2;
692
}
693
 
694
//-----------------------------------------------------------------------------
695
// Terminate current thread.
696
 
697
externC void exit(int) CYGBLD_ATTRIB_NORET;
698
 
699
externC void pthread_exit (void *retval)
700
{
701
    PTHREAD_ENTRY();
702
 
703
    pthread_info *self = pthread_self_info();
704
 
705
    // Disable cancellation requests for this thread.  If cleanup
706
    // handlers exist, they will generally be issuing system calls
707
    // to clean up resources.  We want these system calls to run
708
    // without cancelling, and we also want to prevent being
709
    // re-cancelled.
710
    pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
711
 
712
    // Call cancellation handlers. We eat up the buffers as we go in
713
    // case any of the routines calls pthread_exit() itself.
714
    while( self->cancelbuffer != NULL )
715
    {
716
        struct pthread_cleanup_buffer *buffer = self->cancelbuffer;
717
 
718
        self->cancelbuffer = buffer->prev;
719
 
720
        buffer->routine(buffer->arg);
721
    }
722
 
723
    if( self->thread_data != NULL )
724
    {
725
        // Call per-thread key destructors.
726
        // The specification of this is that we must continue to call the
727
        // destructor functions until all the per-thread data values are NULL or
728
        // we have done it PTHREAD_DESTRUCTOR_ITERATIONS times.
729
 
730
        cyg_bool destructors_called;
731
        int destructor_iterations = 0;
732
 
733
        do
734
        {
735
            destructors_called = false;
736
 
737
            for( cyg_ucount32 key = 0; key < PTHREAD_KEYS_MAX; key++ )
738
            {
739
                // Skip unallocated keys
740
                if( thread_key[key/KEY_MAP_TYPE_SIZE] & 1<<(key%KEY_MAP_TYPE_SIZE) )
741
                    continue;
742
 
743
                // Skip NULL destructors
744
                if( key_destructor[key] == NULL ) continue;
745
 
746
                // Skip NULL data values
747
                if( self->thread_data[key] == NULL ) continue;
748
 
749
                // If it passes all that, call the destructor.
750
                // Note that NULLing the data value here is new
751
                // behaviour in the 2001 POSIX standard.
752
                {
753
                    void* value = self->thread_data[key];
754
                    self->thread_data[key] = NULL;
755
                    key_destructor[key](value);
756
                }
757
 
758
                // Record that we called a destructor
759
                destructors_called = true;
760
            }
761
 
762
            // Count the iteration
763
            destructor_iterations++;
764
 
765
        } while( destructors_called &&
766
                 (destructor_iterations <= PTHREAD_DESTRUCTOR_ITERATIONS));
767
 
768
    }
769
 
770
    pthread_mutex.lock();
771
 
772
    // Set the retval for any joiner
773
    self->retval = retval;
774
 
775
    // If we are already detached, go to EXITED state, otherwise
776
    // go into JOIN state.
777
 
778
    if ( PTHREAD_STATE_DETACHED == self->state ) {
779
        self->state = PTHREAD_STATE_EXITED;
780
        pthreads_exited++;
781
    } else {
782
        self->state = PTHREAD_STATE_JOIN;
783
        pthreads_tobejoined++;
784
    }
785
 
786
    // Kick any waiting joiners
787
    self->joiner->broadcast();
788
 
789
    cyg_bool call_exit=false;
790
 
791
    // if this is the last thread (other than threads waiting to be joined)
792
    // then we need to call exit() later
793
    if ( pthreads_exited + pthreads_tobejoined == pthread_count )
794
        call_exit=true;
795
 
796
    pthread_mutex.unlock();
797
 
798
    // Finally, call the exit function; this will not return.
799
    if ( call_exit )
800
        ::exit(0);
801
    else
802
        self->thread->exit();
803
 
804
    // This loop keeps some compilers happy. pthread_exit() is marked
805
    // with the noreturn attribute, and without this they generate a
806
    // call to abort() here in case Cyg_Thread::exit() returns. 
807
 
808
    for(;;) continue;
809
}
810
 
811
//-----------------------------------------------------------------------------
812
// Wait for the thread to terminate. If thread_return is not NULL then
813
// the retval from the thread's call to pthread_exit() is stored at
814
// *thread_return.
815
 
816
externC int pthread_join (pthread_t thread, void **thread_return)
817
{
818
    int err = 0;
819
 
820
    PTHREAD_ENTRY();
821
 
822
    // check for cancellation first.
823
    pthread_testcancel();
824
 
825
    pthread_mutex.lock();
826
 
827
    // Dispose of any dead threads
828
    pthread_reap();
829
 
830
    pthread_info *self = pthread_self_info();
831
    pthread_info *joinee = pthread_info_id( thread );
832
 
833
    if( joinee == NULL )
834
    {
835
        err = ESRCH;
836
    }
837
 
838
    if( !err && joinee == self )
839
    {
840
        err = EDEADLK;
841
    }
842
 
843
    if ( !err ) {
844
        switch ( joinee->state )
845
        {
846
        case PTHREAD_STATE_RUNNING:
            // The thread is still running, we must wait for it.
            while( joinee->state == PTHREAD_STATE_RUNNING ) {
                if ( !joinee->joiner->wait() )
                    // check if we were woken because we were being cancelled
                    if ( checkforcancel() ) {
                        err = EAGAIN;  // value unimportant, just some error
                        break;
                    }
            }

            // check that the thread is still joinable
            if( joinee->state == PTHREAD_STATE_JOIN )
                break;

            // The thread has become unjoinable while we waited, so we
            // fall through to complain.

864
        case PTHREAD_STATE_FREE:
865
        case PTHREAD_STATE_DETACHED:
866
        case PTHREAD_STATE_EXITED:
867
        // None of these may be joined.
868
            err = EINVAL;
869
            break;
870
 
871
        case PTHREAD_STATE_JOIN:
872
            break;
873
        }
874
    }
875
 
876
    if ( !err ) {
877
 
878
        // here, we know that joinee is a thread that has exited and is
879
        // ready to be joined.
880
 
881
        // Get the retval
882
        if( thread_return != NULL )
883
            *thread_return = joinee->retval;
884
 
885
        // set state to exited.
886
        joinee->state = PTHREAD_STATE_EXITED;
887
        pthreads_exited++;
888
        pthreads_tobejoined--;
889
 
890
        // Dispose of any dead threads
891
        pthread_reap();
892
    }
893
 
894
    pthread_mutex.unlock();
895
 
896
    // check for cancellation before returning
897
    pthread_testcancel();
898
 
899
    PTHREAD_RETURN(err);
900
}
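// Usage sketch (illustrative, not part of this file): waiting for a thread
// and collecting the value it returned or passed to pthread_exit(). The
// reap_worker() name is an assumption made for the example.
#if 0
#include <pthread.h>

static void reap_worker( pthread_t t )
{
    void *retval;

    if( pthread_join( t, &retval ) == 0 )
    {
        // retval is whatever the thread returned or passed to
        // pthread_exit(), or PTHREAD_CANCELED if it was cancelled.
    }
}
#endif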
901
 
902
//-----------------------------------------------------------------------------
903
// Set the detachstate of the thread to "detached". The thread then does not
904
// need to be joined and its resources will be freed when it exits.
905
 
906
externC int pthread_detach (pthread_t thread)
907
{
908
    PTHREAD_ENTRY();
909
 
910
    int ret = 0;
911
 
912
    pthread_mutex.lock();
913
 
914
    pthread_info *detachee = pthread_info_id( thread );
915
 
916
    if( detachee == NULL )
917
        ret = ESRCH;                    // No such thread
918
    else if( detachee->state == PTHREAD_STATE_DETACHED )
919
        ret = EINVAL;                   // Already detached!
920
    else
921
    {
922
        // Set state to detached and kick any joinees to
923
        // make them return.
924
        detachee->state = PTHREAD_STATE_DETACHED;
925
        detachee->joiner->broadcast();
926
    }
927
 
928
    // Dispose of any dead threads
929
    pthread_reap();
930
 
931
    pthread_mutex.unlock();
932
 
933
    PTHREAD_RETURN(ret);
934
}
935
 
936
 
937
//-----------------------------------------------------------------------------
938
// Thread attribute handling.
939
 
940
//-----------------------------------------------------------------------------
941
// Initialize attributes object with default attributes:
942
// detachstate          == PTHREAD_CREATE_JOINABLE
943
// scope                == PTHREAD_SCOPE_SYSTEM
944
// inheritsched         == PTHREAD_INHERIT_SCHED
945
// schedpolicy          == SCHED_OTHER
946
// schedparam           == unset
947
// stackaddr            == unset
948
// stacksize            == 0
949
// 
950
 
951
externC int pthread_attr_init (pthread_attr_t *attr)
952
{
953
    PTHREAD_ENTRY();
954
 
955
    PTHREAD_CHECK(attr);
956
 
957
    attr->detachstate                 = PTHREAD_CREATE_JOINABLE;
958
    attr->scope                       = PTHREAD_SCOPE_SYSTEM;
959
    attr->inheritsched                = PTHREAD_INHERIT_SCHED;
960
    attr->schedpolicy                 = SCHED_OTHER;
961
    attr->schedparam.sched_priority   = 0;
962
    attr->stackaddr_valid             = 0;
963
    attr->stackaddr                   = NULL;
964
    attr->stacksize_valid             = 0;
965
    attr->stacksize                   = 0;
966
 
967
    PTHREAD_RETURN(0);
968
}
969
 
970
//-----------------------------------------------------------------------------
971
// Destroy thread attributes object
972
 
973
externC int pthread_attr_destroy (pthread_attr_t *attr)
974
{
975
    PTHREAD_ENTRY();
976
 
977
    PTHREAD_CHECK(attr);
978
 
979
    // Nothing to do here...
980
 
981
    PTHREAD_RETURN(0);
982
}
983
 
984
//-----------------------------------------------------------------------------
985
// Set the detachstate attribute
986
 
987
externC int pthread_attr_setdetachstate (pthread_attr_t *attr,
988
                                         int detachstate)
989
{
990
    PTHREAD_ENTRY();
991
 
992
    PTHREAD_CHECK(attr);
993
 
994
    if( detachstate == PTHREAD_CREATE_JOINABLE ||
995
        detachstate == PTHREAD_CREATE_DETACHED )
996
    {
997
        attr->detachstate = detachstate;
998
        PTHREAD_RETURN(0);
999
    }
1000
 
1001
    PTHREAD_RETURN(EINVAL);
1002
}
1003
 
1004
//-----------------------------------------------------------------------------
1005
// Get the detachstate attribute
1006
externC int pthread_attr_getdetachstate (const pthread_attr_t *attr,
1007
                                         int *detachstate)
1008
{
1009
    PTHREAD_ENTRY();
1010
 
1011
    PTHREAD_CHECK(attr);
1012
 
1013
    if( detachstate != NULL )
1014
        *detachstate = attr->detachstate;
1015
 
1016
    PTHREAD_RETURN(0);
1017
}
1018
 
1019
//-----------------------------------------------------------------------------
1020
// Set scheduling contention scope
1021
 
1022
externC int pthread_attr_setscope (pthread_attr_t *attr, int scope)
1023
{
1024
    PTHREAD_ENTRY();
1025
 
1026
    PTHREAD_CHECK(attr);
1027
 
1028
    if( scope == PTHREAD_SCOPE_SYSTEM ||
1029
        scope == PTHREAD_SCOPE_PROCESS )
1030
    {
1031
        if( scope == PTHREAD_SCOPE_PROCESS )
1032
            PTHREAD_RETURN(ENOTSUP);
1033
 
1034
        attr->scope = scope;
1035
 
1036
        PTHREAD_RETURN(0);
1037
    }
1038
 
1039
    PTHREAD_RETURN(EINVAL);
1040
}
1041
 
1042
//-----------------------------------------------------------------------------
1043
// Get scheduling contention scope
1044
 
1045
externC int pthread_attr_getscope (const pthread_attr_t *attr, int *scope)
1046
{
1047
    PTHREAD_ENTRY();
1048
 
1049
    PTHREAD_CHECK(attr);
1050
 
1051
    if( scope != NULL )
1052
        *scope = attr->scope;
1053
 
1054
    PTHREAD_RETURN(0);
1055
}
1056
 
1057
//-----------------------------------------------------------------------------
1058
// Set scheduling inheritance attribute
1059
 
1060
externC int pthread_attr_setinheritsched (pthread_attr_t *attr, int inherit)
1061
{
1062
    PTHREAD_ENTRY();
1063
 
1064
    PTHREAD_CHECK(attr);
1065
 
1066
    if( inherit == PTHREAD_INHERIT_SCHED ||
1067
        inherit == PTHREAD_EXPLICIT_SCHED )
1068
    {
1069
        attr->inheritsched = inherit;
1070
 
1071
        PTHREAD_RETURN(0);
1072
    }
1073
 
1074
    PTHREAD_RETURN(EINVAL);
1075
}
1076
 
1077
//-----------------------------------------------------------------------------
1078
// Get scheduling inheritance attribute
1079
 
1080
externC int pthread_attr_getinheritsched (const pthread_attr_t *attr,
1081
                                          int *inherit)
1082
{
1083
    PTHREAD_ENTRY();
1084
 
1085
    PTHREAD_CHECK(attr);
1086
 
1087
    if( inherit != NULL )
1088
        *inherit = attr->inheritsched;
1089
 
1090
    PTHREAD_RETURN(0);
1091
}
1092
 
1093
//-----------------------------------------------------------------------------
1094
// Set scheduling policy
1095
 
1096
externC int pthread_attr_setschedpolicy (pthread_attr_t *attr, int policy)
1097
{
1098
    PTHREAD_ENTRY();
1099
 
1100
    PTHREAD_CHECK(attr);
1101
 
1102
    if( policy == SCHED_OTHER ||
1103
        policy == SCHED_FIFO ||
1104
        policy == SCHED_RR )
1105
    {
1106
        attr->schedpolicy = policy;
1107
 
1108
        PTHREAD_RETURN(0);
1109
    }
1110
 
1111
    PTHREAD_RETURN(EINVAL);
1112
}
1113
 
1114
//-----------------------------------------------------------------------------
1115
// Get scheduling policy
1116
 
1117
externC int pthread_attr_getschedpolicy (const pthread_attr_t *attr,
1118
                                         int *policy)
1119
{
1120
    PTHREAD_ENTRY();
1121
 
1122
    PTHREAD_CHECK(attr);
1123
 
1124
    if( policy != NULL )
1125
        *policy = attr->schedpolicy;
1126
 
1127
    PTHREAD_RETURN(0);
1128
}
1129
 
1130
//-----------------------------------------------------------------------------
1131
// Set scheduling parameters
1132
externC int pthread_attr_setschedparam (pthread_attr_t *attr,
1133
                                        const struct sched_param *param)
1134
{
1135
    PTHREAD_ENTRY();
1136
 
1137
    PTHREAD_CHECK(attr);
1138
    PTHREAD_CHECK(param);
1139
 
1140
    attr->schedparam = *param;
1141
 
1142
    PTHREAD_RETURN(0);
1143
}
1144
 
1145
//-----------------------------------------------------------------------------
1146
// Get scheduling parameters
1147
 
1148
externC int pthread_attr_getschedparam (const pthread_attr_t *attr,
1149
                                        struct sched_param *param)
1150
{
1151
    PTHREAD_ENTRY();
1152
 
1153
    PTHREAD_CHECK(attr);
1154
 
1155
    if( param != NULL )
1156
        *param = attr->schedparam;
1157
 
1158
    PTHREAD_RETURN(0);
1159
}
1160
 
1161
//-----------------------------------------------------------------------------
1162
// Set starting address of stack. Whether this is at the start or end of
1163
// the memory block allocated for the stack depends on whether the stack
1164
// grows up or down.
1165
 
1166
externC int pthread_attr_setstackaddr (pthread_attr_t *attr, void *stackaddr)
1167
{
1168
    PTHREAD_ENTRY();
1169
 
1170
    PTHREAD_CHECK(attr);
1171
 
1172
    attr->stackaddr       = stackaddr;
1173
    attr->stackaddr_valid = 1;
1174
 
1175
    PTHREAD_RETURN(0);
1176
}
1177
 
1178
//-----------------------------------------------------------------------------
1179
// Get any previously set stack address.
1180
 
1181
externC int pthread_attr_getstackaddr (const pthread_attr_t *attr,
1182
                                       void **stackaddr)
1183
{
1184
    PTHREAD_ENTRY();
1185
 
1186
    PTHREAD_CHECK(attr);
1187
 
1188
    if( stackaddr != NULL )
1189
    {
1190
        if( attr->stackaddr_valid )
1191
        {
1192
            *stackaddr = attr->stackaddr;
1193
            PTHREAD_RETURN(0);
1194
        }
1195
        // Stack address not set, return EINVAL.
1196
        else PTHREAD_RETURN(EINVAL);
1197
    }
1198
 
1199
    PTHREAD_RETURN(0);
1200
}
1201
 
1202
 
1203
//-----------------------------------------------------------------------------
1204
// Set minimum creation stack size.
1205
 
1206
externC int pthread_attr_setstacksize (pthread_attr_t *attr,
1207
                                       size_t stacksize)
1208
{
1209
    PTHREAD_ENTRY();
1210
 
1211
    PTHREAD_CHECK(attr);
1212
 
1213
    CYG_ASSERT( stacksize >= PTHREAD_STACK_MIN, "Inadequate stack size supplied");
1214
 
1215
    // Reject inadequate stack sizes
1216
    if( stacksize < PTHREAD_STACK_MIN )
1217
        PTHREAD_RETURN(EINVAL);
1218
 
1219
    attr->stacksize_valid = 1;
1220
    attr->stacksize = stacksize;
1221
 
1222
    PTHREAD_RETURN(0);
1223
}
1224
 
1225
//-----------------------------------------------------------------------------
1226
// Get current minimal stack size.
1227
 
1228
externC int pthread_attr_getstacksize (const pthread_attr_t *attr,
1229
                                       size_t *stacksize)
1230
{
1231
    PTHREAD_ENTRY();
1232
 
1233
    PTHREAD_CHECK(attr);
1234
 
1235
    // Reject attempts to get a stack size when one has not been set.
1236
    if( !attr->stacksize_valid )
1237
        PTHREAD_RETURN(EINVAL);
1238
 
1239
    if( stacksize != NULL )
1240
        *stacksize = attr->stacksize;
1241
 
1242
    PTHREAD_RETURN(0);
1243
}
1244
 
1245
//-----------------------------------------------------------------------------
1246
// Thread scheduling controls
1247
 
1248
//-----------------------------------------------------------------------------
1249
// Set scheduling policy and parameters for the thread
1250
 
1251
externC int pthread_setschedparam (pthread_t thread_id,
1252
                                   int policy,
1253
                                   const struct sched_param *param)
1254
{
1255
    PTHREAD_ENTRY();
1256
 
1257
    if( policy != SCHED_OTHER &&
1258
        policy != SCHED_FIFO &&
1259
        policy != SCHED_RR )
1260
        PTHREAD_RETURN(EINVAL);
1261
 
1262
    PTHREAD_CHECK(param);
1263
 
1264
    // The parameters seem OK, change the thread...
1265
 
1266
    pthread_mutex.lock();
1267
 
1268
    pthread_info *thread = pthread_info_id( thread_id );
1269
 
1270
    if( thread == NULL )
1271
    {
1272
        pthread_mutex.unlock();
1273
        PTHREAD_RETURN(ESRCH);
1274
    }
1275
 
1276
    thread->attr.schedpolicy = policy;
1277
    thread->attr.schedparam = *param;
1278
 
1279
    if ( policy == SCHED_FIFO )
1280
         thread->thread->timeslice_disable();
1281
    else thread->thread->timeslice_enable();
1282
 
1283
    thread->thread->set_priority( PTHREAD_ECOS_PRIORITY( param->sched_priority ));
1284
 
1285
    pthread_mutex.unlock();
1286
 
1287
    PTHREAD_RETURN(0);
1288
}
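// Usage sketch (illustrative, not part of this file): moving an existing
// thread to FIFO scheduling at a new priority. make_fifo() and its prio
// parameter are assumed names for the example.
#if 0
#include <pthread.h>
#include <sched.h>

static int make_fifo( pthread_t t, int prio )
{
    struct sched_param sp;

    sp.sched_priority = prio;
    // Per the code above this also disables timeslicing for the thread.
    return pthread_setschedparam( t, SCHED_FIFO, &sp );
}
#endif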
1289
 
1290
//-----------------------------------------------------------------------------
1291
// Get scheduling policy and parameters for the thread
1292
 
1293
externC int pthread_getschedparam (pthread_t thread_id,
1294
                                   int *policy,
1295
                                   struct sched_param *param)
1296
{
1297
    PTHREAD_ENTRY();
1298
 
1299
    pthread_mutex.lock();
1300
 
1301
    pthread_info *thread = pthread_info_id( thread_id );
1302
 
1303
    if( thread == NULL )
1304
    {
1305
        pthread_mutex.unlock();
1306
        PTHREAD_RETURN(ESRCH);
1307
    }
1308
 
1309
    if( policy != NULL )
1310
        *policy = thread->attr.schedpolicy;
1311
 
1312
    if( param != NULL )
1313
        *param = thread->attr.schedparam;
1314
 
1315
    pthread_mutex.unlock();
1316
 
1317
    PTHREAD_RETURN(0);
1318
}
1319
 
1320
 
1321
//=============================================================================
1322
// Dynamic package initialization
1323
// Call init_routine just the once per control variable.
1324
 
1325
externC int pthread_once (pthread_once_t *once_control,
1326
                          void (*init_routine) (void))
1327
{
1328
    PTHREAD_ENTRY();
1329
 
1330
    PTHREAD_CHECK( once_control );
1331
    PTHREAD_CHECK( init_routine );
1332
 
1333
    pthread_once_t old;
1334
 
1335
    // Do a test and set on the once_control object.
1336
    pthread_mutex.lock();
1337
 
1338
    old = *once_control;
1339
    *once_control = 1;
1340
 
1341
    pthread_mutex.unlock();
1342
 
1343
    // If the once_control was zero, call the init_routine().
1344
    if( !old ) init_routine();
1345
 
1346
    PTHREAD_RETURN(0);
1347
}
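// Usage sketch (illustrative, not part of this file): the usual once-only
// initialization idiom built on pthread_once(). io_once, io_init and
// use_io are assumed names; PTHREAD_ONCE_INIT is the standard initializer
// from <pthread.h>.
#if 0
#include <pthread.h>

static pthread_once_t io_once = PTHREAD_ONCE_INIT;

static void io_init( void )
{
    // One-time setup, executed by exactly one caller.
}

static void use_io( void )
{
    pthread_once( &io_once, io_init );
    // ... io_init() has run exactly once by this point ...
}
#endif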
1348
 
1349
 
1350
//=============================================================================
1351
//Thread specific data
1352
 
1353
//-----------------------------------------------------------------------------
1354
// Create a key to identify a location in the thread specific data area.
1355
// Each thread has its own distinct thread-specific data area but all are
1356
// addressed by the same keys. The destructor function is called whenever a
1357
// thread exits and the value associated with the key is non-NULL.
1358
 
1359
externC int pthread_key_create (pthread_key_t *key,
1360
                                void (*destructor) (void *))
1361
{
1362
    PTHREAD_ENTRY();
1363
 
1364
    pthread_key_t k = -1;
1365
 
1366
    pthread_mutex.lock();
1367
 
1368
    // Find a key to allocate
1369
    for( cyg_ucount32 i = 0; i < (PTHREAD_KEYS_MAX/KEY_MAP_TYPE_SIZE); i++ )
1370
    {
1371
        if( thread_key[i] != 0 )
1372
        {
1373
            // We have a table slot with space available
1374
 
1375
            // Get the index of the least significant set bit.
1376
            HAL_LSBIT_INDEX( k, thread_key[i] );
1377
 
1378
            // clear it
1379
            thread_key[i] &= ~(1<<k);
1380
 
1381
            // Add index of word
1382
            k += i * KEY_MAP_TYPE_SIZE;
1383
 
1384
            // Install destructor
1385
            key_destructor[k] = destructor;
1386
 
1387
            // break out with key found
1388
            break;
1389
        }
1390
    }
1391
 
1392
    if( k != -1 )
1393
    {
1394
        // plant a NULL in all the valid thread data slots for this
1395
        // key in case we are reusing a key we used before.
1396
 
1397
        for( cyg_ucount32 i = 0; i < CYGNUM_POSIX_PTHREAD_THREADS_MAX ; i++ )
1398
        {
1399
            pthread_info *thread = thread_table[i];
1400
 
1401
            if( thread != NULL && thread->thread_data != NULL )
1402
                thread->thread_data[k] = NULL;
1403
        }
1404
    }
1405
 
1406
    pthread_mutex.unlock();
1407
 
1408
    if( k == -1 ) PTHREAD_RETURN(EAGAIN);
1409
 
1410
    *key = k;
1411
 
1412
    PTHREAD_RETURN(0);
1413
}
1414
 
1415
//-----------------------------------------------------------------------------
1416
// Delete key.
1417
 
1418
externC int pthread_key_delete (pthread_key_t key)
1419
{
1420
    PTHREAD_ENTRY();
1421
 
1422
    pthread_mutex.lock();
1423
 
1424
    // Set the key bit to 1 to indicate it is free.
1425
    thread_key[key/KEY_MAP_TYPE_SIZE] |= 1<<(key%(KEY_MAP_TYPE_SIZE));
1426
 
1427
    pthread_mutex.unlock();
1428
 
1429
    PTHREAD_RETURN(0);
1430
}
1431
 
1432
//-----------------------------------------------------------------------------
1433
// Store the pointer value in the thread-specific data slot addressed
1434
// by the key.
1435
 
1436
externC int pthread_setspecific (pthread_key_t key, const void *pointer)
1437
{
1438
    PTHREAD_ENTRY();
1439
 
1440
    if( thread_key[key/KEY_MAP_TYPE_SIZE] & 1<<(key%KEY_MAP_TYPE_SIZE) )
1441
        PTHREAD_RETURN(EINVAL);
1442
 
1443
    pthread_info *self = pthread_self_info();
1444
 
1445
    if( self->thread_data == NULL )
1446
    {
1447
        // Allocate the per-thread data table
1448
        self->thread_data =
1449
            (void **)self->thread->increment_stack_limit(
1450
                PTHREAD_KEYS_MAX * sizeof(void *) );
1451
 
1452
        // Clear out all entries
1453
        for( int i  = 0; i < PTHREAD_KEYS_MAX; i++ )
1454
            self->thread_data[i] = NULL;
1455
    }
1456
 
1457
    self->thread_data[key] = (void *)pointer;
1458
 
1459
    PTHREAD_RETURN(0);
1460
}
1461
 
1462
//-----------------------------------------------------------------------------
1463
// Retrieve the pointer value in the thread-specific data slot addressed
1464
// by the key.
1465
 
1466
externC void *pthread_getspecific (pthread_key_t key)
1467
{
1468
    void *val;
1469
    PTHREAD_ENTRY();
1470
 
1471
    if( thread_key[key/KEY_MAP_TYPE_SIZE] & 1<<(key%KEY_MAP_TYPE_SIZE) )
1472
        PTHREAD_RETURN(NULL);
1473
 
1474
    pthread_info *self = pthread_self_info();
1475
 
1476
    if( self->thread_data == NULL )
1477
        val = NULL;
1478
    else val = self->thread_data[key];
1479
 
1480
    PTHREAD_RETURN(val);
1481
}
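// Usage sketch (illustrative, not part of this file): allocating a key with
// a destructor and lazily attaching a per-thread buffer. buf_key,
// buf_destroy, tsd_init and my_buffer are assumptions made for the example.
#if 0
#include <pthread.h>
#include <stdlib.h>

static pthread_key_t buf_key;

static void buf_destroy( void *p )
{
    free( p );
}

// Called once, e.g. from initialization code.
static void tsd_init( void )
{
    pthread_key_create( &buf_key, buf_destroy );
}

// Called by any thread that wants its own 128-byte scratch buffer.
static void *my_buffer( void )
{
    void *p = pthread_getspecific( buf_key );

    if( p == NULL )
    {
        p = malloc( 128 );
        // The first pthread_setspecific() in a thread carves the
        // per-thread data table out of that thread's stack (see
        // increment_stack_limit() in pthread_setspecific() above).
        pthread_setspecific( buf_key, p );
    }
    return p;       // buf_destroy() runs for it when the thread exits
}
#endif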
1482
 
1483
//=============================================================================
1484
// Thread Cancellation Functions
1485
 
1486
//-----------------------------------------------------------------------------
1487
// Set cancel state of current thread to ENABLE or DISABLE.
1488
// Returns old state in *oldstate.
1489
 
1490
externC int pthread_setcancelstate (int state, int *oldstate)
1491
{
1492
    PTHREAD_ENTRY();
1493
 
1494
    if( state != PTHREAD_CANCEL_ENABLE &&
1495
        state != PTHREAD_CANCEL_DISABLE )
1496
        PTHREAD_RETURN(EINVAL);
1497
 
1498
    pthread_mutex.lock();
1499
 
1500
    pthread_info *self = pthread_self_info();
1501
 
1502
    if( oldstate != NULL ) *oldstate = self->cancelstate;
1503
 
1504
    self->cancelstate = state;
1505
 
1506
    pthread_mutex.unlock();
1507
 
1508
    // Note: This function may have made it possible for a pending
1509
    // cancellation to now be delivered. However the standard does not
1510
    // list this function as a cancellation point, so for now we do
1511
    // nothing. In future we might call pthread_testcancel() here.
1512
 
1513
    PTHREAD_RETURN(0);
1514
}
1515
 
1516
//-----------------------------------------------------------------------------
1517
// Set cancel type of current thread to ASYNCHRONOUS or DEFERRED.
1518
// Returns old type in *oldtype.
1519
 
1520
externC int pthread_setcanceltype (int type, int *oldtype)
1521
{
1522
    PTHREAD_ENTRY();
1523
 
1524
    if( type != PTHREAD_CANCEL_ASYNCHRONOUS &&
1525
        type != PTHREAD_CANCEL_DEFERRED )
1526
        PTHREAD_RETURN(EINVAL);
1527
 
1528
    pthread_mutex.lock();
1529
 
1530
    pthread_info *self = pthread_self_info();
1531
 
1532
    if( oldtype != NULL ) *oldtype = self->canceltype;
1533
 
1534
    self->canceltype = type;
1535
 
1536
    pthread_mutex.unlock();
1537
 
1538
    // Note: This function may have made it possible for a pending
1539
    // cancellation to now be delivered. However the standard does not
1540
    // list this function as a cancellation point, so for now we do
1541
    // nothing. In future we might call pthread_testcancel() here.
1542
 
1543
    PTHREAD_RETURN(0);
1544
}
1545
 
1546
//-----------------------------------------------------------------------------
1547
// Cancel the thread.
1548
 
1549
externC int pthread_cancel (pthread_t thread)
1550
{
1551
    PTHREAD_ENTRY();
1552
 
1553
    pthread_mutex.lock();
1554
 
1555
    pthread_info *th = pthread_info_id(thread);
1556
 
1557
    if( th == NULL )
1558
    {
1559
        pthread_mutex.unlock();
1560
        PTHREAD_RETURN(ESRCH);
1561
    }
1562
 
1563
    th->cancelpending = true;
1564
 
1565
    if ( th->cancelstate == PTHREAD_CANCEL_ENABLE )
1566
    {
1567
        if ( th->canceltype == PTHREAD_CANCEL_ASYNCHRONOUS )
1568
        {
1569
            // If the thread has cancellation enabled, and it is in
1570
            // asynchronous mode, set the eCos thread's ASR pending to
1571
            // deal with it when the thread wakes up. We also release the
1572
            // thread out of any current wait to make it wake up.
1573
 
1574
            th->thread->set_asr_pending();
1575
            th->thread->release();
1576
        }
1577
        else if ( th->canceltype == PTHREAD_CANCEL_DEFERRED )
1578
        {
1579
            // If the thread has cancellation enabled, and it is in 
1580
            // deferred mode, wake the thread up so that cancellation
1581
            // points can test for cancellation.
1582
            th->thread->release();
1583
        }
1584
        else
1585
            CYG_FAIL("Unknown cancellation type");
1586
    }
1587
 
1588
    // Otherwise the thread has cancellation disabled, in which case
1589
    // it is up to the thread to enable cancellation
1590
 
1591
    pthread_mutex.unlock();
1592
 
1593
 
1594
    PTHREAD_RETURN(0);
1595
}
1596
 
1597
//-----------------------------------------------------------------------------
1598
// Test for a pending cancellation for the current thread and terminate
1599
// the thread if there is one.
1600
 
1601
externC void pthread_testcancel (void)
1602
{
1603
    PTHREAD_ENTRY_VOID();
1604
 
1605
    if( checkforcancel() )
1606
    {
1607
        // If we have cancellation enabled, and there is a cancellation
1608
        // pending, then go ahead and do the deed. 
1609
 
1610
        // Exit now with special retval. pthread_exit() calls the
1611
        // cancellation handlers implicitly.
1612
        pthread_exit(PTHREAD_CANCELED);
1613
    }
1614
 
1615
    PTHREAD_RETURN_VOID;
1616
}
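// Usage sketch (illustrative, not part of this file): a worker thread that
// briefly disables cancellation around a critical region and then polls for
// deferred cancellation at a safe point. cancellable_worker() is an assumed
// name.
#if 0
#include <pthread.h>

static void *cancellable_worker( void *arg )
{
    int oldstate;

    for(;;)
    {
        pthread_setcancelstate( PTHREAD_CANCEL_DISABLE, &oldstate );
        // ... update shared state that must not be left half-finished ...
        pthread_setcancelstate( oldstate, NULL );

        // Safe point: if pthread_cancel() has been posted to this thread,
        // this call ends the thread via pthread_exit(PTHREAD_CANCELED).
        pthread_testcancel();
    }
    // not reached
    return arg;
}
#endif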
1617
 
1618
//-----------------------------------------------------------------------------
1619
// These two functions actually implement the cleanup push and pop functionality.
1620
 
1621
externC void pthread_cleanup_push_inner (struct pthread_cleanup_buffer *buffer,
1622
                                         void (*routine) (void *),
1623
                                         void *arg)
1624
{
1625
    PTHREAD_ENTRY();
1626
 
1627
    pthread_info *self = pthread_self_info();
1628
 
1629
    buffer->routine     = routine;
1630
    buffer->arg         = arg;
1631
 
1632
    buffer->prev        = self->cancelbuffer;
1633
 
1634
    self->cancelbuffer  = buffer;
1635
 
1636
    return;
1637
}
1638
 
1639
externC void pthread_cleanup_pop_inner (struct pthread_cleanup_buffer *buffer,
1640
                                        int execute)
1641
{
1642
    PTHREAD_ENTRY();
1643
 
1644
    pthread_info *self = pthread_self_info();
1645
 
1646
    CYG_ASSERT( self->cancelbuffer == buffer, "Stacking error in cleanup buffers");
1647
 
1648
    if( self->cancelbuffer == buffer )
1649
    {
1650
        // Remove the buffer from the stack
1651
        self->cancelbuffer = buffer->prev;
1652
    }
1653
    else
1654
    {
1655
        // If the top of the stack is not the buffer we expect, do not
1656
        // execute it.
1657
        execute = 0;
1658
    }
1659
 
1660
    if( execute ) buffer->routine(buffer->arg);
1661
 
1662
    return;
1663
}
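// Usage sketch (illustrative, not part of this file): the standard
// pthread_cleanup_push()/pthread_cleanup_pop() macros from <pthread.h>,
// which are implemented in terms of the _inner functions above, used here
// to guarantee a mutex is released even if the holder is cancelled. The
// lock, unlock_it and guarded_work names are assumptions.
#if 0
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void unlock_it( void *m )
{
    pthread_mutex_unlock( (pthread_mutex_t *)m );
}

static void guarded_work( void )
{
    pthread_mutex_lock( &lock );
    pthread_cleanup_push( unlock_it, &lock );

    // ... code that may be cancelled or call pthread_exit(); the cleanup
    // handler guarantees the mutex is released either way ...

    pthread_cleanup_pop( 1 );   // 1 => also run unlock_it() now
}
#endif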
1664
 
1665
 
1666
// -------------------------------------------------------------------------
1667
// eCos-specific function to measure stack usage of the supplied thread
1668
 
1669
#ifdef CYGFUN_KERNEL_THREADS_STACK_MEASUREMENT
1670
externC size_t pthread_measure_stack_usage (pthread_t thread)
1671
{
1672
    pthread_info *th = pthread_info_id(thread);
1673
 
1674
    if ( NULL == th )
1675
      return (size_t)-1;
1676
 
1677
    return (size_t)th->thread->measure_stack_usage();
1678
}
1679
#endif
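// Usage sketch (illustrative, not part of this file, and only meaningful
// when CYGFUN_KERNEL_THREADS_STACK_MEASUREMENT is enabled): reporting a
// thread's stack high-water mark. report_stack() is an assumed name, and
// the declaration of pthread_measure_stack_usage() is assumed to be
// visible from the eCos pthread headers.
#if 0
#include <pthread.h>
#include <stdio.h>

static void report_stack( pthread_t t )
{
    size_t used = pthread_measure_stack_usage( t );

    if( used != (size_t)-1 )
        printf( "thread used %u bytes of stack\n", (unsigned)used );
}
#endif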
1680
 
1681
// -------------------------------------------------------------------------
1682
// EOF pthread.cxx
