//==========================================================================
//
//      sched/mlqueue.cxx
//
//      Multi-level queue scheduler class implementation
//
//==========================================================================
//####ECOSGPLCOPYRIGHTBEGIN####
// -------------------------------------------
// This file is part of eCos, the Embedded Configurable Operating System.
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc.
//
// eCos is free software; you can redistribute it and/or modify it under
// the terms of the GNU General Public License as published by the Free
// Software Foundation; either version 2 or (at your option) any later version.
//
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
// for more details.
//
// You should have received a copy of the GNU General Public License along
// with eCos; if not, write to the Free Software Foundation, Inc.,
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
//
// As a special exception, if other files instantiate templates or use macros
// or inline functions from this file, or you compile this file and link it
// with other works to produce a work based on this file, this file does not
// by itself cause the resulting work to be covered by the GNU General Public
// License. However the source code for this file must still be made available
// in accordance with section (3) of the GNU General Public License.
//
// This exception does not invalidate any other reasons why a work based on
// this file might be covered by the GNU General Public License.
//
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc.
// at http://sources.redhat.com/ecos/ecos-license/
// -------------------------------------------
//####ECOSGPLCOPYRIGHTEND####
//==========================================================================
//#####DESCRIPTIONBEGIN####
//
// Author(s):    nickg
// Contributors: jlarmour
// Date:         1999-02-17
// Purpose:      Multilevel queue scheduler class implementation
// Description:  This file contains the implementations of
//               Cyg_Scheduler_Implementation and
//               Cyg_SchedThread_Implementation.
//
//
//####DESCRIPTIONEND####
//
//==========================================================================

#include <pkgconf/kernel.h>

#include <cyg/kernel/ktypes.h>         // base kernel types
#include <cyg/infra/cyg_trac.h>        // tracing macros
#include <cyg/infra/cyg_ass.h>         // assertion macros

#include <cyg/kernel/sched.hxx>        // our header

#include <cyg/hal/hal_arch.h>          // Architecture specific definitions

#include <cyg/kernel/thread.inl>       // thread inlines
#include <cyg/kernel/sched.inl>        // scheduler inlines

#ifdef CYGSEM_KERNEL_SCHED_MLQUEUE

//==========================================================================
// Cyg_Scheduler_Implementation class static members

#ifdef CYGSEM_KERNEL_SCHED_TIMESLICE
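
// Per-CPU counters of the timeslice ticks remaining before the scheduler
// considers rotating the relevant run queue (see timeslice() below).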
cyg_ucount32 Cyg_Scheduler_Implementation::timeslice_count[CYGNUM_KERNEL_CPU_MAX];

#endif


//==========================================================================
// Cyg_Scheduler_Implementation class members

// -------------------------------------------------------------------------
// Constructor.

Cyg_Scheduler_Implementation::Cyg_Scheduler_Implementation()
{
    CYG_REPORT_FUNCTION();
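
    // queue_map contains one bit for each priority level: a set bit means
    // that the run queue for that priority currently holds at least one
    // runnable thread.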
    queue_map   = 0;

#ifdef CYGPKG_KERNEL_SMP_SUPPORT
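
    // pending[] counts, per priority, the runnable threads that are not
    // currently claimed by any CPU; pending_map mirrors those counts as a
    // bitmask so that schedule() can find the most important such priority
    // with a single bit search.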
    pending_map = 0;

    for( int i = 0; i < CYGNUM_KERNEL_SCHED_PRIORITIES; i++ )
        pending[i] = 0;

#endif

    for( int i = 0; i < CYGNUM_KERNEL_CPU_MAX; i++ )
    {
#ifdef CYGSEM_KERNEL_SCHED_TIMESLICE
        timeslice_count[i] = CYGNUM_KERNEL_SCHED_TIMESLICE_TICKS;
#endif
        need_reschedule[i] = true;
    }

    CYG_REPORT_RETURN();
}

// -------------------------------------------------------------------------
// Choose the best thread to run next

Cyg_Thread *
Cyg_Scheduler_Implementation::schedule(void)
{
    CYG_REPORT_FUNCTYPE("returning thread %08x");

    // The run queue may _never_ be empty; there is always
    // an idle thread at the lowest priority.

    CYG_ASSERT( queue_map != 0, "Run queue empty");
    CYG_ASSERT( queue_map & (1<<CYG_THREAD_MIN_PRIORITY), "Idle thread vanished!!!");
    CYG_ASSERT( !run_queue[CYG_THREAD_MIN_PRIORITY].empty(), "Idle thread vanished!!!");

#ifdef CYGPKG_KERNEL_SMP_SUPPORT

    Cyg_Thread *current = get_current_thread();
    register cyg_uint32 index;

    CYG_ASSERT( current->cpu != CYG_KERNEL_CPU_NONE, "Current thread does not have CPU set!");

    // If the current thread is still runnable, return it to pending
    // state so that it can be considered alongside any other threads
    // for execution.
    if( current->get_state() == Cyg_Thread::RUNNING )
    {
        current->cpu = CYG_KERNEL_CPU_NONE;
        pending[current->priority]++;
        pending_map |= (1<<current->priority);
    }
    else
    {
        // Otherwise, ensure that the thread is no longer marked as
        // running.
        current->cpu = CYG_KERNEL_CPU_NONE;
    }
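
    // Priorities are ordered so that numerically lower values are more
    // important, so the least significant set bit of pending_map identifies
    // the best priority that has a thread awaiting a CPU.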
    HAL_LSBIT_INDEX(index, pending_map);

    Cyg_RunQueue *queue = &run_queue[index];

    CYG_ASSERT( !queue->empty(), "Queue for index empty");
    CYG_ASSERT( pending[index] > 0, "Pending array and map disagree");

    Cyg_Thread *thread = queue->get_head();

    // We know there is a runnable thread in this queue. If the thread
    // we got is not it, scan until we find it. While not constant time,
    // this search has an upper bound of the number of CPUs in the system.

    while( thread->cpu != CYG_KERNEL_CPU_NONE )
        thread = thread->get_next();

    // Take newly scheduled thread out of pending map
    thread->cpu = CYG_KERNEL_CPU_THIS();
    if( --pending[index] == 0 )
        pending_map &= ~(1<<index);

#else

    register cyg_uint32 index;
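
    // Single CPU: the least significant set bit of queue_map gives the
    // most important non-empty run queue.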
    HAL_LSBIT_INDEX(index, queue_map);

    Cyg_RunQueue *queue = &run_queue[index];

    CYG_ASSERT( !queue->empty(), "Queue for index empty");

    Cyg_Thread *thread = queue->get_head();

#endif

    CYG_INSTRUMENT_MLQ( SCHEDULE, thread, index);

    CYG_ASSERT( thread != NULL , "No threads in run queue");
    CYG_ASSERT( thread->queue == NULL , "Runnable thread on a queue!");

    CYG_REPORT_RETVAL(thread);

    return thread;
}

// -------------------------------------------------------------------------

void
Cyg_Scheduler_Implementation::add_thread(Cyg_Thread *thread)
{
    CYG_REPORT_FUNCTION();
    CYG_REPORT_FUNCARG1("thread=%08x", thread);

    cyg_priority pri    = thread->priority;
    Cyg_RunQueue *queue = &run_queue[pri];

    CYG_INSTRUMENT_MLQ( ADD, thread, pri);

    CYG_ASSERT((CYG_THREAD_MIN_PRIORITY >= pri)
               && (CYG_THREAD_MAX_PRIORITY <= pri),
               "Priority out of range!");

    CYG_ASSERT( ((queue_map & (1<<pri))!=0) == ((!run_queue[pri].empty())!=0), "Map and queue disagree");

    // If the thread is on some other queue, remove it
    // here.
    if( thread->queue != NULL )
    {
        thread->queue->remove(thread);
    }

    if( queue->empty() )
    {
        // set the map bit and ask for a reschedule if this is a
        // new highest priority thread.

        queue_map |= (1<<pri);

    }
    // else the queue already has an occupant, queue behind it

    queue->add_tail(thread);

    // If the new thread is higher priority than any
    // current thread, request a reschedule.

    set_need_reschedule(thread);

#ifdef CYGPKG_KERNEL_SMP_SUPPORT

    // If the thread is not currently running, increment the pending
    // count for the priority, and if necessary set the bit in the
    // pending map.

    if( thread->cpu == CYG_KERNEL_CPU_NONE )
    {
        if( pending[pri]++ == 0 )
            pending_map |= (1<<pri);
    }
    // Otherwise the pending count will be dealt with in schedule().

#endif

    CYG_ASSERT( thread->queue == NULL , "Runnable thread on a queue!");
    CYG_ASSERT( queue_map != 0, "Run queue empty");
    CYG_ASSERT( queue_map & (1<<pri), "Queue map bit not set for pri");
    CYG_ASSERT( !run_queue[pri].empty(), "Queue for pri empty");
    CYG_ASSERT( ((queue_map & (1<<pri))!=0) == ((!run_queue[pri].empty())!=0), "Map and queue disagree");
    CYG_ASSERT( queue_map & (1<<CYG_THREAD_MIN_PRIORITY), "Idle thread vanished!!!");
    CYG_ASSERT( !run_queue[CYG_THREAD_MIN_PRIORITY].empty(), "Idle thread vanished!!!");

    CYG_REPORT_RETURN();
}

// -------------------------------------------------------------------------

void
Cyg_Scheduler_Implementation::rem_thread(Cyg_Thread *thread)
{
    CYG_REPORT_FUNCTION();
    CYG_REPORT_FUNCARG1("thread=%08x", thread);

    CYG_ASSERT( queue_map != 0, "Run queue empty");

    cyg_priority pri    = thread->priority;
    Cyg_RunQueue *queue = &run_queue[pri];

    CYG_INSTRUMENT_MLQ( REM, thread, pri);

    CYG_ASSERT( pri != CYG_THREAD_MIN_PRIORITY, "Idle thread trying to sleep!");
    CYG_ASSERT( !run_queue[CYG_THREAD_MIN_PRIORITY].empty(), "Idle thread vanished!!!");

#ifdef CYGPKG_KERNEL_SMP_SUPPORT

    if( thread->cpu == CYG_KERNEL_CPU_NONE )
    {
        // If the thread is not running, then we need to adjust the
        // pending count array and map if necessary.

        if( --pending[pri] == 0 )
            pending_map &= ~(1<<pri);
    }
    else
    {
        // If the target thread is currently running on a different
        // CPU, send a reschedule interrupt there to deschedule it.
        if( thread->cpu != CYG_KERNEL_CPU_THIS() )
            CYG_KERNEL_CPU_RESCHEDULE_INTERRUPT( thread->cpu, 0 );
    }
    // If the thread is currently running on this CPU, then the pending
    // count will be dealt with in schedule().

#endif

    CYG_ASSERT( queue_map & (1<<pri), "Queue map bit not set for pri");
    CYG_ASSERT( !run_queue[pri].empty(), "Queue for pri empty");

    // remove thread from queue
    queue->remove(thread);

    if( queue->empty() )
    {
        // If this was the only thread in the
        // queue, clear the map bit.

        queue_map &= ~(1<<pri);
    }

    CYG_ASSERT( queue_map != 0, "Run queue empty");
    CYG_ASSERT( queue_map & (1<<CYG_THREAD_MIN_PRIORITY), "Idle thread vanished!!!");
    CYG_ASSERT( !run_queue[CYG_THREAD_MIN_PRIORITY].empty(), "Idle thread vanished!!!");
    CYG_ASSERT( ((queue_map & (1<<pri))!=0) == ((!run_queue[pri].empty())!=0), "Map and queue disagree");

    CYG_REPORT_RETURN();
}

// -------------------------------------------------------------------------
// Set the need_reschedule flag
// This function overrides the definition in Cyg_Scheduler_Base and tests
// for a reschedule condition based on the priorities of the given thread
// and the current thread(s).

void Cyg_Scheduler_Implementation::set_need_reschedule(Cyg_Thread *thread)
{
#ifndef CYGPKG_KERNEL_SMP_SUPPORT

    if( current_thread[0]->priority > thread->priority ||
        current_thread[0]->get_state() != Cyg_Thread::RUNNING )
        need_reschedule[0] = true;

#else

    HAL_SMP_CPU_TYPE cpu_this = CYG_KERNEL_CPU_THIS();
    HAL_SMP_CPU_TYPE cpu_count = CYG_KERNEL_CPU_COUNT();

    // Start with current CPU. If we can do the job locally then
    // that is most efficient. Only go on to other CPUs if that is
    // not possible.

    for(int i = 0; i < cpu_count; i++)
    {
        HAL_SMP_CPU_TYPE cpu = (i + cpu_this) % cpu_count;

        // If a CPU is not already marked for rescheduling, and its
        // current thread is of lower priority than _thread_, then
        // set its need_reschedule flag.

        Cyg_Thread *cur = current_thread[cpu];

        if( (!need_reschedule[cpu]) &&
            (cur->priority > thread->priority)
          )
        {
            need_reschedule[cpu] = true;

            if( cpu != cpu_this )
            {
                // All processors other than this one need to be sent
                // a reschedule interrupt.

                CYG_INSTRUMENT_SMP( RESCHED_SEND, cpu, 0 );
                CYG_KERNEL_CPU_RESCHEDULE_INTERRUPT( cpu, 0 );
            }

            // Having notionally rescheduled _thread_ onto the cpu, we
            // now see if we can reschedule the former current thread of
            // that CPU onto another.

            thread = cur;
        }
    }

#endif
}

// -------------------------------------------------------------------------
// Set up initial idle thread

void Cyg_Scheduler_Implementation::set_idle_thread( Cyg_Thread *thread, HAL_SMP_CPU_TYPE cpu )
{
    // Make the thread the current thread for this CPU.

    current_thread[cpu] = thread;

    // This will insert the thread in the run queues and make it
    // available to execute.
    thread->resume();

#ifdef CYGPKG_KERNEL_SMP_SUPPORT

    thread->cpu = cpu;

    // In SMP, we need to take this thread out of the pending array
    // and map.

    cyg_priority pri    = thread->priority;
    if( --pending[pri] == 0 )
        pending_map &= ~(1<<pri);
#endif

}

// -------------------------------------------------------------------------
// register thread with scheduler

void
Cyg_Scheduler_Implementation::register_thread(Cyg_Thread *thread)
{
    CYG_REPORT_FUNCTION();
    CYG_REPORT_FUNCARG1("thread=%08x", thread);
    // No registration necessary in this scheduler
    CYG_REPORT_RETURN();
}

// -------------------------------------------------------------------------

// deregister thread
void
Cyg_Scheduler_Implementation::deregister_thread(Cyg_Thread *thread)
{
    CYG_REPORT_FUNCTION();
    CYG_REPORT_FUNCARG1("thread=%08x", thread);
    // No deregistration necessary in this scheduler
    CYG_REPORT_RETURN();
}

// -------------------------------------------------------------------------
// Test the given priority for uniqueness

cyg_bool
Cyg_Scheduler_Implementation::unique( cyg_priority priority)
{
    CYG_REPORT_FUNCTYPE("returning %d");
    CYG_REPORT_FUNCARG1("priority=%d", priority);
    // Priorities are not unique
    CYG_REPORT_RETVAL(true);
    return true;
}

//==========================================================================
// Support for timeslicing option

#ifdef CYGSEM_KERNEL_SCHED_TIMESLICE

// -------------------------------------------------------------------------

void
Cyg_Scheduler_Implementation::timeslice(void)
{
#ifdef CYGDBG_KERNEL_TRACE_TIMESLICE
    CYG_REPORT_FUNCTION();
#endif

#ifdef CYGPKG_KERNEL_SMP_SUPPORT

    HAL_SMP_CPU_TYPE cpu;
    HAL_SMP_CPU_TYPE cpu_count = CYG_KERNEL_CPU_COUNT();
    HAL_SMP_CPU_TYPE cpu_this = CYG_KERNEL_CPU_THIS();
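
    // Decrement every CPU's counter. When a counter reaches zero, run the
    // timeslice processing locally if it belongs to this CPU, otherwise
    // interrupt the owning CPU so that it does so itself.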
    for( cpu = 0; cpu < cpu_count; cpu++ )
    {
        if( --timeslice_count[cpu] == 0 )
            if( cpu == cpu_this )
                timeslice_cpu();
            else CYG_KERNEL_CPU_TIMESLICE_INTERRUPT( cpu, 0 );
    }

#else

    if( --timeslice_count[CYG_KERNEL_CPU_THIS()] == 0 )
        timeslice_cpu();

#endif

#ifdef CYGDBG_KERNEL_TRACE_TIMESLICE
    CYG_REPORT_RETURN();
#endif
}

// -------------------------------------------------------------------------

void
Cyg_Scheduler_Implementation::timeslice_cpu(void)
{
#ifdef CYGDBG_KERNEL_TRACE_TIMESLICE
    CYG_REPORT_FUNCTION();
#endif

    Cyg_Thread *thread = get_current_thread();
    HAL_SMP_CPU_TYPE cpu_this = CYG_KERNEL_CPU_THIS();

    CYG_ASSERT( queue_map != 0, "Run queue empty");
    CYG_ASSERT( queue_map & (1<<CYG_THREAD_MIN_PRIORITY), "Idle thread vanished!!!");

#ifdef CYGSEM_KERNEL_SCHED_TIMESLICE_ENABLE
    if( thread->timeslice_enabled &&
        timeslice_count[cpu_this] == 0 )
#else
    if( timeslice_count[cpu_this] == 0 )
#endif
    {
        CYG_INSTRUMENT_SCHED(TIMESLICE,0,0);
#ifdef CYGDBG_KERNEL_TRACE_TIMESLICE
        CYG_TRACE0( true, "quantum consumed, time to reschedule" );
#endif

        CYG_ASSERT( get_sched_lock() > 0 , "Timeslice called with zero sched_lock");

        // Only try to rotate the run queue if the current thread is running.
        // Otherwise we are going to reschedule anyway.
        if( thread->get_state() == Cyg_Thread::RUNNING )
        {
            Cyg_Scheduler *sched = &Cyg_Scheduler::scheduler;

            CYG_INSTRUMENT_MLQ( TIMESLICE, thread, 0);

            CYG_ASSERTCLASS( thread, "Bad current thread");
            CYG_ASSERTCLASS( sched, "Bad scheduler");

            cyg_priority pri    = thread->priority;
            Cyg_RunQueue *queue = &sched->run_queue[pri];

#ifdef CYGPKG_KERNEL_SMP_SUPPORT

            // In SMP systems we set the head of the queue to point to
            // the thread immediately after the current
            // thread. schedule() will then pick that thread, or one
            // after it to run next.

            queue->to_head( thread->get_next() );
#else
            queue->rotate();
#endif

            if( queue->get_head() != thread )
                sched->set_need_reschedule();

            timeslice_count[cpu_this] = CYGNUM_KERNEL_SCHED_TIMESLICE_TICKS;
        }
    }


    CYG_ASSERT( queue_map & (1<<CYG_THREAD_MIN_PRIORITY), "Idle thread vanished!!!");
    CYG_ASSERT( !run_queue[CYG_THREAD_MIN_PRIORITY].empty(), "Idle thread vanished!!!");
#ifdef CYGDBG_KERNEL_TRACE_TIMESLICE
    CYG_REPORT_RETURN();
#endif
}

// -------------------------------------------------------------------------
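
// C-linkage entry point so that code outside the C++ class interface can
// trigger the per-CPU timeslice processing.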
__externC void cyg_scheduler_timeslice_cpu(void)
{
    Cyg_Scheduler::scheduler.timeslice_cpu();
}

#endif

//==========================================================================
// Cyg_SchedThread_Implementation class members

Cyg_SchedThread_Implementation::Cyg_SchedThread_Implementation
(
    CYG_ADDRWORD sched_info
)
{
    CYG_REPORT_FUNCTION();
    CYG_REPORT_FUNCARG1("sched_info=%08x", sched_info);

    // Set priority to the supplied value.
    priority = (cyg_priority)sched_info;

#ifdef CYGSEM_KERNEL_SCHED_TIMESLICE_ENABLE
    // If timeslice_enabled exists, set it true by default
    timeslice_enabled = true;
#endif
#ifdef CYGPKG_KERNEL_SMP_SUPPORT
    cpu = CYG_KERNEL_CPU_NONE;
#endif

    CYG_REPORT_RETURN();
}

// -------------------------------------------------------------------------
// Yield the processor to another thread

void
Cyg_SchedThread_Implementation::yield(void)
{
    CYG_REPORT_FUNCTION();

    // Prevent preemption
    Cyg_Scheduler::lock();

    Cyg_Thread *thread  = CYG_CLASSFROMBASE(Cyg_Thread,
                                            Cyg_SchedThread_Implementation,
                                            this);

    // Only do this if this thread is running. If it is not, there
    // is no point.

    if( thread->get_state() == Cyg_Thread::RUNNING )
    {
        // To yield we simply rotate the appropriate
        // run queue to the next thread and reschedule.

        CYG_INSTRUMENT_MLQ( YIELD, thread, 0);

        CYG_ASSERTCLASS( thread, "Bad current thread");

        Cyg_Scheduler *sched = &Cyg_Scheduler::scheduler;

        CYG_ASSERTCLASS( sched, "Bad scheduler");

        cyg_priority pri    = thread->priority;
        Cyg_RunQueue *queue = &sched->run_queue[pri];

#ifdef CYGPKG_KERNEL_SMP_SUPPORT

        // In SMP systems we set the head of the queue to point to
        // the thread immediately after the current
        // thread. schedule() will then pick that thread, or one
        // after it to run next.

        queue->to_head( thread->get_next() );
#else
        queue->rotate();
#endif

        if( queue->get_head() != thread )
            sched->set_need_reschedule();

#ifdef CYGSEM_KERNEL_SCHED_TIMESLICE
        // If the thread is still at the head of the queue after the
        // rotation, there is no other thread at this priority to yield
        // to; reset the timeslice counter so that it gets a full quantum.
        else Cyg_Scheduler::reset_timeslice_count();
#endif
    }

    // Unlock the scheduler and switch threads
#ifdef CYGDBG_USE_ASSERTS
    // This test keeps the assertions in unlock_inner() happy if
    // need_reschedule was not set above.
    if( !Cyg_Scheduler::get_need_reschedule() )
        Cyg_Scheduler::unlock();
    else
#endif
    Cyg_Scheduler::unlock_reschedule();


    CYG_REPORT_RETURN();
}

// -------------------------------------------------------------------------
// Rotate the run queue at a specified priority.
// (pri is the decider, not this, so the routine is static)

void
Cyg_SchedThread_Implementation::rotate_queue( cyg_priority pri )
{
    CYG_REPORT_FUNCTION();
    CYG_REPORT_FUNCARG1("priority=%d", pri);

    // Prevent preemption
    Cyg_Scheduler::lock();

    Cyg_Scheduler *sched = &Cyg_Scheduler::scheduler;

    CYG_ASSERTCLASS( sched, "Bad scheduler");

    Cyg_RunQueue *queue = &sched->run_queue[pri];

    if ( !queue->empty() ) {
        queue->rotate();
        sched->set_need_reschedule();
    }

    // Unlock the scheduler and switch threads
    Cyg_Scheduler::unlock();

    CYG_REPORT_RETURN();
}

// -------------------------------------------------------------------------
// Move this thread to the head of its queue
// (not necessarily a scheduler queue)

void
Cyg_SchedThread_Implementation::to_queue_head( void )
{
    CYG_REPORT_FUNCTION();

    // Prevent preemption
    Cyg_Scheduler::lock();

    Cyg_Thread *thread  = CYG_CLASSFROMBASE(Cyg_Thread,
                                            Cyg_SchedThread_Implementation,
                                            this);

    CYG_ASSERTCLASS( thread, "Bad current thread");

    Cyg_ThreadQueue *q = thread->get_current_queue();
    if( q != NULL )
        q->to_head( thread );
    else if( thread->in_list() )
    {
        // If the queue pointer is NULL then it is on a run
        // queue. Move the thread to the head of its priority list
        // and force a reschedule.

        Cyg_Scheduler *sched = &Cyg_Scheduler::scheduler;
        sched->run_queue[thread->priority].to_head( thread );
        sched->set_need_reschedule( thread );
    }

    // Unlock the scheduler and switch threads
    Cyg_Scheduler::unlock();

    CYG_REPORT_RETURN();
}

//==========================================================================
// Cyg_ThreadQueue_Implementation class members

// -------------------------------------------------------------------------

void
Cyg_ThreadQueue_Implementation::enqueue(Cyg_Thread *thread)
{
    CYG_REPORT_FUNCTION();
    CYG_REPORT_FUNCARG1("thread=%08x", thread);

    CYG_INSTRUMENT_MLQ( ENQUEUE, this, thread );

#ifdef CYGIMP_KERNEL_SCHED_SORTED_QUEUES

    // Insert the thread into the queue in priority order.

    Cyg_Thread *qhead = get_head();

    if( qhead == NULL ) add_tail( thread );
    else if( qhead == qhead->get_next() )
    {
        // There is currently only one thread in the queue; join it
        // and adjust the queue pointer to point to the highest
        // priority of the two. If they are the same priority,
        // leave the pointer pointing to the oldest.

        qhead->insert( thread );

        if( thread->priority < qhead->priority )
            to_head(thread);
    }
    else
    {
        // There is more than one thread in the queue. First check
        // whether we are of higher priority than the head and if
        // so just jump in at the front. Also check whether we are
        // lower priority than the tail and jump onto the end.
        // Otherwise we really have to search the queue to find
        // our place.

        if( thread->priority < qhead->priority )
        {
            qhead->insert( thread );
            to_head(thread);
        }
        else if( thread->priority > get_tail()->priority )
        {
            // We are lower priority than any thread in the queue,
            // go in at the end.

            add_tail( thread );
        }
        else
        {
            // Search the queue. We do this backwards so that we
            // always add new threads after any that have the same
            // priority.

            // Because of the previous tests we know that this
            // search will terminate before we hit the head of the
            // queue, hence we do not need to check for that
            // condition.

            Cyg_Thread *qtmp = get_tail();

            // Scan the queue until we find a higher or equal
            // priority thread.

            while( thread->priority > qtmp->priority )
                qtmp = qtmp->get_prev();

            // Append ourself after the node pointed to by qtmp.

            qtmp->append( thread );
        }
    }
#else
    // Just add the thread to the tail of the list
    add_tail( thread );
#endif

    thread->queue = CYG_CLASSFROMBASE(Cyg_ThreadQueue,
                                      Cyg_ThreadQueue_Implementation,
                                      this);
    CYG_REPORT_RETURN();
}

// -------------------------------------------------------------------------

Cyg_Thread *
Cyg_ThreadQueue_Implementation::dequeue(void)
{
    CYG_REPORT_FUNCTYPE("returning thread %08x");

    Cyg_Thread *thread = rem_head();

    CYG_INSTRUMENT_MLQ( DEQUEUE, this, thread );

    if( thread != NULL )
        thread->queue = NULL;

    CYG_REPORT_RETVAL(thread);
    return thread;
}

// -------------------------------------------------------------------------

void
Cyg_ThreadQueue_Implementation::remove( Cyg_Thread *thread )
{
    CYG_REPORT_FUNCTION();
    CYG_REPORT_FUNCARG1("thread=%08x", thread);

    CYG_INSTRUMENT_MLQ( REMOVE, this, thread );

    thread->queue = NULL;

    Cyg_CList_T<Cyg_Thread>::remove( thread );

    CYG_REPORT_RETURN();
}

// -------------------------------------------------------------------------
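
// Return the thread at the head of the queue. When
// CYGIMP_KERNEL_SCHED_SORTED_QUEUES is enabled, enqueue() keeps the list in
// priority order, so the head is the highest priority thread; otherwise the
// head is simply the longest waiting thread.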
Cyg_Thread *
Cyg_ThreadQueue_Implementation::highpri(void)
{
    CYG_REPORT_FUNCTYPE("returning thread %08x");
    CYG_REPORT_RETVAL(get_head());
    return get_head();
}

// -------------------------------------------------------------------------
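
// Record in the thread which queue it now occupies.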
inline void
Cyg_ThreadQueue_Implementation::set_thread_queue(Cyg_Thread *thread,
                                                 Cyg_ThreadQueue *tq )
{
    thread->queue = tq;
}

// -------------------------------------------------------------------------

#endif

// -------------------------------------------------------------------------
// EOF sched/mlqueue.cxx
