/*
 * A basic priority-based scheduler.
 *
 * Copyright (C) 2007, 2008 Bahadir Balban
 */
#include <l4/lib/list.h>
#include <l4/lib/printk.h>
#include <l4/lib/string.h>
#include <l4/lib/mutex.h>
#include <l4/lib/math.h>
#include <l4/lib/bit.h>
#include <l4/lib/spinlock.h>
#include <l4/generic/scheduler.h>
#include <l4/generic/resource.h>
#include <l4/generic/container.h>
#include <l4/generic/preempt.h>
#include <l4/generic/thread.h>
#include <l4/generic/debug.h>
#include <l4/generic/irq.h>
#include <l4/generic/tcb.h>
#include <l4/api/errno.h>
#include <l4/api/kip.h>
#include INC_SUBARCH(mm.h)
#include INC_GLUE(mapping.h)
#include INC_GLUE(init.h)
#include INC_PLAT(platform.h)
#include INC_ARCH(exception.h)
#include INC_SUBARCH(irq.h)

DECLARE_PERCPU(struct scheduler, scheduler);

/* This is incremented on each irq or voluntarily by preempt_disable() */
DECLARE_PERCPU(extern unsigned int, current_irq_nest_count);

/* This ensures no scheduling occurs after voluntary preempt_disable() */
DECLARE_PERCPU(static int, voluntary_preempt);

void sched_lock_runqueues(struct scheduler *sched, unsigned long *irqflags)
{
        spin_lock_irq(&sched->sched_rq[0].lock, irqflags);
        spin_lock(&sched->sched_rq[1].lock);
        BUG_ON(irqs_enabled());
}

void sched_unlock_runqueues(struct scheduler *sched, unsigned long irqflags)
{
        spin_unlock(&sched->sched_rq[1].lock);
        spin_unlock_irq(&sched->sched_rq[0].lock, irqflags);
}

int preemptive()
{
        return per_cpu(current_irq_nest_count) == 0;
}

int preempt_count()
{
        return per_cpu(current_irq_nest_count);
}

#if !defined(CONFIG_PREEMPT_DISABLE)

void preempt_enable(void)
{
        per_cpu(voluntary_preempt)--;
        per_cpu(current_irq_nest_count)--;
}

/* A positive irq nest count implies current context cannot be preempted. */
void preempt_disable(void)
{
        per_cpu(current_irq_nest_count)++;
        per_cpu(voluntary_preempt)++;
}

#else /* End of !CONFIG_PREEMPT_DISABLE */

void preempt_enable(void) { }
void preempt_disable(void) { }

#endif /* CONFIG_PREEMPT_DISABLE */
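
/*
 * Illustrative usage of the pair above (example only, not called from
 * anywhere): a per-cpu critical section is bracketed as
 *
 *        preempt_disable();
 *        ... manipulate per-cpu scheduler state ...
 *        BUG_ON(preemptive());        (the nest count is non-zero in here)
 *        preempt_enable();
 *
 * which keeps schedule() from being entered on this cpu until the
 * matching preempt_enable().
 */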

int in_irq_context(void)
{
        /*
         * If there was a real irq, the irq nest count must be
         * one more than all preempt_disable()'s, which are
         * counted by voluntary_preempt.
         */
        return (per_cpu(current_irq_nest_count) ==
                (per_cpu(voluntary_preempt) + 1));
}

int in_nested_irq_context(void)
{
        /* Deducting voluntary preemptions gives the real irq nesting */
        return (per_cpu(current_irq_nest_count) -
                per_cpu(voluntary_preempt)) > 1;
}
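
/*
 * Worked example for the two counters (illustrative): with a single
 * voluntary preempt_disable() in effect, current_irq_nest_count == 1
 * and voluntary_preempt == 1, so in_irq_context() is false. If a
 * hardware irq then arrives, current_irq_nest_count becomes 2 while
 * voluntary_preempt stays at 1, making in_irq_context() true; one
 * more nested irq pushes the difference to 2 and
 * in_nested_irq_context() becomes true as well.
 */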

int in_process_context(void)
{
        return !in_irq_context();
}

void sched_init_runqueue(struct scheduler *sched, struct runqueue *rq)
{
        link_init(&rq->task_list);
        spin_lock_init(&rq->lock);
        rq->sched = sched;
}

void sched_init()
{
        struct scheduler *sched = &per_cpu(scheduler);

        for (int i = 0; i < SCHED_RQ_TOTAL; i++)
                sched_init_runqueue(sched, &sched->sched_rq[i]);

        sched->rq_runnable = &sched->sched_rq[0];
        sched->rq_expired = &sched->sched_rq[1];
        sched->rq_rt_runnable = &sched->sched_rq[2];
        sched->rq_rt_expired = &sched->sched_rq[3];
        sched->prio_total = TASK_PRIO_TOTAL;
        sched->idle_task = current;
}

/* Swap runnable and expired runqueues. */
static void sched_rq_swap_queues(void)
{
        struct runqueue *temp;

        BUG_ON(list_empty(&per_cpu(scheduler).rq_expired->task_list));

        /* Queues are swapped and the expired list becomes runnable */
        temp = per_cpu(scheduler).rq_runnable;
        per_cpu(scheduler).rq_runnable = per_cpu(scheduler).rq_expired;
        per_cpu(scheduler).rq_expired = temp;
}

static void sched_rq_swap_rtqueues(void)
{
        struct runqueue *temp;

        BUG_ON(list_empty(&per_cpu(scheduler).rq_rt_expired->task_list));

        /* Queues are swapped and the expired list becomes runnable */
        temp = per_cpu(scheduler).rq_rt_runnable;
        per_cpu(scheduler).rq_rt_runnable = per_cpu(scheduler).rq_rt_expired;
        per_cpu(scheduler).rq_rt_expired = temp;
}

/* Set policy on where to add tasks in the runqueue */
#define RQ_ADD_BEHIND        0
#define RQ_ADD_FRONT         1

/* Helper for adding a new task to a runqueue */
static void sched_rq_add_task(struct ktcb *task, struct runqueue *rq, int front)
{
        unsigned long irqflags;
        struct scheduler *sched =
                &per_cpu_byid(scheduler, task->affinity);

        BUG_ON(!list_empty(&task->rq_list));

        /* Lock that particular cpu's runqueue set */
        sched_lock_runqueues(sched, &irqflags);
        if (front)
                list_insert(&task->rq_list, &rq->task_list);
        else
                list_insert_tail(&task->rq_list, &rq->task_list);
        rq->total++;
        task->rq = rq;

        /* Unlock that particular cpu's runqueue set */
        sched_unlock_runqueues(sched, irqflags);
}

/* Helper for removing a task from its runqueue. */
static inline void sched_rq_remove_task(struct ktcb *task)
{
        unsigned long irqflags;
        struct scheduler *sched =
                &per_cpu_byid(scheduler, task->affinity);

        sched_lock_runqueues(sched, &irqflags);

        /*
         * We must lock both, otherwise rqs may swap and
         * we may get the wrong rq.
         */
        BUG_ON(list_empty(&task->rq_list));
        list_remove_init(&task->rq_list);

        task->rq->total--;
        BUG_ON(task->rq->total < 0);
        task->rq = 0;

        sched_unlock_runqueues(sched, irqflags);
}

static inline void
sched_run_task(struct ktcb *task, struct scheduler *sched)
{
        if (task->flags & TASK_REALTIME)
                sched_rq_add_task(task, sched->rq_rt_runnable,
                                  RQ_ADD_BEHIND);
        else
                sched_rq_add_task(task, sched->rq_runnable,
                                  RQ_ADD_BEHIND);
}

static inline void
sched_expire_task(struct ktcb *task, struct scheduler *sched)
{
        if (task->flags & TASK_REALTIME)
                sched_rq_add_task(task, sched->rq_rt_expired,
                                  RQ_ADD_BEHIND);
        else
                sched_rq_add_task(task, sched->rq_expired,
                                  RQ_ADD_BEHIND);
}

void sched_init_task(struct ktcb *task, int prio)
{
        link_init(&task->rq_list);
        task->priority = prio;
        task->ticks_left = 0;
        task->state = TASK_INACTIVE;
        task->ts_need_resched = 0;
        task->flags |= TASK_RESUMING;
}

/* Synchronously resumes a task */
void sched_resume_sync(struct ktcb *task)
{
        BUG_ON(task == current);
        task->state = TASK_RUNNABLE;
        sched_run_task(task, &per_cpu_byid(scheduler, task->affinity));
        schedule();
}

/*
 * Asynchronously resumes a task.
 * The task will run in the future, but at the scheduler's
 * discretion. It is possible that the current task wakes
 * itself up via this function in the scheduler().
 */
void sched_resume_async(struct ktcb *task)
{
        task->state = TASK_RUNNABLE;
        sched_run_task(task, &per_cpu_byid(scheduler, task->affinity));
}

/*
 * Takes all the action that will make a task sleep
 * in the scheduler. If the task is woken up before
 * it schedules, then the operations here are simply
 * undone and the task remains runnable.
 */
void sched_prepare_sleep()
{
        preempt_disable();
        sched_rq_remove_task(current);
        current->state = TASK_SLEEPING;
        preempt_enable();
}

/*
 * preempt_enable/disable()'s are for avoiding entry
 * to the scheduler during this period - but this
 * is only true for the current cpu.
 */
void sched_suspend_sync(void)
{
        preempt_disable();
        sched_rq_remove_task(current);
        current->state = TASK_INACTIVE;
        current->flags &= ~TASK_SUSPENDING;

        if (current->pagerid != current->tid)
                wake_up(&current->wqh_pager, 0);
        preempt_enable();

        schedule();
}

void sched_suspend_async(void)
{
        preempt_disable();
        sched_rq_remove_task(current);
        current->state = TASK_INACTIVE;
        current->flags &= ~TASK_SUSPENDING;

        if (current->pagerid != current->tid)
                wake_up(&current->wqh_pager, 0);
        preempt_enable();

        need_resched = 1;
}

extern void arch_context_switch(struct ktcb *cur, struct ktcb *next);

static inline void context_switch(struct ktcb *next)
{
        struct ktcb *cur = current;

        // printk("Core:%d (%d) to (%d)\n", smp_get_cpuid(), cur->tid, next->tid);

        system_account_context_switch();

        BUG_ON(!current);
        BUG_ON(!current->space);
        BUG_ON(!next);
        BUG_ON(!next->space);

        /* Flush caches and everything, if switching address spaces */
        if (current->space->spid != next->space->spid)
                arch_space_switch(next);

        /* Update utcb region for next task */
        task_update_utcb(next);

        /* Switch context */
        arch_context_switch(cur, next);

        // printk("Returning from yield. Tid: (%d)\n", cur->tid);
}

/*
 * Priority calculation is so simple it is inlined. The task gets
 * the ratio of its priority to total priority of all runnable tasks.
 */
static inline int sched_recalc_ticks(struct ktcb *task, int prio_total)
{
        BUG_ON(prio_total < task->priority);
        BUG_ON(prio_total == 0);
        return task->ticks_assigned =
                CONFIG_SCHED_TICKS * task->priority / prio_total;
}
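
/*
 * Worked example (illustrative figures, not taken from the config):
 * assuming CONFIG_SCHED_TICKS is 100 and the total runnable priority
 * is 20, a task of priority 5 is assigned 100 * 5 / 20 = 25 ticks,
 * i.e. a quarter of the timeslice budget.
 */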

/*
 * Decide whether a real-time task should be picked
 * on this selection.
 */
static inline int sched_select_rt(struct scheduler *sched)
{
        int ctr = sched->task_select_ctr++ & 0xF;

        if (ctr == 0 || ctr == 8 || ctr == 15)
                return 0;
        else
                return 1;
}
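
/*
 * With the 4-bit counter above, ctr cycles through 0..15; the values
 * 0, 8 and 15 give a non-real-time pick, so 13 of every 16 selections
 * favour the real-time runqueues.
 */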

/*
 * Selection happens as follows:
 *
 * A real-time task is chosen 87.5% of the time. This is evenly
 * distributed over a given interval.
 *
 * The idle task is run once when it is explicitly suggested (e.g.
 * for cleanup after a task has exited), but only when no real-time
 * tasks are in the queues.
 *
 * Otherwise, the idle task is run only when no other tasks are
 * runnable.
 */
struct ktcb *sched_select_next(void)
{
        struct scheduler *sched = &per_cpu(scheduler);
        int realtime = sched_select_rt(sched);
        struct ktcb *next = 0;

        for (;;) {
                /* Decision to run an RT task? */
                if (realtime && sched->rq_rt_runnable->total > 0) {
                        /* Get a real-time task, if available */
                        next = link_to_struct(sched->rq_rt_runnable->task_list.next,
                                              struct ktcb, rq_list);
                        break;
                } else if (realtime && sched->rq_rt_expired->total > 0) {
                        /* Swap real-time queues */
                        sched_rq_swap_rtqueues();
                        /* Get a real-time task */
                        next = link_to_struct(sched->rq_rt_runnable->task_list.next,
                                              struct ktcb, rq_list);
                        break;
                /* Idle flagged for run? */
                } else if (sched->flags & SCHED_RUN_IDLE) {
                        /* Clear idle flag */
                        sched->flags &= ~SCHED_RUN_IDLE;
                        next = sched->idle_task;
                        break;
                } else if (sched->rq_runnable->total > 0) {
                        /* Get a regular runnable task, if available */
                        next = link_to_struct(sched->rq_runnable->task_list.next,
                                              struct ktcb, rq_list);
                        break;
                } else if (sched->rq_expired->total > 0) {
                        /* Swap queues and pick from the now-runnable queue */
                        sched_rq_swap_queues();
                        next = link_to_struct(sched->rq_runnable->task_list.next,
                                              struct ktcb, rq_list);
                        break;
                } else if (in_process_context()) {
                        /* No runnable task. Run idle if in process context */
                        next = sched->idle_task;
                        break;
                } else {
                        /*
                         * Nobody is runnable. Irq calls must return to
                         * the interrupted current process to run the
                         * idle task.
                         */
                        next = current;
                        break;
                }
        }
        return next;
}

/* Prepare next runnable task right before switching to it */
void sched_prepare_next(struct ktcb *next)
{
        /* New tasks affect runqueue total priority. */
        if (next->flags & TASK_RESUMING)
                next->flags &= ~TASK_RESUMING;

        /* Zero ticks indicates the task hasn't run since the last rq swap */
        if (next->ticks_left == 0) {
                /*
                 * Redistribute timeslice. We do this as each task
                 * becomes runnable rather than all at once. It is
                 * done on every runqueue swap.
                 */
                sched_recalc_ticks(next, per_cpu(scheduler).prio_total);
                next->ticks_left = next->ticks_assigned;
        }

        /* Reinitialise the task's scheduling granularity boundary */
        next->sched_granule = SCHED_GRANULARITY;
}

/*
 * Tasks come here either by setting need_resched (and taking the next
 * irq), or by calling it directly (in process context).
 *
 * The scheduler is similar to Linux's so-called O(1) scheduler,
 * although a lot simpler. Task priorities determine task timeslices.
 * Each task gets a timeslice proportional to the ratio of its priority
 * to the total priority of all runnable tasks. When this total changes
 * (e.g. threads die or are created, or a thread's priority changes),
 * the timeslices are recalculated on a per-task basis as each thread
 * becomes runnable. Once all runnable tasks expire, the runqueues are
 * swapped. Sleeping tasks are removed from the runnable queue and
 * added back later without affecting the timeslices. Suspended tasks,
 * however, necessitate a timeslice recalculation, as they are
 * considered to go inactive indefinitely or for a very long time.
 * They are put back on the expired queue if they want to run again.
 *
 * A task is rescheduled either when it hits a SCHED_GRANULARITY
 * boundary, or when its timeslice has expired. SCHED_GRANULARITY
 * ensures context switches occur at a maximum boundary even if a
 * task's timeslice is very long. In the future, real-time tasks will
 * be added, and they will be able to ignore SCHED_GRANULARITY.
 *
 * In the future, tasks will also be sorted by priority in their
 * runqueue, as well as having an adjusted timeslice.
 *
 * Runqueues are swapped at one-second intervals, which implies the
 * timeslice recalculations also occur at this interval.
 */
void schedule()
{
        struct ktcb *next;

        /*
         * Should not schedule with preemption
         * disabled or in a nested irq.
         */
        BUG_ON(per_cpu(voluntary_preempt));
        BUG_ON(in_nested_irq_context());

        /* Should not have more ticks than CONFIG_SCHED_TICKS */
        BUG_ON(current->ticks_left > CONFIG_SCHED_TICKS);

        /*
         * If coming from the process path, no irqs that
         * schedule can occur after this point.
         */
        preempt_disable();

        /* Reset schedule flag */
        need_resched = 0;

        /* Remove from runnable and put into the appropriate runqueue */
        if (current->state == TASK_RUNNABLE) {
                sched_rq_remove_task(current);
                if (current->ticks_left)
                        sched_run_task(current, &per_cpu(scheduler));
                else
                        sched_expire_task(current, &per_cpu(scheduler));
        }

        /*
         * FIXME: Are these smp-safe? BB: On first glance they
         * should be, because runqueues are per-cpu right now.
         *
         * If the task is about to sleep and it has pending
         * events, wake it up.
         */
        if ((current->flags & TASK_PENDING_SIGNAL) &&
            current->state == TASK_SLEEPING)
                wake_up_task(current, WAKEUP_INTERRUPT);

        /*
         * If the task has pending events, and is in userspace
         * (guaranteed to have no unfinished jobs in the kernel),
         * handle those events.
         */
        if ((current->flags & TASK_PENDING_SIGNAL) &&
            current->state == TASK_RUNNABLE &&
            TASK_IN_USER(current)) {
                if (current->flags & TASK_SUSPENDING)
                        sched_suspend_async();
        }

        /* Hint the scheduler to run idle asap to free the task */
        if (current->flags & TASK_EXITED) {
                current->flags &= ~TASK_EXITED;
                per_cpu(scheduler).flags |= SCHED_RUN_IDLE;
        }

        /* Decide on the next runnable task */
        next = sched_select_next();

        /* Prepare the next task for running */
        sched_prepare_next(next);

        /* Finish */
        disable_irqs();
        preempt_enable();
        context_switch(next);
}

/*
 * Start the timer and switch to the current task
 * for the first-ever scheduling.
 */
void scheduler_start()
{
        platform_timer_start();
        switch_to_user(current);
}