/*
 * linux/kernel/sched.c
 *
 * Kernel scheduler and related syscalls
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and
 *            make semaphores SMP safe
 * 1998-11-19 Implemented schedule_timeout() and related stuff
 *            by Andrea Arcangeli
 * 1998-12-28 Implemented better SMP scheduling by Ingo Molnar
 */

/*
 * 'sched.c' is the main kernel file. It contains scheduling primitives
 * (sleep_on, wakeup, schedule etc) as well as a number of simple system
 * call functions (type getpid()), which just extract a field from
 * current-task
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <linux/nmi.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/completion.h>
#include <linux/prefetch.h>
#include <linux/compiler.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>

extern void timer_bh(void);
extern void tqueue_bh(void);
extern void immediate_bh(void);

/*
 * scheduler variables
 */

unsigned securebits = SECUREBITS_DEFAULT; /* systemwide security settings */

extern void mem_use(void);

/*
 * Scheduling quanta.
 *
 * NOTE! The unix "nice" value influences how long a process
 * gets. The nice value ranges from -20 to +19, where a -20
 * is a "high-priority" task, and a "+19" is a low-priority
 * task.
 *
 * We want the time-slice to be around 50ms or so, so this
 * calculation depends on the value of HZ.
 */
#if HZ < 200
#define TICK_SCALE(x) ((x) >> 2)
#elif HZ < 400
#define TICK_SCALE(x) ((x) >> 1)
#elif HZ < 800
#define TICK_SCALE(x) (x)
#elif HZ < 1600
#define TICK_SCALE(x) ((x) << 1)
#else
#define TICK_SCALE(x) ((x) << 2)
#endif

#define NICE_TO_TICKS(nice) (TICK_SCALE(20-(nice))+1)

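/*
 * With HZ=100, for example, TICK_SCALE(x) is (x) >> 2, so a nice-0 task
 * gets NICE_TO_TICKS(0) = (20 >> 2) + 1 = 6 ticks per slice (about 60ms),
 * nice -20 gets 11 ticks and nice +19 gets the minimum of 1 tick.
 */
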
/*
 * Init task must be ok at boot for the ix86 as we will check its signals
 * via the SMP irq return path.
 */

struct task_struct * init_tasks[NR_CPUS] = {&init_task, };

/*
 * The tasklist_lock protects the linked list of processes.
 *
 * The runqueue_lock locks the parts that actually access
 * and change the run-queues, and have to be interrupt-safe.
 *
 * If both locks are to be concurrently held, the runqueue_lock
 * nests inside the tasklist_lock.
 *
 * task->alloc_lock nests inside tasklist_lock.
 */
spinlock_t runqueue_lock __cacheline_aligned = SPIN_LOCK_UNLOCKED; /* inner */
rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED;     /* outer */

static LIST_HEAD(runqueue_head);

/*
 * We align per-CPU scheduling data on cacheline boundaries,
 * to prevent cacheline ping-pong.
 */
static union {
        struct schedule_data {
                struct task_struct * curr;
                cycles_t last_schedule;
        } schedule_data;
        char __pad [SMP_CACHE_BYTES];
} aligned_data [NR_CPUS] __cacheline_aligned = { {{&init_task,0}}};

#define cpu_curr(cpu) aligned_data[(cpu)].schedule_data.curr
#define last_schedule(cpu) aligned_data[(cpu)].schedule_data.last_schedule

struct kernel_stat kstat;
extern struct task_struct *child_reaper;

#ifdef CONFIG_SMP

#define idle_task(cpu) (init_tasks[cpu_number_map(cpu)])
#define can_schedule(p,cpu) \
        ((p)->cpus_runnable & (p)->cpus_allowed & (1UL << cpu))
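
/*
 * can_schedule() relies on the cpus_runnable convention: a task that is
 * not running anywhere has all bits set, while a running task has only
 * the bit of the CPU it currently occupies. The mask therefore rejects
 * tasks still running on another CPU as well as CPUs excluded by
 * cpus_allowed.
 */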

#else

#define idle_task(cpu) (&init_task)
#define can_schedule(p,cpu) (1)

#endif

void scheduling_functions_start_here(void) { }

/*
 * This is the function that decides how desirable a process is..
 * You can weigh different processes against each other depending
 * on what CPU they've run on lately etc to try to handle cache
 * and TLB miss penalties.
 *
 * Return values:
 *       -1000: never select this
 *           0: out of time, recalculate counters (but it might still be
 *              selected)
 *         +ve: "goodness" value (the larger, the better)
 *       +1000: realtime process, select this.
 */

static inline int goodness(struct task_struct * p, int this_cpu, struct mm_struct *this_mm)
{
        int weight;

        /*
         * select the current process after every other
         * runnable process, but before the idle thread.
         * Also, don't trigger a counter recalculation.
         */
        weight = -1;
        if (p->policy & SCHED_YIELD)
                goto out;

        /*
         * Non-RT process - normal case first.
         */
        if (p->policy == SCHED_OTHER) {
                /*
                 * Give the process a first-approximation goodness value
                 * according to the number of clock-ticks it has left.
                 *
                 * Don't do any other calculations if the time slice is
                 * over..
                 */
                weight = p->counter;
                if (!weight)
                        goto out;

#ifdef CONFIG_SMP
                /* Give a largish advantage to the same processor... */
                /* (this is equivalent to penalizing other processors) */
                if (p->processor == this_cpu)
                        weight += PROC_CHANGE_PENALTY;
#endif

                /* .. and a slight advantage to the current MM */
                if (p->mm == this_mm || !p->mm)
                        weight += 1;
                weight += 20 - p->nice;
                goto out;
        }

        /*
         * Realtime process, select the first one on the
         * runqueue (taking priorities within processes
         * into account).
         */
        weight = 1000 + p->rt_priority;
out:
        return weight;
}
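
/*
 * For a runnable SCHED_OTHER task the resulting weight is
 * p->counter + (20 - p->nice), plus PROC_CHANGE_PENALTY when the task
 * last ran on this CPU and +1 when it shares the current mm, so it
 * always stays below the 1000 base used for realtime tasks.
 */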

/*
 * the 'goodness value' of replacing a process on a given CPU.
 * positive value means 'replace', zero or negative means 'don't'.
 */
static inline int preemption_goodness(struct task_struct * prev, struct task_struct * p, int cpu)
{
        return goodness(p, cpu, prev->active_mm) - goodness(prev, cpu, prev->active_mm);
}

/*
 * This is ugly, but reschedule_idle() is very timing-critical.
 * We are called with the runqueue spinlock held and we must
 * not claim the tasklist_lock.
 */
static FASTCALL(void reschedule_idle(struct task_struct * p));

static void reschedule_idle(struct task_struct * p)
{
#ifdef CONFIG_SMP
        int this_cpu = smp_processor_id();
        struct task_struct *tsk, *target_tsk;
        int cpu, best_cpu, i, max_prio;
        cycles_t oldest_idle;

        /*
         * shortcut if the woken up task's last CPU is
         * idle now.
         */
        best_cpu = p->processor;
        if (can_schedule(p, best_cpu)) {
                tsk = idle_task(best_cpu);
                if (cpu_curr(best_cpu) == tsk) {
                        int need_resched;
send_now_idle:
                        /*
                         * If need_resched == -1 then we can skip sending
                         * the IPI altogether, tsk->need_resched is
                         * actively watched by the idle thread.
                         */
                        need_resched = tsk->need_resched;
                        tsk->need_resched = 1;
                        if ((best_cpu != this_cpu) && !need_resched)
                                smp_send_reschedule(best_cpu);
                        return;
                }
        }

        /*
         * We know that the preferred CPU has a cache-affine current
         * process, let's try to find a new idle CPU for the woken-up
         * process. Select the least recently active idle CPU. (that
         * one will have the least active cache context.) Also find
         * the executing process which has the least priority.
         */
        oldest_idle = (cycles_t) -1;
        target_tsk = NULL;
        max_prio = 0;

        for (i = 0; i < smp_num_cpus; i++) {
                cpu = cpu_logical_map(i);
                if (!can_schedule(p, cpu))
                        continue;
                tsk = cpu_curr(cpu);
                /*
                 * We use the first available idle CPU. This creates
                 * a priority list between idle CPUs, but this is not
                 * a problem.
                 */
                if (tsk == idle_task(cpu)) {
#if defined(__i386__) && defined(CONFIG_SMP)
                        /*
                         * Check if two siblings are idle in the same
                         * physical package. Use them if found.
                         */
                        if (smp_num_siblings == 2) {
                                if (cpu_curr(cpu_sibling_map[cpu]) ==
                                    idle_task(cpu_sibling_map[cpu])) {
                                        oldest_idle = last_schedule(cpu);
                                        target_tsk = tsk;
                                        break;
                                }

                        }
#endif
                        if (last_schedule(cpu) < oldest_idle) {
                                oldest_idle = last_schedule(cpu);
                                target_tsk = tsk;
                        }
                } else {
                        if (oldest_idle == (cycles_t)-1) {
                                int prio = preemption_goodness(tsk, p, cpu);

                                if (prio > max_prio) {
                                        max_prio = prio;
                                        target_tsk = tsk;
                                }
                        }
                }
        }
        tsk = target_tsk;
        if (tsk) {
                if (oldest_idle != (cycles_t)-1) {
                        best_cpu = tsk->processor;
                        goto send_now_idle;
                }
                tsk->need_resched = 1;
                if (tsk->processor != this_cpu)
                        smp_send_reschedule(tsk->processor);
        }
        return;


#else /* UP */
        int this_cpu = smp_processor_id();
        struct task_struct *tsk;

        tsk = cpu_curr(this_cpu);
        if (preemption_goodness(tsk, p, this_cpu) > 0)
                tsk->need_resched = 1;
#endif
}

/*
 * Careful!
 *
 * This has to add the process to the _end_ of the
 * run-queue, not the beginning. The goodness value will
 * determine whether this process will run next. This is
 * important to get SCHED_FIFO and SCHED_RR right, where
 * a process that has either been pre-empted or whose time slice
 * has expired should be moved to the tail of the run
 * queue for its priority - Bhavesh Davda
 */
static inline void add_to_runqueue(struct task_struct * p)
{
        list_add_tail(&p->run_list, &runqueue_head);
        nr_running++;
}

static inline void move_last_runqueue(struct task_struct * p)
{
        list_del(&p->run_list);
        list_add_tail(&p->run_list, &runqueue_head);
}

/*
 * Wake up a process. Put it on the run-queue if it's not
 * already there. The "current" process is always on the
 * run-queue (except when the actual re-schedule is in
 * progress), and as such you're allowed to do the simpler
 * "current->state = TASK_RUNNING" to mark yourself runnable
 * without the overhead of this.
 */
static inline int try_to_wake_up(struct task_struct * p, int synchronous)
{
        unsigned long flags;
        int success = 0;

        /*
         * We want the common case to fall through straight, thus the goto.
         */
        spin_lock_irqsave(&runqueue_lock, flags);
        p->state = TASK_RUNNING;
        if (task_on_runqueue(p))
                goto out;
        add_to_runqueue(p);
        if (!synchronous || !(p->cpus_allowed & (1UL << smp_processor_id())))
                reschedule_idle(p);
        success = 1;
out:
        spin_unlock_irqrestore(&runqueue_lock, flags);
        return success;
}

inline int wake_up_process(struct task_struct * p)
{
        return try_to_wake_up(p, 0);
}
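
/*
 * A synchronous wakeup (as used by __wake_up_sync) only skips the
 * reschedule_idle() kick when the woken task may run on the waking CPU:
 * the waker is about to schedule anyway, so there is no point in forcing
 * a preemption just to move the task elsewhere.
 */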

static void process_timeout(unsigned long __data)
{
        struct task_struct * p = (struct task_struct *) __data;

        wake_up_process(p);
}

/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns. The routine will return 0
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task. In this case the remaining time
 * in jiffies will be returned, or 0 if the timer expired in time
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
signed long schedule_timeout(signed long timeout)
{
        struct timer_list timer;
        unsigned long expire;

        switch (timeout)
        {
        case MAX_SCHEDULE_TIMEOUT:
                /*
                 * These two special cases are useful to be comfortable
                 * in the caller. Nothing more. We could take
                 * MAX_SCHEDULE_TIMEOUT from one of the negative values
                 * but I'd like to return a valid offset (>=0) to allow
                 * the caller to do everything it wants with the retval.
                 */
                schedule();
                goto out;
        default:
                /*
                 * Another bit of PARANOID. Note that the retval will be
                 * 0 since no piece of kernel is supposed to do a check
                 * for a negative retval of schedule_timeout() (since it
                 * should never happen anyway). You just have the printk()
                 * that will tell you if something has gone wrong and where.
                 */
                if (timeout < 0)
                {
                        printk(KERN_ERR "schedule_timeout: wrong timeout "
                                "value %lx from %p\n", timeout,
                                __builtin_return_address(0));
                        current->state = TASK_RUNNING;
                        goto out;
                }
        }

        expire = timeout + jiffies;

        init_timer(&timer);
        timer.expires = expire;
        timer.data = (unsigned long) current;
        timer.function = process_timeout;

        add_timer(&timer);
        schedule();
        del_timer_sync(&timer);

        timeout = expire - jiffies;

out:
        return timeout < 0 ? 0 : timeout;
}
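
/*
 * Typical use (a sketch, not taken from this file): the caller sets its
 * state first, then calls schedule_timeout() and checks how much of the
 * timeout was left when it woke up:
 *
 *      set_current_state(TASK_INTERRUPTIBLE);
 *      remaining = schedule_timeout(HZ);       - sleep for about a second
 *      if (remaining)
 *              woken early, e.g. by a signal or wake_up_process()
 */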

/*
 * schedule_tail() is getting called from the fork return path. This
 * cleans up all remaining scheduler things, without impacting the
 * common case.
 */
static inline void __schedule_tail(struct task_struct *prev)
{
#ifdef CONFIG_SMP
        int policy;

        /*
         * prev->policy can be written from here only before `prev'
         * can be scheduled (before setting prev->cpus_runnable to ~0UL).
         * Of course it must also be read before allowing prev
         * to be rescheduled, but since the write depends on the read
         * to complete, wmb() is enough. (the spin_lock() acquired
         * before setting cpus_runnable is not enough because the spin_lock()
         * common code semantics allows code outside the critical section
         * to enter inside the critical section)
         */
        policy = prev->policy;
        prev->policy = policy & ~SCHED_YIELD;
        wmb();

        /*
         * fast path falls through. We have to clear cpus_runnable before
         * checking prev->state to avoid a wakeup race. Protect against
         * the task exiting early.
         */
        task_lock(prev);
        task_release_cpu(prev);
        mb();
        if (prev->state == TASK_RUNNING)
                goto needs_resched;

out_unlock:
        task_unlock(prev);      /* Synchronise here with release_task() if prev is TASK_ZOMBIE */
        return;

        /*
         * Slow path - we 'push' the previous process and
         * reschedule_idle() will attempt to find a new
         * processor for it. (but it might preempt the
         * current process as well.) We must take the runqueue
         * lock and re-check prev->state to be correct. It might
         * still happen that this process has a preemption
         * 'in progress' already - but this is not a problem and
         * might happen in other circumstances as well.
         */
needs_resched:
        {
                unsigned long flags;

                /*
                 * Avoid taking the runqueue lock in cases where
                 * no preemption-check is necessary:
                 */
                if ((prev == idle_task(smp_processor_id())) ||
                    (policy & SCHED_YIELD))
                        goto out_unlock;

                spin_lock_irqsave(&runqueue_lock, flags);
                if ((prev->state == TASK_RUNNING) && !task_has_cpu(prev))
                        reschedule_idle(prev);
                spin_unlock_irqrestore(&runqueue_lock, flags);
                goto out_unlock;
        }
#else
        prev->policy &= ~SCHED_YIELD;
#endif /* CONFIG_SMP */
}

asmlinkage void schedule_tail(struct task_struct *prev)
{
        __schedule_tail(prev);
}

/*
 * 'schedule()' is the scheduler function. It's a very simple and nice
 * scheduler: it's not perfect, but certainly works for most things.
 *
 * The goto is "interesting".
 *
 * NOTE!! Task 0 is the 'idle' task, which gets called when no other
 * tasks can run. It can not be killed, and it cannot sleep. The 'state'
 * information in task[0] is never used.
 */
asmlinkage void schedule(void)
{
        struct schedule_data * sched_data;
        struct task_struct *prev, *next, *p;
        struct list_head *tmp;
        int this_cpu, c;


        spin_lock_prefetch(&runqueue_lock);

        BUG_ON(!current->active_mm);
need_resched_back:
        prev = current;
        this_cpu = prev->processor;

        if (unlikely(in_interrupt())) {
                printk("Scheduling in interrupt\n");
                BUG();
        }

        release_kernel_lock(prev, this_cpu);

        /*
         * 'sched_data' is protected by the fact that we can run
         * only one process per CPU.
         */
        sched_data = & aligned_data[this_cpu].schedule_data;

        spin_lock_irq(&runqueue_lock);

        /* move an exhausted RR process to be last.. */
        if (unlikely(prev->policy == SCHED_RR))
                if (!prev->counter) {
                        prev->counter = NICE_TO_TICKS(prev->nice);
                        move_last_runqueue(prev);
                }

        switch (prev->state) {
                case TASK_INTERRUPTIBLE:
                        if (signal_pending(prev)) {
                                prev->state = TASK_RUNNING;
                                break;
                        }
                default:
                        del_from_runqueue(prev);
                case TASK_RUNNING:;
        }
        prev->need_resched = 0;

        /*
         * this is the scheduler proper:
         */

repeat_schedule:
        /*
         * Default process to select..
         */
        next = idle_task(this_cpu);
        c = -1000;
        list_for_each(tmp, &runqueue_head) {
                p = list_entry(tmp, struct task_struct, run_list);
                if (can_schedule(p, this_cpu)) {
                        int weight = goodness(p, this_cpu, prev->active_mm);
                        if (weight > c)
                                c = weight, next = p;
                }
        }

        /* Do we need to re-calculate counters? */
        if (unlikely(!c)) {
                struct task_struct *p;

                spin_unlock_irq(&runqueue_lock);
                read_lock(&tasklist_lock);
                for_each_task(p)
                        p->counter = (p->counter >> 1) + NICE_TO_TICKS(p->nice);
                read_unlock(&tasklist_lock);
                spin_lock_irq(&runqueue_lock);
                goto repeat_schedule;
        }
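
        /*
         * The recalculation above halves whatever counter a task still
         * has and adds a fresh NICE_TO_TICKS() slice, so a task that
         * keeps sleeping converges on roughly twice its normal timeslice
         * and gets a goodness bonus the next time it runs.
         */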

        /*
         * from this point on nothing can prevent us from
         * switching to the next task, save this fact in
         * sched_data.
         */
        sched_data->curr = next;
        task_set_cpu(next, this_cpu);
        spin_unlock_irq(&runqueue_lock);

        if (unlikely(prev == next)) {
                /* We won't go through the normal tail, so do this by hand */
                prev->policy &= ~SCHED_YIELD;
                goto same_process;
        }

#ifdef CONFIG_SMP
        /*
         * maintain the per-process 'last schedule' value.
         * (this has to be recalculated even if we reschedule to
         * the same process) Currently this is only used on SMP,
         * and it's approximate, so we do not have to maintain
         * it while holding the runqueue spinlock.
         */
        sched_data->last_schedule = get_cycles();

        /*
         * We drop the scheduler lock early (it's a global spinlock),
         * thus we have to lock the previous process from getting
         * rescheduled during switch_to().
         */

#endif /* CONFIG_SMP */

        kstat.context_swtch++;
        /*
         * there are 3 processes which are affected by a context switch:
         *
         * prev == .... ==> (last => next)
         *
         * It's the 'much more previous' 'prev' that is on next's stack,
         * but prev is set to (the just run) 'last' process by switch_to().
         * This might sound slightly confusing but makes tons of sense.
         */
        prepare_to_switch();
        {
                struct mm_struct *mm = next->mm;
                struct mm_struct *oldmm = prev->active_mm;
                if (!mm) {
                        BUG_ON(next->active_mm);
                        next->active_mm = oldmm;
                        atomic_inc(&oldmm->mm_count);
                        enter_lazy_tlb(oldmm, next, this_cpu);
                } else {
                        BUG_ON(next->active_mm != mm);
                        switch_mm(oldmm, mm, next, this_cpu);
                }

                if (!prev->mm) {
                        prev->active_mm = NULL;
                        mmdrop(oldmm);
                }
        }

        /*
         * This just switches the register state and the
         * stack.
         */
        switch_to(prev, next, prev);
        __schedule_tail(prev);

same_process:
        reacquire_kernel_lock(current);
        if (current->need_resched)
                goto need_resched_back;
        return;
}

/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just wake everything
 * up. If it's an exclusive wakeup (nr_exclusive == small +ve number) then we wake all the
 * non-exclusive tasks and one exclusive task.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns zero
 * in this (rare) case, and we handle it by continuing to scan the queue.
 */
static inline void __wake_up_common (wait_queue_head_t *q, unsigned int mode,
                                     int nr_exclusive, const int sync)
{
        struct list_head *tmp;
        struct task_struct *p;

        CHECK_MAGIC_WQHEAD(q);
        WQ_CHECK_LIST_HEAD(&q->task_list);

        list_for_each(tmp,&q->task_list) {
                unsigned int state;
                wait_queue_t *curr = list_entry(tmp, wait_queue_t, task_list);

                CHECK_MAGIC(curr->__magic);
                p = curr->task;
                state = p->state;
                if (state & mode) {
                        WQ_NOTE_WAKER(curr);
                        if (try_to_wake_up(p, sync) && (curr->flags&WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
                                break;
                }
        }
}

void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr)
{
        if (q) {
                unsigned long flags;
                wq_read_lock_irqsave(&q->lock, flags);
                __wake_up_common(q, mode, nr, 0);
                wq_read_unlock_irqrestore(&q->lock, flags);
        }
}

void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr)
{
        if (q) {
                unsigned long flags;
                wq_read_lock_irqsave(&q->lock, flags);
                __wake_up_common(q, mode, nr, 1);
                wq_read_unlock_irqrestore(&q->lock, flags);
        }
}

void complete(struct completion *x)
{
        unsigned long flags;

        spin_lock_irqsave(&x->wait.lock, flags);
        x->done++;
        __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, 0);
        spin_unlock_irqrestore(&x->wait.lock, flags);
}

void wait_for_completion(struct completion *x)
{
        spin_lock_irq(&x->wait.lock);
        if (!x->done) {
                DECLARE_WAITQUEUE(wait, current);

                wait.flags |= WQ_FLAG_EXCLUSIVE;
                __add_wait_queue_tail(&x->wait, &wait);
                do {
                        __set_current_state(TASK_UNINTERRUPTIBLE);
                        spin_unlock_irq(&x->wait.lock);
                        schedule();
                        spin_lock_irq(&x->wait.lock);
                } while (!x->done);
                __remove_wait_queue(&x->wait, &wait);
        }
        x->done--;
        spin_unlock_irq(&x->wait.lock);
}
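
/*
 * Because ->done is a counter protected by the wait-queue lock, a
 * complete() that happens before anyone calls wait_for_completion() is
 * not lost: the waiter sees done != 0, skips the sleep entirely and
 * just consumes one completion.
 */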

#define SLEEP_ON_VAR                            \
        unsigned long flags;                    \
        wait_queue_t wait;                      \
        init_waitqueue_entry(&wait, current);

#define SLEEP_ON_HEAD                           \
        wq_write_lock_irqsave(&q->lock,flags);  \
        __add_wait_queue(q, &wait);             \
        wq_write_unlock(&q->lock);

#define SLEEP_ON_TAIL                           \
        wq_write_lock_irq(&q->lock);            \
        __remove_wait_queue(q, &wait);          \
        wq_write_unlock_irqrestore(&q->lock,flags);
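
/*
 * The three macros above carry the shared boilerplate for the sleep_on()
 * family that follows: declare a wait-queue entry for the current task,
 * queue it under the wait-queue lock before calling schedule() or
 * schedule_timeout(), and unlink it again afterwards.
 */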

void interruptible_sleep_on(wait_queue_head_t *q)
{
        SLEEP_ON_VAR

        current->state = TASK_INTERRUPTIBLE;

        SLEEP_ON_HEAD
        schedule();
        SLEEP_ON_TAIL
}

long interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
{
        SLEEP_ON_VAR

        current->state = TASK_INTERRUPTIBLE;

        SLEEP_ON_HEAD
        timeout = schedule_timeout(timeout);
        SLEEP_ON_TAIL

        return timeout;
}

void sleep_on(wait_queue_head_t *q)
{
        SLEEP_ON_VAR

        current->state = TASK_UNINTERRUPTIBLE;

        SLEEP_ON_HEAD
        schedule();
        SLEEP_ON_TAIL
}

long sleep_on_timeout(wait_queue_head_t *q, long timeout)
{
        SLEEP_ON_VAR

        current->state = TASK_UNINTERRUPTIBLE;

        SLEEP_ON_HEAD
        timeout = schedule_timeout(timeout);
        SLEEP_ON_TAIL

        return timeout;
}

void scheduling_functions_end_here(void) { }

#if CONFIG_SMP
/**
 * set_cpus_allowed() - change a given task's processor affinity
 * @p: task to bind
 * @new_mask: bitmask of allowed processors
 *
 * Upon return, the task is running on a legal processor. Note the caller
 * must have a valid reference to the task: it must not exit() prematurely.
 * This call can sleep; do not hold locks on call.
 */
void set_cpus_allowed(struct task_struct *p, unsigned long new_mask)
{
        new_mask &= cpu_online_map;
        BUG_ON(!new_mask);

        p->cpus_allowed = new_mask;

        /*
         * If the task is on a no-longer-allowed processor, we need to move
         * it. If the task is not current, then set need_resched and send
         * its processor an IPI to reschedule.
         */
        if (!(p->cpus_runnable & p->cpus_allowed)) {
                if (p != current) {
                        p->need_resched = 1;
                        smp_send_reschedule(p->processor);
                }
                /*
                 * Wait until we are on a legal processor. If the task is
                 * current, then we should be on a legal processor the next
                 * time we reschedule. Otherwise, we need to wait for the IPI.
                 */
                while (!(p->cpus_runnable & p->cpus_allowed))
                        schedule();
        }
}
#endif /* CONFIG_SMP */

#ifndef __alpha__

/*
 * This has been replaced by sys_setpriority. Maybe it should be
 * moved into the arch dependent tree for those ports that require
 * it for backward compatibility?
 */

asmlinkage long sys_nice(int increment)
{
        long newprio;

        /*
         * Setpriority might change our priority at the same moment.
         * We don't have to worry. Conceptually one call occurs first
         * and we have a single winner.
         */
        if (increment < 0) {
                if (!capable(CAP_SYS_NICE))
                        return -EPERM;
                if (increment < -40)
                        increment = -40;
        }
        if (increment > 40)
                increment = 40;

        newprio = current->nice + increment;
        if (newprio < -20)
                newprio = -20;
        if (newprio > 19)
                newprio = 19;
        current->nice = newprio;
        return 0;
}

#endif

static inline struct task_struct *find_process_by_pid(pid_t pid)
{
        struct task_struct *tsk = current;

        if (pid)
                tsk = find_task_by_pid(pid);
        return tsk;
}

static int setscheduler(pid_t pid, int policy,
                        struct sched_param *param)
{
        struct sched_param lp;
        struct task_struct *p;
        int retval;

        retval = -EINVAL;
        if (!param || pid < 0)
                goto out_nounlock;

        retval = -EFAULT;
        if (copy_from_user(&lp, param, sizeof(struct sched_param)))
                goto out_nounlock;

        /*
         * We play safe to avoid deadlocks.
         */
        read_lock_irq(&tasklist_lock);
        spin_lock(&runqueue_lock);

        p = find_process_by_pid(pid);

        retval = -ESRCH;
        if (!p)
                goto out_unlock;

        if (policy < 0)
                policy = p->policy;
        else {
                retval = -EINVAL;
                if (policy != SCHED_FIFO && policy != SCHED_RR &&
                    policy != SCHED_OTHER)
                        goto out_unlock;
        }

        /*
         * Valid priorities for SCHED_FIFO and SCHED_RR are 1..99, valid
         * priority for SCHED_OTHER is 0.
         */
        retval = -EINVAL;
        if (lp.sched_priority < 0 || lp.sched_priority > 99)
                goto out_unlock;
        if ((policy == SCHED_OTHER) != (lp.sched_priority == 0))
                goto out_unlock;

        retval = -EPERM;
        if ((policy == SCHED_FIFO || policy == SCHED_RR) &&
            !capable(CAP_SYS_NICE))
                goto out_unlock;
        if ((current->euid != p->euid) && (current->euid != p->uid) &&
            !capable(CAP_SYS_NICE))
                goto out_unlock;

        retval = 0;
        p->policy = policy;
        p->rt_priority = lp.sched_priority;

        current->need_resched = 1;

out_unlock:
        spin_unlock(&runqueue_lock);
        read_unlock_irq(&tasklist_lock);

out_nounlock:
        return retval;
}

asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
                                       struct sched_param *param)
{
        return setscheduler(pid, policy, param);
}

asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param *param)
{
        return setscheduler(pid, -1, param);
}

asmlinkage long sys_sched_getscheduler(pid_t pid)
{
        struct task_struct *p;
        int retval;

        retval = -EINVAL;
        if (pid < 0)
                goto out_nounlock;

        retval = -ESRCH;
        read_lock(&tasklist_lock);
        p = find_process_by_pid(pid);
        if (p)
                retval = p->policy & ~SCHED_YIELD;
        read_unlock(&tasklist_lock);

out_nounlock:
        return retval;
}

asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param *param)
{
        struct task_struct *p;
        struct sched_param lp;
        int retval;

        retval = -EINVAL;
        if (!param || pid < 0)
                goto out_nounlock;

        read_lock(&tasklist_lock);
        p = find_process_by_pid(pid);
        retval = -ESRCH;
        if (!p)
                goto out_unlock;
        lp.sched_priority = p->rt_priority;
        read_unlock(&tasklist_lock);

        /*
         * This one might sleep, we cannot do it with a spinlock held ...
         */
        retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;

out_nounlock:
        return retval;

out_unlock:
        read_unlock(&tasklist_lock);
        return retval;
}

asmlinkage long sys_sched_yield(void)
{
        /*
         * Trick. sched_yield() first counts the number of truly
         * 'pending' runnable processes, then returns if it's
         * only the current process. (This test does not have
         * to be atomic.) In threaded applications this optimization
         * gets triggered quite often.
         */

        int nr_pending = nr_running;

#if CONFIG_SMP
        int i;

        // Subtract non-idle processes running on other CPUs.
        for (i = 0; i < smp_num_cpus; i++) {
                int cpu = cpu_logical_map(i);
                if (aligned_data[cpu].schedule_data.curr != idle_task(cpu))
                        nr_pending--;
        }
#else
        // on UP this process is on the runqueue as well
        nr_pending--;
#endif
        if (nr_pending) {
                /*
                 * This process can only be rescheduled by us,
                 * so this is safe without any locking.
                 */
                if (current->policy == SCHED_OTHER)
                        current->policy |= SCHED_YIELD;
                current->need_resched = 1;

                spin_lock_irq(&runqueue_lock);
                move_last_runqueue(current);
                spin_unlock_irq(&runqueue_lock);
        }
        return 0;
}
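
/*
 * SCHED_YIELD only has to survive until the next pass through
 * schedule(): while it is set goodness() scores the task at -1, so every
 * other runnable process is picked first, and the flag is cleared again
 * in __schedule_tail() or in schedule() itself.
 */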

/**
 * yield - yield the current processor to other threads.
 *
 * this is a shortcut for kernel-space yielding - it marks the
 * thread runnable and calls sys_sched_yield().
 */
void yield(void)
{
        set_current_state(TASK_RUNNING);
        sys_sched_yield();
        schedule();
}

void __cond_resched(void)
{
        set_current_state(TASK_RUNNING);
        schedule();
}

asmlinkage long sys_sched_get_priority_max(int policy)
{
	int ret = -EINVAL;

	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		ret = 99;
		break;
	case SCHED_OTHER:
		ret = 0;
		break;
	}
	return ret;
}

asmlinkage long sys_sched_get_priority_min(int policy)
{
	int ret = -EINVAL;

	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		ret = 1;
		break;
	case SCHED_OTHER:
		ret = 0;
	}
	return ret;
}

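/*
 * Illustrative sketch (not part of this file): user space queries the
 * priority range of a policy through the two syscalls above before picking
 * a static priority, e.g.:
 *
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		int lo = sched_get_priority_min(SCHED_FIFO);
 *		int hi = sched_get_priority_max(SCHED_FIFO);
 *
 *		printf("SCHED_FIFO priorities: %d..%d\n", lo, hi);
 *		return 0;
 *	}
 */
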
asmlinkage long sys_sched_rr_get_interval(pid_t pid, struct timespec *interval)
{
	struct timespec t;
	struct task_struct *p;
	int retval = -EINVAL;

	if (pid < 0)
		goto out_nounlock;

	retval = -ESRCH;
	read_lock(&tasklist_lock);
	p = find_process_by_pid(pid);
	if (p)
		jiffies_to_timespec(p->policy & SCHED_FIFO ? 0 : NICE_TO_TICKS(p->nice),
				    &t);
	read_unlock(&tasklist_lock);
	if (p)
		retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
out_nounlock:
	return retval;
}

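/*
 * Illustrative sketch (not part of this file): the corresponding user-space
 * call reports the round-robin timeslice of a task; pid 0 means the calling
 * process:
 *
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		struct timespec ts;
 *
 *		if (sched_rr_get_interval(0, &ts) == 0)
 *			printf("timeslice: %ld.%09ld s\n",
 *			       (long) ts.tv_sec, ts.tv_nsec);
 *		return 0;
 *	}
 */
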
static void show_task(struct task_struct * p)
{
	unsigned long free = 0;
	int state;
	static const char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };

	printk("%-13.13s ", p->comm);
	state = p->state ? ffz(~p->state) + 1 : 0;
	if (((unsigned) state) < sizeof(stat_nam)/sizeof(char *))
		printk(stat_nam[state]);
	else
		printk(" ");
#if (BITS_PER_LONG == 32)
	if (p == current)
		printk(" current  ");
	else
		printk(" %08lX ", thread_saved_pc(&p->thread));
#else
	if (p == current)
		printk("   current task   ");
	else
		printk(" %016lx ", thread_saved_pc(&p->thread));
#endif
	{
		unsigned long * n = (unsigned long *) (p+1);
		while (!*n)
			n++;
		free = (unsigned long) n - (unsigned long)(p+1);
	}
	printk("%5lu %5d %6d ", free, p->pid, p->p_pptr->pid);
	if (p->p_cptr)
		printk("%5d ", p->p_cptr->pid);
	else
		printk("      ");
	if (p->p_ysptr)
		printk("%7d", p->p_ysptr->pid);
	else
		printk("       ");
	if (p->p_osptr)
		printk(" %5d", p->p_osptr->pid);
	else
		printk("      ");
	if (!p->mm)
		printk(" (L-TLB)\n");
	else
		printk(" (NOTLB)\n");

	{
		extern void show_trace_task(struct task_struct *tsk);
		show_trace_task(p);
	}
}

char * render_sigset_t(sigset_t *set, char *buffer)
{
	int i = _NSIG, x;
	do {
		i -= 4, x = 0;
		if (sigismember(set, i+1)) x |= 1;
		if (sigismember(set, i+2)) x |= 2;
		if (sigismember(set, i+3)) x |= 4;
		if (sigismember(set, i+4)) x |= 8;
		*buffer++ = (x < 10 ? '0' : 'a' - 10) + x;
	} while (i >= 4);
	*buffer = 0;
	return buffer;
}

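/*
 * Illustrative sketch (not part of this file): render_sigset_t() writes one
 * hex digit per four signals, highest-numbered signals first, so the buffer
 * needs at least _NSIG/4 characters plus the terminating NUL.  A caller
 * printing a task's pending set might look like this (hypothetical helper):
 *
 *	void show_pending(struct task_struct *p)
 *	{
 *		char buf[_NSIG / 4 + 1];	// one digit per 4 signals + NUL
 *
 *		render_sigset_t(&p->pending.signal, buf);
 *		printk("pending signals: %s\n", buf);
 *	}
 */
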
void show_state(void)
{
	struct task_struct *p;

#if (BITS_PER_LONG == 32)
	printk("\n"
	       "                         free                        sibling\n");
	printk("  task             PC    stack   pid father child younger older\n");
#else
	printk("\n"
	       "                                 free                        sibling\n");
	printk("  task                 PC        stack   pid father child younger older\n");
#endif
	read_lock(&tasklist_lock);
	for_each_task(p) {
		/*
		 * reset the NMI-timeout, listing all files on a slow
		 * console might take a lot of time:
		 */
		touch_nmi_watchdog();
		show_task(p);
	}
	read_unlock(&tasklist_lock);
}

/**
 * reparent_to_init() - Reparent the calling kernel thread to the init task.
 *
 * If a kernel thread is launched as a result of a system call, or if
 * it ever exits, it should generally reparent itself to init so that
 * it is correctly cleaned up on exit.
 *
 * The various task state such as scheduling policy and priority may have
 * been inherited from a user process, so we reset them to sane values here.
 *
 * NOTE that reparent_to_init() gives the caller full capabilities.
 */
void reparent_to_init(void)
{
	struct task_struct *this_task = current;

	write_lock_irq(&tasklist_lock);

	/* Reparent to init */
	REMOVE_LINKS(this_task);
	this_task->p_pptr = child_reaper;
	this_task->p_opptr = child_reaper;
	SET_LINKS(this_task);

	/* Set the exit signal to SIGCHLD so we signal init on exit */
	this_task->exit_signal = SIGCHLD;

	/* We also take the runqueue_lock while altering task fields
	 * which affect scheduling decisions */
	spin_lock(&runqueue_lock);

	this_task->ptrace = 0;
	this_task->nice = DEF_NICE;
	this_task->policy = SCHED_OTHER;
	/* cpus_allowed? */
	/* rt_priority? */
	/* signals? */
	this_task->cap_effective = CAP_INIT_EFF_SET;
	this_task->cap_inheritable = CAP_INIT_INH_SET;
	this_task->cap_permitted = CAP_FULL_SET;
	this_task->keep_capabilities = 0;
	memcpy(this_task->rlim, init_task.rlim, sizeof(*(this_task->rlim)));
	switch_uid(INIT_USER);

	spin_unlock(&runqueue_lock);
	write_unlock_irq(&tasklist_lock);
}

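/*
 * Illustrative sketch (not part of this file): a kernel thread spawned on
 * behalf of a user process would typically detach itself early on, so that
 * its eventual exit is reaped by init rather than by the original parent.
 * The thread function below is hypothetical:
 *
 *	static int my_worker_thread(void *unused)
 *	{
 *		daemonize();		// drop user-space resources (see below)
 *		reparent_to_init();	// make init our parent, reset policy/caps
 *
 *		for (;;) {
 *			// ... do the actual work ...
 *		}
 *		return 0;
 *	}
 */
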
/*
 *	Put all the gunge required to become a kernel thread without
 *	attached user resources in one place where it belongs.
 */

void daemonize(void)
{
	struct fs_struct *fs;


	/*
	 * If we were started as result of loading a module, close all of the
	 * user space pages.  We don't need them, and if we didn't close them
	 * they would be locked into memory.
	 */
	exit_mm(current);

	current->session = 1;
	current->pgrp = 1;
	current->tty = NULL;

	/* Become as one with the init task */

	exit_fs(current);	/* current->fs->count--; */
	fs = init_task.fs;
	current->fs = fs;
	atomic_inc(&fs->count);
	exit_files(current);
	current->files = init_task.files;
	atomic_inc(&current->files->count);
}

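/*
 * Illustrative sketch (not part of this file): a module that starts a kernel
 * thread via kernel_thread() would call daemonize() first thing in the new
 * thread, then give it a readable name.  Names below are hypothetical:
 *
 *	static int my_kthread(void *unused)
 *	{
 *		daemonize();
 *		strcpy(current->comm, "my_kthread");
 *
 *		for (;;) {
 *			// ... service requests, sleep, etc. ...
 *		}
 *		return 0;
 *	}
 *
 *	// started from the module's init function:
 *	//	kernel_thread(my_kthread, NULL, CLONE_FS | CLONE_FILES);
 */
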
extern unsigned long wait_init_idle;

void __init init_idle(void)
{
	struct schedule_data * sched_data;
	sched_data = &aligned_data[smp_processor_id()].schedule_data;

	if (current != &init_task && task_on_runqueue(current)) {
		printk("UGH! (%d:%d) was on the runqueue, removing.\n",
			smp_processor_id(), current->pid);
		del_from_runqueue(current);
	}
	sched_data->curr = current;
	sched_data->last_schedule = get_cycles();
	clear_bit(current->processor, &wait_init_idle);
}

extern void init_timervecs (void);

void __init sched_init(void)
{
	/*
	 * We have to do a little magic to get the first
	 * process right in SMP mode.
	 */
	int cpu = smp_processor_id();
	int nr;

	init_task.processor = cpu;

	for(nr = 0; nr < PIDHASH_SZ; nr++)
		pidhash[nr] = NULL;

	init_timervecs();

	init_bh(TIMER_BH, timer_bh);
	init_bh(TQUEUE_BH, tqueue_bh);
	init_bh(IMMEDIATE_BH, immediate_bh);

	/*
	 * The boot idle thread does lazy MMU switching as well:
	 */
	atomic_inc(&init_mm.mm_count);
	enter_lazy_tlb(&init_mm, current, cpu);
}
