linux_sd_driver/kernel/sched_rt.c (trunk, rev 62)
/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
        struct task_struct *curr = rq->curr;
        u64 delta_exec;

        if (!task_has_rt_policy(curr))
                return;

        delta_exec = rq->clock - curr->se.exec_start;
        if (unlikely((s64)delta_exec < 0))
                delta_exec = 0;

        schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));

        curr->se.sum_exec_runtime += delta_exec;
        curr->se.exec_start = rq->clock;
        cpuacct_charge(curr, delta_exec);
}

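/*
 * Add a task to the tail of the run list for its priority and mark
 * that priority as populated in the bitmap:
 */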
static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
{
        struct rt_prio_array *array = &rq->rt.active;

        list_add_tail(&p->run_list, array->queue + p->prio);
        __set_bit(p->prio, array->bitmap);
}

/*
 * Adding/removing a task to/from a priority array:
 */
static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
{
        struct rt_prio_array *array = &rq->rt.active;

        update_curr_rt(rq);

        list_del(&p->run_list);
        if (list_empty(array->queue + p->prio))
                __clear_bit(p->prio, array->bitmap);
}

/*
 * Put task to the end of the run list without the overhead of dequeue
 * followed by enqueue.
 */
static void requeue_task_rt(struct rq *rq, struct task_struct *p)
{
        struct rt_prio_array *array = &rq->rt.active;

        list_move_tail(&p->run_list, array->queue + p->prio);
}

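/*
 * Yield for RT tasks: requeue the current task behind any other
 * tasks of the same priority:
 */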
static void yield_task_rt(struct rq *rq)
{
        requeue_task_rt(rq, rq->curr);
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
{
        if (p->prio < rq->curr->prio)
                resched_task(rq->curr);
}

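/*
 * Pick the highest-priority runnable RT task: find the first set bit
 * in the priority bitmap and take the head of that queue:
 */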
static struct task_struct *pick_next_task_rt(struct rq *rq)
{
        struct rt_prio_array *array = &rq->rt.active;
        struct task_struct *next;
        struct list_head *queue;
        int idx;

        idx = sched_find_first_bit(array->bitmap);
        if (idx >= MAX_RT_PRIO)
                return NULL;

        queue = array->queue + idx;
        next = list_entry(queue->next, struct task_struct, run_list);

        next->se.exec_start = rq->clock;

        return next;
}

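/*
 * The previous task is being switched out: fold its runtime into the
 * statistics and clear its execution timestamp:
 */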
static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
        update_curr_rt(rq);
        p->se.exec_start = 0;
}

#ifdef CONFIG_SMP
/*
 * Load-balancing iterator. Note: while the runqueue stays locked
 * during the whole iteration, the current task might be
 * dequeued so the iterator has to be dequeue-safe. Here we
 * achieve that by always pre-iterating before returning
 * the current task:
 */
static struct task_struct *load_balance_start_rt(void *arg)
{
        struct rq *rq = arg;
        struct rt_prio_array *array = &rq->rt.active;
        struct list_head *head, *curr;
        struct task_struct *p;
        int idx;

        idx = sched_find_first_bit(array->bitmap);
        if (idx >= MAX_RT_PRIO)
                return NULL;

        head = array->queue + idx;
        curr = head->prev;

        p = list_entry(curr, struct task_struct, run_list);

        curr = curr->prev;

        rq->rt.rt_load_balance_idx = idx;
        rq->rt.rt_load_balance_head = head;
        rq->rt.rt_load_balance_curr = curr;

        return p;
}

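/*
 * Return the next task in the iteration, moving on to the next
 * non-empty priority queue once the current one has been exhausted:
 */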
static struct task_struct *load_balance_next_rt(void *arg)
{
        struct rq *rq = arg;
        struct rt_prio_array *array = &rq->rt.active;
        struct list_head *head, *curr;
        struct task_struct *p;
        int idx;

        idx = rq->rt.rt_load_balance_idx;
        head = rq->rt.rt_load_balance_head;
        curr = rq->rt.rt_load_balance_curr;

        /*
         * If we arrived back to the head again then
         * iterate to the next queue (if any):
         */
        if (unlikely(head == curr)) {
                int next_idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);

                if (next_idx >= MAX_RT_PRIO)
                        return NULL;

                idx = next_idx;
                head = array->queue + idx;
                curr = head->prev;

                rq->rt.rt_load_balance_idx = idx;
                rq->rt.rt_load_balance_head = head;
        }

        p = list_entry(curr, struct task_struct, run_list);

        curr = curr->prev;

        rq->rt.rt_load_balance_curr = curr;

        return p;
}

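/*
 * Pull up to max_load_move of weighted load from the busiest runqueue
 * by plugging the RT iterator into the generic balance_tasks() helper:
 */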
static unsigned long
load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
                unsigned long max_load_move,
                struct sched_domain *sd, enum cpu_idle_type idle,
                int *all_pinned, int *this_best_prio)
{
        struct rq_iterator rt_rq_iterator;

        rt_rq_iterator.start = load_balance_start_rt;
        rt_rq_iterator.next = load_balance_next_rt;
        /*
         * Pass the 'busiest' rq argument into the
         * load_balance_[start|next]_rt iterators:
         */
        rt_rq_iterator.arg = busiest;

        return balance_tasks(this_rq, this_cpu, busiest, max_load_move, sd,
                             idle, all_pinned, this_best_prio, &rt_rq_iterator);
}

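/*
 * Try to migrate a single task away from the busiest runqueue, using
 * the same iterator:
 */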
static int
move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
                 struct sched_domain *sd, enum cpu_idle_type idle)
{
        struct rq_iterator rt_rq_iterator;

        rt_rq_iterator.start = load_balance_start_rt;
        rt_rq_iterator.next = load_balance_next_rt;
        rt_rq_iterator.arg = busiest;

        return iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
                                  &rt_rq_iterator);
}
#endif

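/*
 * Scheduler tick: update runtime statistics and handle round-robin
 * timeslice expiry:
 */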
static void task_tick_rt(struct rq *rq, struct task_struct *p)
{
        update_curr_rt(rq);

        /*
         * RR tasks need a special form of timeslice management.
         * FIFO tasks have no timeslices.
         */
        if (p->policy != SCHED_RR)
                return;

        if (--p->time_slice)
                return;

        p->time_slice = DEF_TIMESLICE;

        /*
         * Requeue to the end of queue if we are not the only element
         * on the queue:
         */
        if (p->run_list.prev != p->run_list.next) {
                requeue_task_rt(rq, p);
                set_tsk_need_resched(p);
        }
}

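/*
 * The currently running task has just entered the RT class: restart
 * its runtime accounting from the current runqueue clock:
 */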
static void set_curr_task_rt(struct rq *rq)
{
        struct task_struct *p = rq->curr;

        p->se.exec_start = rq->clock;
}

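/*
 * All methods of the real-time scheduling class. The .next link chains
 * to the fair class, which the core scheduler falls back to when no
 * RT task is runnable:
 */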
const struct sched_class rt_sched_class = {
        .next                   = &fair_sched_class,
        .enqueue_task           = enqueue_task_rt,
        .dequeue_task           = dequeue_task_rt,
        .yield_task             = yield_task_rt,

        .check_preempt_curr     = check_preempt_curr_rt,

        .pick_next_task         = pick_next_task_rt,
        .put_prev_task          = put_prev_task_rt,

#ifdef CONFIG_SMP
        .load_balance           = load_balance_rt,
        .move_one_task          = move_one_task_rt,
#endif

        .set_curr_task          = set_curr_task_rt,
        .task_tick              = task_tick_rt,
};
