/*
 * Some ktcb related data
 *
 * Copyright (C) 2007 - 2009 Bahadir Balban
 */
#include <l4/generic/tcb.h>
#include <l4/generic/space.h>
#include <l4/generic/scheduler.h>
#include <l4/generic/container.h>
#include <l4/generic/preempt.h>
#include <l4/lib/idpool.h>
#include <l4/api/ipc.h>
#include <l4/api/kip.h>
#include <l4/api/errno.h>
#include INC_ARCH(exception.h)
#include INC_SUBARCH(mm.h)
#include INC_GLUE(memory.h)
#include INC_GLUE(mapping.h)
#include INC_SUBARCH(mmu_ops.h)

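/* Reset a ktcb list head to an empty, unlocked state. */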
void init_ktcb_list(struct ktcb_list *ktcb_list)
{
        memset(ktcb_list, 0, sizeof(*ktcb_list));
        spin_lock_init(&ktcb_list->list_lock);
        link_init(&ktcb_list->list);
}

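/*
 * Initialise a newly allocated ktcb: its list link, locks,
 * capability list, scheduler state and ipc waitqueues.
 */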
void tcb_init(struct ktcb *new)
{
        link_init(&new->task_list);
        mutex_init(&new->thread_control_lock);

        spin_lock_init(&new->thread_lock);

        cap_list_init(&new->cap_list);

        /* Initialise task's scheduling state and parameters. */
        sched_init_task(new, TASK_PRIO_NORMAL);

        /* Initialise ipc waitqueues */
        spin_lock_init(&new->waitlock);
        waitqueue_head_init(&new->wqh_send);
        waitqueue_head_init(&new->wqh_recv);
        waitqueue_head_init(&new->wqh_pager);
}

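/*
 * Allocate and initialise a new ktcb. The thread id comes from the
 * global ktcb id pool and is tagged with the container id via
 * TASK_CID_SHIFT/TASK_CID_MASK; tgid and spid start as L4_NILTHREAD.
 */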
struct ktcb *tcb_alloc_init(l4id_t cid)
{
        struct ktcb *tcb;
        struct task_ids ids;

        if (!(tcb = alloc_ktcb()))
                return 0;

        ids.tid = id_new(&kernel_resources.ktcb_ids);
        ids.tid |= TASK_CID_MASK & (cid << TASK_CID_SHIFT);
        ids.tgid = L4_NILTHREAD;
        ids.spid = L4_NILTHREAD;

        set_task_ids(tcb, &ids);

        tcb_init(tcb);

        return tcb;
}

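/*
 * Final destruction of an inactive, unlinked thread: drops the
 * reference it holds on its address space (deleting the space when
 * the last reference goes), releases its thread id and frees the
 * ktcb. Deletions are accounted to the thread's pager if it still
 * exists, otherwise to the current task.
 */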
void tcb_delete(struct ktcb *tcb)
{
        struct ktcb *pager, *acc_task;

        /* Sanity checks first */
        BUG_ON(!is_page_aligned(tcb));
        BUG_ON(tcb->wqh_pager.sleepers > 0);
        BUG_ON(tcb->wqh_send.sleepers > 0);
        BUG_ON(tcb->wqh_recv.sleepers > 0);
        BUG_ON(tcb->affinity != current->affinity);
        BUG_ON(tcb->state != TASK_INACTIVE);
        BUG_ON(!list_empty(&tcb->rq_list));
        BUG_ON(tcb->rq);
        BUG_ON(tcb == current);
        BUG_ON(tcb->nlocks);
        BUG_ON(tcb->waiting_on);
        BUG_ON(tcb->wq);

        /* Remove from zombie list */
        list_remove(&tcb->task_list);

        /* Determine the task to account deletions to */
        if (!(pager = tcb_find(tcb->pagerid)))
                acc_task = current;
        else
                acc_task = pager;

        /*
         * NOTE: This protects single-threaded space
         * deletion against space modification.
         *
         * If space deletion were multi-threaded, a list
         * traversal would be needed to ensure the space
         * is still on the list.
         */
        mutex_lock(&tcb->container->space_list.lock);
        mutex_lock(&tcb->space->lock);
        BUG_ON(--tcb->space->ktcb_refs < 0);

        /* No refs left for the space, delete it */
        if (tcb->space->ktcb_refs == 0) {
                address_space_remove(tcb->space, tcb->container);
                mutex_unlock(&tcb->space->lock);
                address_space_delete(tcb->space, acc_task);
                mutex_unlock(&tcb->container->space_list.lock);
        } else {
                mutex_unlock(&tcb->space->lock);
                mutex_unlock(&tcb->container->space_list.lock);
        }

        /* Clear container id part */
        tcb->tid &= ~TASK_CID_MASK;

        /* Deallocate tcb ids */
        id_del(&kernel_resources.ktcb_ids, tcb->tid);

        /* Free the tcb */
        free_ktcb(tcb, acc_task);
}

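/* Find a thread in the current container by its address space id. */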
struct ktcb *tcb_find_by_space(l4id_t spid)
{
        struct ktcb *task;

        spin_lock(&curcont->ktcb_list.list_lock);
        list_foreach_struct(task, &curcont->ktcb_list.list, task_list) {
                if (task->space->spid == spid) {
                        spin_unlock(&curcont->ktcb_list.list_lock);
                        return task;
                }
        }
        spin_unlock(&curcont->ktcb_list.list_lock);
        return 0;
}

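/* Find a thread by thread id in the given container. */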
struct ktcb *container_find_tcb(struct container *c, l4id_t tid)
{
        struct ktcb *task;

        spin_lock(&c->ktcb_list.list_lock);
        list_foreach_struct(task, &c->ktcb_list.list, task_list) {
                if (task->tid == tid) {
                        spin_unlock(&c->ktcb_list.list_lock);
                        return task;
                }
        }
        spin_unlock(&c->ktcb_list.list_lock);
        return 0;
}

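/*
 * Same as container_find_tcb(), but the thread is returned with its
 * thread_lock held; the caller is responsible for releasing it.
 */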
struct ktcb *container_find_lock_tcb(struct container *c, l4id_t tid)
{
        struct ktcb *task;

        spin_lock(&c->ktcb_list.list_lock);
        list_foreach_struct(task, &c->ktcb_list.list, task_list) {
                if (task->tid == tid) {
                        spin_lock(&task->thread_lock);
                        spin_unlock(&c->ktcb_list.list_lock);
                        return task;
                }
        }
        spin_unlock(&c->ktcb_list.list_lock);
        return 0;
}

/*
 * Threads are the only resource for which inter-container searches
 * are allowed. This is because threads are the only objects in other
 * containers that can be targeted by operations such as ipc or
 * memory sharing. Currently you can't reach a space, a mutex, or any
 * other resource in another container.
 */
struct ktcb *tcb_find(l4id_t tid)
{
        struct container *c;

        if (current->tid == tid)
                return current;

        if (tid_to_cid(tid) == curcont->cid) {
                return container_find_tcb(curcont, tid);
        } else {
                if (!(c = container_find(&kernel_resources,
                                         tid_to_cid(tid))))
                        return 0;
                else
                        return container_find_tcb(c, tid);
        }
}

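/*
 * Same as tcb_find(), but the thread is returned with its
 * thread_lock held.
 */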
struct ktcb *tcb_find_lock(l4id_t tid)
{
        struct container *c;

        if (current->tid == tid) {
                spin_lock(&current->thread_lock);
                return current;
        }

        if (tid_to_cid(tid) == curcont->cid) {
                return container_find_lock_tcb(curcont, tid);
        } else {
                if (!(c = container_find(&kernel_resources,
                                         tid_to_cid(tid))))
                        return 0;
                else
                        return container_find_lock_tcb(c, tid);
        }
}

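/* Add a thread to the given ktcb list, updating the list count. */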
void ktcb_list_add(struct ktcb *new, struct ktcb_list *ktcb_list)
{
        spin_lock(&ktcb_list->list_lock);
        BUG_ON(!list_empty(&new->task_list));
        BUG_ON(!++ktcb_list->count);
        list_insert(&new->task_list, &ktcb_list->list);
        spin_unlock(&ktcb_list->list_lock);
}

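/* Add a thread to its own container's ktcb list. */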
void tcb_add(struct ktcb *new)
{
        struct container *c = new->container;

        spin_lock(&c->ktcb_list.list_lock);
        BUG_ON(!list_empty(&new->task_list));
        BUG_ON(!++c->ktcb_list.count);
        list_insert(&new->task_list, &c->ktcb_list.list);
        spin_unlock(&c->ktcb_list.list_lock);
}

/*
 * It's important that this is per-cpu, because it must be
 * guaranteed that the task is not runnable. The idle task
 * on that cpu guarantees it.
 */
void tcb_delete_zombies(void)
{
        struct ktcb *zombie, *n;
        struct ktcb_list *ktcb_list =
                &per_cpu(kernel_resources.zombie_list);

        /* Traverse the per-cpu zombie list */
        spin_lock(&ktcb_list->list_lock);
        list_foreach_removable_struct(zombie, n,
                                      &ktcb_list->list,
                                      task_list)
                /* Delete all zombies one by one */
                tcb_delete(zombie);
        spin_unlock(&ktcb_list->list_lock);
}

/*
 * It's enough to lock the list and the thread without traversing
 * the list, because we're only protecting against thread
 * modification. Deletion is a single-threaded operation.
 */
void tcb_remove(struct ktcb *task)
{
        /* Lock list */
        spin_lock(&curcont->ktcb_list.list_lock);
        BUG_ON(list_empty(&task->task_list));
        BUG_ON(--curcont->ktcb_list.count < 0);
        spin_lock(&task->thread_lock);

        list_remove_init(&task->task_list);
        spin_unlock(&curcont->ktcb_list.list_lock);
        spin_unlock(&task->thread_lock);
}

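/* Remove a thread from the given ktcb list, updating the list count. */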
void ktcb_list_remove(struct ktcb *new, struct ktcb_list *ktcb_list)
{
        spin_lock(&ktcb_list->list_lock);
        BUG_ON(list_empty(&new->task_list));
        BUG_ON(--ktcb_list->count < 0);
        list_remove(&new->task_list);
        spin_unlock(&ktcb_list->list_lock);
}

/* Offsets for ktcb fields that are accessed from assembler */
unsigned int need_resched_offset = offsetof(struct ktcb, ts_need_resched);
unsigned int syscall_regs_offset = offsetof(struct ktcb, syscall_regs);

/*
 * Every thread has a unique utcb region that is mapped into its
 * address space as its context is loaded. The utcb region is
 * determined by this mapping and by the offset reached via the
 * KIP UTCB pointer.
 */
void task_update_utcb(struct ktcb *task)
{
        arch_update_utcb(task->utcb_address);
}

/*
 * Checks whether a task's utcb is currently accessible by the kernel.
 * Returns an error if it is not paged in yet; maps a non-current
 * task's utcb into the current task for kernel-only access if it is
 * not yet mapped there.
 *
 * UTCB Mappings: The design is that each task maps its own utcb with
 * user access, and any other task's utcb is mapped with kernel-only
 * access privileges upon an ipc that requires the kernel to access
 * that utcb. In other words, foreign utcbs are mapped lazily.
 */
int tcb_check_and_lazy_map_utcb(struct ktcb *task, int page_in)
{
        unsigned int phys;
        int ret;

        if (!task->utcb_address)
                return -ENOUTCB;

        /*
         * If task == current && not mapped && page_in,
         *      page-in, if not return -EFAULT
         * If task == current && not mapped && !page_in,
         *      return -EFAULT
         * If task != current && not mapped,
         *      return -EFAULT since can't page-in on behalf of it.
         * If task != current && task mapped,
         *      but mapped != current mapped, map it, return 0
         * If task != current && task mapped,
         *      but mapped == current mapped, return 0
         */

        /* FIXME:
         *
         * Do the check_access part without distinguishing current/non-current
         * Do the rest (i.e. mapping the value to the current table) only if the utcb is non-current
         */

        if (current == task) {
                /* Check own utcb, if not there, page it in */
                if ((ret = check_access(task->utcb_address, UTCB_SIZE,
                                        MAP_KERN_RW, page_in)) < 0)
                        return -EFAULT;
                else
                        return 0;
        } else {
                /* Check another's utcb, but don't try to map in */
                if ((ret = check_access_task(task->utcb_address,
                                             UTCB_SIZE,
                                             MAP_KERN_RW, 0,
                                             task)) < 0) {
                        return -EFAULT;
                } else {
                        /*
                         * Task has it mapped, map it to self
                         * unless they're identical
                         */
                        if ((phys =
                             virt_to_phys_by_pgd(TASK_PGD(task),
                                                 task->utcb_address)) !=
                             virt_to_phys_by_pgd(TASK_PGD(current),
                                                 task->utcb_address)) {
                                /*
                                 * We have none or an old reference.
                                 * Update it with privileged flags,
                                 * so that only kernel can access.
                                 */
                                add_mapping_pgd(phys, page_align(task->utcb_address),
                                                page_align_up(UTCB_SIZE),
                                                MAP_KERN_RW,
                                                TASK_PGD(current));
                        }
                        BUG_ON(!phys);
                }
        }
        return 0;
}