c0or1k/trunk/src/api/thread.c (rev 6)
URL: https://opencores.org/ocsvn/c0or1k/c0or1k/trunk

/*
 * Thread related system calls.
 *
 * Copyright (C) 2007 Bahadir Balban
 */
#include <l4/generic/scheduler.h>
#include <l4/generic/container.h>
#include <l4/api/thread.h>
#include <l4/api/syscall.h>
#include <l4/api/errno.h>
#include <l4/generic/tcb.h>
#include <l4/lib/idpool.h>
#include <l4/lib/mutex.h>
#include <l4/lib/wait.h>
#include <l4/generic/resource.h>
#include <l4/generic/capability.h>
#include INC_ARCH(asm.h)
#include INC_SUBARCH(mm.h)
#include INC_GLUE(mapping.h)

int sys_thread_switch(void)
{
        schedule();
        return 0;
}

/*
 * This signals a thread so that the thread stops what it is
 * doing and takes action on the signal provided. Currently this
 * may be a suspension or an exit signal.
 */
int thread_signal(struct ktcb *task, unsigned int flags,
                  unsigned int task_state)
{
        int ret = 0;

        if (task->state == task_state)
                return 0;

        /* Signify the action (suspend or exit) we want from the thread */
        task->flags |= flags;

        /* Wake it up if it's sleeping */
        wake_up_task(task, WAKEUP_INTERRUPT | WAKEUP_SYNC);

        /* Wait until the task switches to the desired state */
        WAIT_EVENT(&task->wqh_pager,
                   task->state == task_state, ret);

        return ret;
}
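
/*
 * Illustrative sketch of the handshake above: the pager sets a flag,
 * interrupts the target's sleep, then sleeps on task->wqh_pager until
 * the target parks itself in the requested state. A minimal caller,
 * mirroring thread_suspend() below:
 *
 *      int err;
 *
 *      if ((err = thread_signal(task, TASK_SUSPENDING, TASK_INACTIVE)) < 0)
 *              return err;     (woken by an async event instead)
 */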
52
 
53
int thread_suspend(struct ktcb *task)
54
{
55
        return thread_signal(task, TASK_SUSPENDING, TASK_INACTIVE);
56
}

int thread_exit(struct ktcb *task)
{
        return thread_signal(task, TASK_EXITING, TASK_INACTIVE);
}

static inline int task_is_child(struct ktcb *task)
{
        return task != current &&
               task->pagerid == current->tid;
}

int thread_destroy_child(struct ktcb *task)
{
        /* Wait until the thread exits */
        thread_exit(task);

        /* Hint the scheduler that an exit has occurred */
        current->flags |= TASK_EXITED;

        /* Now remove it atomically */
        tcb_remove(task);

        /* Wake up waiters that arrived before the removal */
        wake_up_all(&task->wqh_send, WAKEUP_INTERRUPT);
        wake_up_all(&task->wqh_recv, WAKEUP_INTERRUPT);

        BUG_ON(task->wqh_pager.sleepers > 0);
        BUG_ON(task->state != TASK_INACTIVE);

        /* Place the task on the zombie queue of its cpu */
        ktcb_list_add(task, &per_cpu_byid(kernel_resources.zombie_list,
                                          task->affinity));

        return 0;
}

int thread_destroy_children(void)
{
        struct ktcb *task, *n;

        spin_lock(&curcont->ktcb_list.list_lock);
        list_foreach_removable_struct(task, n,
                                      &curcont->ktcb_list.list,
                                      task_list) {
                if (task_is_child(task)) {
                        spin_unlock(&curcont->ktcb_list.list_lock);
                        thread_destroy_child(task);
                        spin_lock(&curcont->ktcb_list.list_lock);
                }
        }
        spin_unlock(&curcont->ktcb_list.list_lock);
        return 0;
}
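
/*
 * Illustrative note on the locking above: thread_destroy_child() blocks
 * in WAIT_EVENT() until the child reaches TASK_INACTIVE, so the ktcb
 * list spinlock cannot be held across that call; it is dropped for each
 * child and re-taken before the iteration continues.
 */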

void thread_destroy_self(unsigned int exit_code)
{
        /* Destroy all children first */
        thread_destroy_children();

        /* If self-paged, finish everything except the final deletion */
        if (current->tid == current->pagerid) {
                /* Remove self, safe against racing ipc */
                tcb_remove(current);

                /* Wake up any waiters queued up before the removal */
                wake_up_all(&current->wqh_send, WAKEUP_INTERRUPT);
                wake_up_all(&current->wqh_recv, WAKEUP_INTERRUPT);

                /* Move capabilities to the current cpu's idle task */
                cap_list_move(&per_cpu(scheduler).idle_task->cap_list,
                              &current->cap_list);

                /* Place self on the per-cpu zombie queue */
                ktcb_list_add(current, &per_cpu(kernel_resources.zombie_list));
        }

        /*
         * Both a child and a self-paging thread set the exit
         * code and quit the scheduler here.
         */
        current->exit_code = exit_code;

        /* Hint the scheduler that an exit has occurred */
        current->flags |= TASK_EXITED;
        sched_suspend_sync();
}
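
/*
 * Illustrative note: a self-paged thread cannot free its own tcb and
 * stack while still running on them, which is presumably why it only
 * parks itself on the zombie queue above and leaves final reclamation
 * to another context; a child-paged thread is instead reaped by its
 * pager via thread_destroy_child() or thread_wait().
 */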

int thread_wait(struct ktcb *task)
{
        int exit_code;
        int ret;

        // printk("%s: (%d) for (%d)\n", __FUNCTION__, current->tid, task->tid);

        /* Wait until the task switches to the desired state */
        WAIT_EVENT(&task->wqh_pager,
                   task->state == TASK_INACTIVE, ret);

        /* Return if interrupted by an async event */
        if (ret < 0)
                return ret;

        /* Now remove it, safe against racing ipc */
        tcb_remove(task);

        /* Wake up waiters that arrived before the removal */
        wake_up_all(&task->wqh_send, WAKEUP_INTERRUPT);
        wake_up_all(&task->wqh_recv, WAKEUP_INTERRUPT);

        BUG_ON(task->wqh_pager.sleepers > 0);
        BUG_ON(task->state != TASK_INACTIVE);

        /* Obtain the exit code */
        exit_code = (int)task->exit_code;

        /* Place it on the zombie queue */
        ktcb_list_add(task,
                      &per_cpu_byid(kernel_resources.zombie_list,
                                    task->affinity));

        return exit_code;
}
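
/*
 * Illustrative pager-side sketch, assuming a userspace wrapper around
 * sys_thread_control() (l4_thread_control is an assumed name, not
 * defined in this file):
 *
 *      int code = l4_thread_control(THREAD_WAIT, &ids);
 *
 *      if (code < 0)
 *              ... interrupted by an async event, no thread reaped ...
 *      else
 *              ... code is the child's exit code, masked with
 *                  THREAD_EXIT_MASK by thread_destroy() ...
 */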

int thread_destroy(struct ktcb *task, unsigned int exit_code)
{
        // printk("%s: (%d) for (%d)\n", __FUNCTION__, current->tid, task->tid);

        exit_code &= THREAD_EXIT_MASK;

        if (task_is_child(task))
                return thread_destroy_child(task);
        else if (task == current)
                thread_destroy_self(exit_code);
        else
                BUG();

        return 0;
}

int arch_clear_thread(struct ktcb *tcb)
{
        /* Remove from the global list */
        tcb_remove(tcb);

        /* Sanity checks */
        BUG_ON(!is_page_aligned(tcb));
        BUG_ON(tcb->wqh_pager.sleepers > 0);
        BUG_ON(tcb->wqh_send.sleepers > 0);
        BUG_ON(tcb->wqh_recv.sleepers > 0);
        BUG_ON(!list_empty(&tcb->task_list));
        BUG_ON(!list_empty(&tcb->rq_list));
        BUG_ON(tcb->rq);
        BUG_ON(tcb->nlocks);
        BUG_ON(tcb->waiting_on);
        BUG_ON(tcb->wq);

        /* Reinitialize the context */
        memset(&tcb->context, 0, sizeof(tcb->context));
        tcb->context.spsr = ARM_MODE_USR;

        /* Clear the page tables */
        remove_mapping_pgd_all_user(TASK_PGD(tcb));

        /* Reinitialize all other fields */
        tcb_init(tcb);

        /* Add back to the global list */
        tcb_add(tcb);

        return 0;
}

int thread_recycle(struct ktcb *task)
{
        int ret;

        if ((ret = thread_suspend(task)) < 0)
                return ret;

        /*
         * If there are any sleepers on any of the task's
         * waitqueues, we need to wake those tasks up.
         */
        wake_up_all(&task->wqh_send, 0);
        wake_up_all(&task->wqh_recv, 0);

        /*
         * The thread cannot have a pager waiting for it
         * since we ought to be the pager.
         */
        BUG_ON(task->wqh_pager.sleepers > 0);

        /* Clear the task's tcb */
        arch_clear_thread(task);

        return 0;
}

/* Runs a thread for the first time */
int thread_start(struct ktcb *task)
{
        if (!mutex_trylock(&task->thread_control_lock))
                return -EAGAIN;

        /* Notify scheduler of task resume */
        sched_resume_async(task);

        /* Release lock and return */
        mutex_unlock(&task->thread_control_lock);

        return 0;
}
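
/*
 * Illustrative sketch of the create-then-run sequence a pager would
 * drive through sys_thread_control() (l4_thread_control and the id
 * setup are assumptions about the userspace side, not defined here):
 *
 *      struct task_ids ids = ...;      (request a new tid/spid)
 *
 *      l4_thread_control(THREAD_CREATE | TC_NEW_SPACE, &ids);
 *      ... set up the child's registers, stack and pc ...
 *      l4_thread_control(THREAD_RUN, &ids);
 */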

int arch_setup_new_thread(struct ktcb *new, struct ktcb *orig,
                          unsigned int flags)
{
        /* New threads just need their mode set up */
        if (flags & TC_NEW_SPACE) {
                BUG_ON(orig);
                new->context.spsr = ARM_MODE_USR;
                return 0;
        }

        BUG_ON(!orig);

        /* If the original has no syscall context yet, don't copy */
        if (!orig->syscall_regs)
                return 0;

        /*
         * For duplicated threads the pre-syscall context is saved on
         * the kernel stack. We copy the original's saved context into
         * the duplicate thread's context structure.
         *
         * No locks are needed as the thread is not yet known to the system.
         */
        BUG_ON(!(new->context.spsr = orig->syscall_regs->spsr)); /* User mode */
        new->context.r0 = orig->syscall_regs->r0;
        new->context.r1 = orig->syscall_regs->r1;
        new->context.r2 = orig->syscall_regs->r2;
        new->context.r3 = orig->syscall_regs->r3;
        new->context.r4 = orig->syscall_regs->r4;
        new->context.r5 = orig->syscall_regs->r5;
        new->context.r6 = orig->syscall_regs->r6;
        new->context.r7 = orig->syscall_regs->r7;
        new->context.r8 = orig->syscall_regs->r8;
        new->context.r9 = orig->syscall_regs->r9;
        new->context.r10 = orig->syscall_regs->r10;
        new->context.r11 = orig->syscall_regs->r11;
        new->context.r12 = orig->syscall_regs->r12;
        new->context.sp = orig->syscall_regs->sp_usr;
        /* Skip lr_svc since it's not going to be used */
        new->context.pc = orig->syscall_regs->lr_usr;

        /* Distribute the original thread's ticks between the two threads */
        new->ticks_left = (orig->ticks_left + 1) >> 1;
        if (!(orig->ticks_left >>= 1))
                orig->ticks_left = 1;

        return 0;
}
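
/*
 * Illustrative note on the tick split above: the new thread gets the
 * rounded-up half and the original keeps the rounded-down half, with a
 * floor of one tick. E.g. ticks_left = 5 gives new = 3 and orig = 2;
 * ticks_left = 1 gives new = 1 and orig is bumped back up to 1.
 */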

static DECLARE_SPINLOCK(task_select_affinity_lock);
static unsigned int cpu_rr_affinity;

/* Select which cpu to place the new task on, in round-robin fashion */
void thread_setup_affinity(struct ktcb *task)
{
        spin_lock(&task_select_affinity_lock);
        task->affinity = cpu_rr_affinity;

        //printk("Set up thread %d affinity=%d\n",
        //       task->tid, task->affinity);

        cpu_rr_affinity++;
        if (cpu_rr_affinity >= CONFIG_NCPU)
                cpu_rr_affinity = 0;

        spin_unlock(&task_select_affinity_lock);
}
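
/*
 * Illustrative note: with CONFIG_NCPU = 4, successive calls assign
 * affinities 0, 1, 2, 3, 0, 1, ... The spinlock only serializes the
 * round-robin counter; the chosen affinity may still be overridden by
 * a later exchange_registers call (see the comment in thread_create).
 */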

static inline void
thread_setup_new_ids(struct task_ids *ids, unsigned int flags,
                     struct ktcb *new, struct ktcb *orig)
{
        if (flags & TC_SHARE_GROUP)
                new->tgid = orig->tgid;
        else
                new->tgid = new->tid;

        /* Update the ids to be returned back to the caller */
        ids->tid = new->tid;
        ids->tgid = new->tgid;
}

int thread_setup_space(struct ktcb *tcb, struct task_ids *ids, unsigned int flags)
{
        struct address_space *space, *new;
        int ret = 0;

        if (flags & TC_SHARE_SPACE) {
                mutex_lock(&curcont->space_list.lock);
                if (!(space = address_space_find(ids->spid))) {
                        mutex_unlock(&curcont->space_list.lock);
                        ret = -ESRCH;
                        goto out;
                }
                mutex_lock(&space->lock);
                mutex_unlock(&curcont->space_list.lock);
                address_space_attach(tcb, space);
                mutex_unlock(&space->lock);
        } else if (flags & TC_COPY_SPACE) {
                mutex_lock(&curcont->space_list.lock);
                if (!(space = address_space_find(ids->spid))) {
                        mutex_unlock(&curcont->space_list.lock);
                        ret = -ESRCH;
                        goto out;
                }
                mutex_lock(&space->lock);
                if (IS_ERR(new = address_space_create(space))) {
                        mutex_unlock(&curcont->space_list.lock);
                        mutex_unlock(&space->lock);
                        ret = (int)new;
                        goto out;
                }
                mutex_unlock(&space->lock);
                ids->spid = new->spid;  /* Return the new id to the caller */
                address_space_attach(tcb, new);
                address_space_add(new);
                mutex_unlock(&curcont->space_list.lock);
        } else if (flags & TC_NEW_SPACE) {
                if (IS_ERR(new = address_space_create(0))) {
                        ret = (int)new;
                        goto out;
                }
                /* New space id to be returned back to the caller */
                ids->spid = new->spid;
                address_space_attach(tcb, new);
                mutex_lock(&curcont->space_list.lock);
                address_space_add(new);
                mutex_unlock(&curcont->space_list.lock);
        }

out:
        return ret;
}
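
/*
 * Illustrative summary of the three space directives handled above:
 *
 *      TC_SHARE_SPACE  attach the tcb to the existing space ids->spid
 *      TC_COPY_SPACE   clone the space ids->spid, return the new spid
 *      TC_NEW_SPACE    create an empty space, return the new spid
 *
 * All three leave the resulting space id in ids->spid for the caller
 * (unchanged in the share case).
 */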

int thread_create(struct task_ids *ids, unsigned int flags)
{
        struct ktcb *new;
        struct ktcb *orig = 0;
        int err;

        /* Clear flags down to just the creation flags */
        flags &= THREAD_CREATE_MASK;

        /* Can't have no flags, or multiple space directives in flags */
        if (!flags ||
            ((flags & THREAD_SPACE_MASK) &
             ((flags & THREAD_SPACE_MASK) - 1)))
                return -EINVAL;

        /* Must have one space flag */
        if ((flags & THREAD_SPACE_MASK) == 0)
                return -EINVAL;

        /* Can't request a shared utcb or tgid without a shared space */
        if (!(flags & TC_SHARE_SPACE)) {
                if ((flags & TC_SHARE_UTCB) ||
                    (flags & TC_SHARE_GROUP)) {
                        return -EINVAL;
                }
        }

        if (!(new = tcb_alloc_init(curcont->cid)))
                return -ENOMEM;

        /* Set up the new thread's space using the space id and flags */
        if ((err = thread_setup_space(new, ids, flags)) < 0)
                goto out_err;

        /* Obtain the parent thread if there is one */
        if (flags & TC_SHARE_SPACE || flags & TC_COPY_SPACE) {
                if (!(orig = tcb_find(ids->tid))) {
                        err = -EINVAL;
                        goto out_err;
                }
        }

        /* Set the creator as the pager */
        new->pagerid = current->tid;

        /* Set up container-generic fields from the current task */
        new->container = current->container;

        /*
         * Set up the cpu affinity.
         *
         * This is the default setting; it may be changed
         * by a subsequent exchange_registers call.
         */
        thread_setup_affinity(new);

        /* Set up the new thread's ids using the parent's ids and flags */
        thread_setup_new_ids(ids, flags, new, orig);

        arch_setup_new_thread(new, orig, flags);

        tcb_add(new);

        //printk("%s: %d created: %d, %d, %d \n",
        //       __FUNCTION__, current->tid, ids->tid,
        //       ids->tgid, ids->spid);

        return 0;

out_err:
        /* The premature tcb needs freeing by free_ktcb */
        free_ktcb(new, current);
        return err;
}
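
/*
 * Illustrative sketch of the creation flag combinations accepted above
 * (the exact flag values live in the thread.h headers):
 *
 *      TC_NEW_SPACE                    brand new thread in an empty space
 *      TC_COPY_SPACE                   fork-style copy of space ids->spid
 *      TC_SHARE_SPACE                  sibling thread in space ids->spid
 *      TC_SHARE_SPACE | TC_SHARE_GROUP sibling in the same thread group
 *
 * For the share/copy cases ids->tid must name an existing thread, which
 * becomes the context donor (orig) for arch_setup_new_thread().
 */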

/*
 * Creates, destroys and modifies threads. Also implicitly creates an address
 * space for a thread that doesn't already have one, or destroys the space
 * when the last thread using it is destroyed.
 */
int sys_thread_control(unsigned int flags, struct task_ids *ids)
{
        struct ktcb *task = 0;
        int err, ret = 0;

        if ((err = check_access((unsigned long)ids, sizeof(*ids),
                                MAP_USR_RW, 1)) < 0)
                return err;

        if ((flags & THREAD_ACTION_MASK) != THREAD_CREATE) {
                if (!(task = tcb_find(ids->tid)))
                        return -ESRCH;

                /*
                 * Tasks may only operate on their own children;
                 * a task may additionally destroy itself.
                 */
                if ((flags & THREAD_ACTION_MASK) == THREAD_DESTROY &&
                    !task_is_child(task) && task != current)
                        return -EPERM;
                if ((flags & THREAD_ACTION_MASK) != THREAD_DESTROY &&
                    !task_is_child(task))
                        return -EPERM;
        }

        if ((err = cap_thread_check(task, flags, ids)) < 0)
                return err;

        switch (flags & THREAD_ACTION_MASK) {
        case THREAD_CREATE:
                ret = thread_create(ids, flags);
                break;
        case THREAD_RUN:
                ret = thread_start(task);
                break;
        case THREAD_SUSPEND:
                ret = thread_suspend(task);
                break;
        case THREAD_DESTROY:
                ret = thread_destroy(task, flags);
                break;
        case THREAD_RECYCLE:
                ret = thread_recycle(task);
                break;
        case THREAD_WAIT:
                ret = thread_wait(task);
                break;
        default:
                ret = -EINVAL;
        }

        return ret;
}