OpenCores
URL https://opencores.org/ocsvn/c0or1k/c0or1k/trunk

Subversion Repositories c0or1k

[/] [c0or1k/] [trunk/] [conts/] [posix/] [mm0/] [mm/] [task.c] - Blame information for rev 2

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 2 drasko
/*
2
 * Task management.
3
 *
4
 * Copyright (C) 2007 Bahadir Balban
5
 */
6
#include <l4/macros.h>
7
#include <l4/config.h>
8
#include <l4/types.h>
9
#include <l4/lib/list.h>
10
#include <l4/lib/math.h>
11
#include <l4/api/thread.h>
12
#include <l4/api/kip.h>
13
#include <l4/api/errno.h>
14
#include INC_GLUE(memory.h)
15
#include L4LIB_INC_ARCH(syscalls.h)
16
#include L4LIB_INC_ARCH(syslib.h)
17
#include L4LIB_INC_ARCH(utcb.h)
18
 
19
#include <l4lib/ipcdefs.h>
20
#include <l4lib/exregs.h>
21
 
22
#include <lib/addr.h>
23
#include <malloc/malloc.h>
24
 
25
#include <init.h>
26
#include <string.h>
27
#include <vm_area.h>
28
#include <memory.h>
29
#include <globals.h>
30
#include <file.h>
31
#include <task.h>
32
#include <exec.h>
33
#include <shm.h>
34
#include <mmap.h>
35
#include <test.h>
36
#include <utcb.h>
37
#include <vfs.h>
38
 
39
/*
 * Global list of every task this pager manages, plus a running count.
 * Tasks are linked through tcb.list by global_add_task()/global_remove_task().
 */
struct global_list global_tasks = {
        /* Empty circular list: head's next/prev point back at itself */
        .list = { &global_tasks.list, &global_tasks.list },
        .total = 0,
};
43
 
44
void print_tasks(void)
45
{
46
        struct tcb *task;
47
        printf("Tasks:\n========\n");
48
        list_foreach_struct(task, &global_tasks.list, list) {
49
                printf("Task tid: %d, spid: %d\n", task->tid, task->spid);
50
        }
51
}
52
 
53
void global_add_task(struct tcb *task)
54
{
55
        BUG_ON(!list_empty(&task->list));
56
        list_insert_tail(&task->list, &global_tasks.list);
57
        global_tasks.total++;
58
}
59
 
60
void global_remove_task(struct tcb *task)
61
{
62
        BUG_ON(list_empty(&task->list));
63
        list_remove_init(&task->list);
64
        BUG_ON(--global_tasks.total < 0);
65
}
66
 
67
struct tcb *find_task(int tid)
68
{
69
        struct tcb *t;
70
 
71
        list_foreach_struct(t, &global_tasks.list, list)
72
                if (t->tid == tid)
73
                        return t;
74
        return 0;
75
}
76
 
77
 
78
struct tcb *tcb_alloc_init(unsigned int flags)
79
{
80
        struct tcb *task;
81
 
82
        if (!(task = kzalloc(sizeof(struct tcb))))
83
                return PTR_ERR(-ENOMEM);
84
 
85
        /* Allocate new vma head if its not shared */
86
        if (!(flags & TCB_SHARED_VM)) {
87
                if (!(task->vm_area_head =
88
                      kzalloc(sizeof(*task->vm_area_head)))) {
89
                        kfree(task);
90
                        return PTR_ERR(-ENOMEM);
91
                }
92
                task->vm_area_head->tcb_refs = 1;
93
                link_init(&task->vm_area_head->list);
94
 
95
                /* Also allocate a utcb head for new address space */
96
                if (!(task->utcb_head =
97
                      kzalloc(sizeof(*task->utcb_head)))) {
98
                        kfree(task->vm_area_head);
99
                        kfree(task);
100
                        return PTR_ERR(-ENOMEM);
101
                }
102
                task->utcb_head->tcb_refs = 1;
103
                link_init(&task->utcb_head->list);
104
        }
105
 
106
        /* Allocate new fs data struct if its not shared */
107
        if (!(flags & TCB_SHARED_FS)) {
108
                if (!(task->fs_data =
109
                      kzalloc(sizeof(*task->fs_data)))) {
110
                        kfree(task->vm_area_head);
111
                        kfree(task->utcb_head);
112
                        kfree(task);
113
                        return PTR_ERR(-ENOMEM);
114
                }
115
                task->fs_data->tcb_refs = 1;
116
        }
117
 
118
        /* Allocate file structures if not shared */
119
        if (!(flags & TCB_SHARED_FILES)) {
120
                if (!(task->files =
121
                      kzalloc(sizeof(*task->files)))) {
122
                        kfree(task->vm_area_head);
123
                        kfree(task->utcb_head);
124
                        kfree(task->fs_data);
125
                        kfree(task);
126
 
127
                        return PTR_ERR(-ENOMEM);
128
                }
129
                if (IS_ERR(task->files->fdpool =
130
                           id_pool_new_init(TASK_FILES_MAX))) {
131
                        void *err = task->files->fdpool;
132
                        kfree(task->vm_area_head);
133
                        kfree(task->utcb_head);
134
                        kfree(task->fs_data);
135
                        kfree(task->files);
136
                        kfree(task);
137
 
138
                        return err;
139
                }
140
                task->files->tcb_refs = 1;
141
        }
142
 
143
        /* Ids will be acquired from the kernel */
144
        task->tid = TASK_ID_INVALID;
145
        task->spid = TASK_ID_INVALID;
146
        task->tgid = TASK_ID_INVALID;
147
 
148
        /* Initialise list structure */
149
        link_init(&task->list);
150
        link_init(&task->child_ref);
151
        link_init(&task->children);
152
 
153
        return task;
154
}
155
 
156
/*
157
 * Free vmas, fd structure and utcb address.
158
 * Make sure to sync all IO beforehand
159
 */
160
int task_free_resources(struct tcb *task)
161
{
162
        /*
163
         * Threads may share file descriptor structure
164
         * if no users left, free it.
165
         */
166
        if (--task->files->tcb_refs == 0) {
167
                kfree(task->files->fdpool);
168
                kfree(task->files);
169
        }
170
 
171
        /* Similarly free filesystem view structure */
172
        if (--task->fs_data->tcb_refs == 0)
173
                kfree(task->fs_data);
174
 
175
        /*
176
         * Threads may share the virtual space.
177
         * if no users of the vma struct left,
178
         * free it along with all its vma links.
179
         */
180
        if (!(--task->vm_area_head->tcb_refs)) {
181
                /* Free all vmas */
182
                task_release_vmas(task->vm_area_head);
183
 
184
                /* Free the head */
185
                kfree(task->vm_area_head);
186
        }
187
 
188
        /*
189
         * Threads may share utcb chain
190
         */
191
        if (!(--task->utcb_head->tcb_refs)) {
192
                /* UTCBs must have been deleted explicitly */
193
                BUG_ON(!list_empty(&task->utcb_head->list));
194
 
195
                /* Free the head */
196
                kfree(task->utcb_head);
197
        }
198
 
199
        return 0;
200
}
201
 
202
int tcb_destroy(struct tcb *task)
203
{
204
        struct tcb *child, *n;
205
 
206
        global_remove_task(task);
207
 
208
        /* Free all resources of the task */
209
        task_free_resources(task);
210
 
211
        /*
212
         * All children of the current task becomes children
213
         * of the parent of this task.
214
         */
215
        list_foreach_removable_struct(child, n, &task->children,
216
                                 child_ref) {
217
                list_remove_init(&child->child_ref);
218
                list_insert_tail(&child->child_ref,
219
                              &task->parent->children);
220
                child->parent = task->parent;
221
        }
222
        /* The task is not a child of its parent */
223
        list_remove_init(&task->child_ref);
224
 
225
        /* Now task deletion make sure task is in no list */
226
        BUG_ON(!list_empty(&task->list));
227
        BUG_ON(!list_empty(&task->child_ref));
228
        BUG_ON(!list_empty(&task->children));
229
        kfree(task);
230
 
231
        return 0;
232
}
233
 
234
/*
235
 * Copy all vmas from the given task and populate each with
236
 * links to every object that the original vma is linked to.
237
 * Note, that we don't copy vm objects but just the links to
238
 * them, because vm objects are not per-process data.
239
 */
240
int task_copy_vmas(struct tcb *to, struct tcb *from)
241
{
242
        struct vm_area *vma, *new_vma;
243
 
244
        list_foreach_struct(vma, &from->vm_area_head->list, list) {
245
 
246
                /* Create a new vma */
247
                new_vma = vma_new(vma->pfn_start, vma->pfn_end - vma->pfn_start,
248
                                  vma->flags, vma->file_offset);
249
 
250
                /* Copy all object links */
251
                vma_copy_links(new_vma, vma);
252
 
253
                /* All link copying is finished, now add the new vma to task */
254
                task_insert_vma(new_vma, &to->vm_area_head->list);
255
        }
256
 
257
        return 0;
258
}
259
 
260
/*
261
 * Traverse all vmas, release all links to vm_objects.
262
 * Used when a task or thread group with a shared vm is exiting.
263
 */
264
int task_release_vmas(struct task_vma_head *vma_head)
265
{
266
        struct vm_area *vma, *n;
267
 
268
        list_foreach_removable_struct(vma, n, &vma_head->list, list) {
269
                /* Release all links */
270
                vma_drop_merge_delete_all(vma);
271
 
272
                /* Delete the vma from task's vma list */
273
                list_remove(&vma->list);
274
 
275
                /* Free the vma */
276
                kfree(vma);
277
        }
278
        return 0;
279
}
280
 
281
/*
 * Populate @to from @from according to @share_flags: segment boundary
 * fields are always copied by value; vma/utcb heads, the fd table and
 * the fs view are either shared (refcount bumped) or deep-copied.
 *
 * NOTE(review): when a structure is NOT shared, this relies on
 * tcb_alloc_init() having already allocated to->files / to->fs_data /
 * to->vm_area_head for the same flags — confirm callers always pair
 * the two with identical share_flags.
 */
int copy_tcb(struct tcb *to, struct tcb *from, unsigned int share_flags)
{
        /* Copy program segment boundary information */
        to->start = from->start;
        to->end = from->end;
        to->text_start = from->text_start;
        to->text_end = from->text_end;
        to->data_start = from->data_start;
        to->data_end = from->data_end;
        to->bss_start = from->bss_start;
        to->bss_end = from->bss_end;
        to->stack_start = from->stack_start;
        to->stack_end = from->stack_end;
        to->heap_start = from->heap_start;
        to->heap_end = from->heap_end;
        to->args_start = from->args_start;
        to->args_end = from->args_end;
        to->map_start = from->map_start;
        to->map_end = from->map_end;

        /* Sharing the list of vmas and utcbs */
        if (share_flags & TCB_SHARED_VM) {
                to->vm_area_head = from->vm_area_head;
                to->vm_area_head->tcb_refs++;
                to->utcb_head = from->utcb_head;
                to->utcb_head->tcb_refs++;
        } else {
                /* Copy all vm areas */
                task_copy_vmas(to, from);

                /*
                 * NOTE:
                 * No copy for utcb descriptor list,
                 * forker shall start its own unique.
                 */
        }

        if (share_flags & TCB_SHARED_FILES) {
                to->files = from->files;
                to->files->tcb_refs++;
        } else {
                /* Copy all file descriptors */
                memcpy(to->files->fd, from->files->fd,
                       TASK_FILES_MAX * sizeof(to->files->fd[0]));

                /* Copy the idpool */
                id_pool_copy(to->files->fdpool, from->files->fdpool, TASK_FILES_MAX);

                /* Increase refcount for all open files */
                for (int i = 0; i < TASK_FILES_MAX; i++)
                        if (to->files->fd[i].vmfile)
                                to->files->fd[i].vmfile->openers++;
        }

        if (share_flags & TCB_SHARED_FS) {
                to->fs_data = from->fs_data;
                to->fs_data->tcb_refs++;
        } else
                /* Private fs view starts as a byte-for-byte copy */
                memcpy(to->fs_data, from->fs_data, sizeof(*to->fs_data));

        return 0;
}
343
 
344
/*
 * Create a new task: ask the kernel for thread/space structures, then
 * build and wire up the corresponding tcb.
 *
 * @parent:       task to inherit ids and (per @share_flags) state from;
 *                may be NULL for a brand-new parentless task.
 * @ids:          in/out id set; filled from @parent when one is given,
 *                and updated by the kernel on THREAD_CREATE.
 * @share_flags:  TCB_SHARED_* selection of what to share vs. copy.
 * @ctrl_flags:   extra flags OR'ed into the THREAD_CREATE request.
 *
 * Returns the new tcb or a PTR_ERR-encoded error.
 *
 * NOTE(review): if tcb_alloc_init() fails, the kernel thread created
 * just above is not destroyed — looks like a leak; confirm.
 * NOTE(review): the copy_tcb() and task_setup_utcb() return values
 * are ignored on the parent path — confirm they cannot fail here.
 */
struct tcb *task_create(struct tcb *parent, struct task_ids *ids,
                        unsigned int share_flags, unsigned int ctrl_flags)
{
        struct tcb *task;
        int err;

        /* Can't have some share flags with no parent task */
        BUG_ON(!parent && share_flags);

        /* Set task ids if a parent is supplied */
        if (parent) {
                ids->tid = parent->tid;
                ids->spid = parent->spid;
                ids->tgid = parent->tgid;
        }

        /* Create the thread structures and address space as the pager */
        if ((err = l4_thread_control(THREAD_CREATE | ctrl_flags, ids)) < 0) {
                printf("l4_thread_control failed with %d.\n", err);
                return PTR_ERR(err);
        }

        /* Create a task and use given space and thread ids. */
        if (IS_ERR(task = tcb_alloc_init(share_flags)))
                return PTR_ERR(task);

        /* Set task's ids */
        task->tid = ids->tid;
        task->spid = ids->spid;
        task->tgid = ids->tgid;

        /* Set task's creation flags */
        task->clone_flags = share_flags;

        /*
         * If a parent task has been specified, that means either
         * we are forking, or we are cloning the original tcb fully
         * or partially. Therefore we copy tcbs depending on share flags.
         */
        if (parent) {
                copy_tcb(task, parent, share_flags);

                /* Set up a new utcb for new thread */
                task_setup_utcb(task);

                /* Set up parent-child relationship */
                if ((share_flags & TCB_SHARED_PARENT) ||
                    (share_flags & TCB_SHARED_TGROUP)) {

                        /*
                         * On these conditions child shares
                         * the parent of the caller
                         */
                        list_insert_tail(&task->child_ref,
                                      &parent->parent->children);
                        task->parent = parent->parent;
                } else {
                        /* Otherwise the caller itself is the parent */
                        list_insert_tail(&task->child_ref,
                                      &parent->children);
                        task->parent = parent;
                }
        } else {
                /* Parentless: the pager (this task) adopts it */
                struct tcb *pager = find_task(self_tid());

                /* Initialise vfs specific fields. */
                task->fs_data->rootdir = vfs_root.pivot;
                task->fs_data->curdir = vfs_root.pivot;

                /* All parentless tasks are children of the pager */
                list_insert_tail(&task->child_ref, &pager->children);
                task->parent = pager;
        }

        return task;
}
419
 
420
 
421
/*
422
 * Copy argument and environment strings into task's stack in a
423
 * format that is expected by the C runtime.
424
 *
425
 * e.g. uclibc expects stack state:
426
 *
427
 * (low) |->argc|argv[0]|argv[1]|...|argv[argc] = 0|envp[0]|envp[1]|...|NULL| (high)
428
 *
429
 * argc
430
 * argv pointers
431
 * null
432
 * env pointers
433
 * null
434
 *
435
 * After the final null, we place the strings, but this is unspecified.
436
 * On setting new environment strings, instead of using this fixed
437
 * space, heap seems to get used in uClibc.
438
 *
439
 */
440
int task_copy_args_to_user(char *user_stack,
441
                           unsigned long user_ptr,
442
                           struct args_struct *args,
443
                           struct args_struct *env)
444
{
445
        char **argv_start, **envp_start;
446
 
447
        BUG_ON(!is_aligned(user_stack, 8));
448
 
449
        /* Copy argc */
450
        *((int *)user_stack) = args->argc;
451
        user_stack += sizeof(int);
452
 
453
        /* Set beginning of argv */
454
        argv_start = (char **)user_stack;
455
 
456
        /* Forward by number of argv ptrs */
457
        user_stack += sizeof(int) * args->argc;
458
 
459
        /* Put the null terminator integer */
460
        *((int *)user_stack) = 0;
461
        user_stack = user_stack + sizeof(int);
462
 
463
        /* Set beginning of envp */
464
        envp_start = (char **)user_stack;
465
 
466
        /* Forward by number of envp ptrs */
467
        user_stack += sizeof(int) * env->argc;
468
 
469
        /* Put the null terminator integer */
470
        *((int *)user_stack) = 0;
471
        user_stack = user_stack + sizeof(int);
472
 
473
        /* Copy argument strings one by one */
474
        for (int i = 0; i < args->argc; i++) {
475
                /* Copy string */
476
                strcpy(user_stack, args->argv[i]);
477
 
478
                /* Set its pointer on stack */
479
                argv_start[i] = (char *)
480
                        ((user_ptr & ~PAGE_MASK)
481
                         | ((unsigned long)user_stack &
482
                            PAGE_MASK));
483
 
484
                /* Update location */
485
                user_stack += strlen(args->argv[i]) + 1;
486
        }
487
 
488
        /* Copy environment strings one by one */
489
        for (int i = 0; i < env->argc; i++) {
490
                /* Copy string */
491
                strcpy(user_stack, env->argv[i]);
492
 
493
                /* Set its pointer on stack */
494
                envp_start[i] = (char *)
495
                        ((user_ptr & ~PAGE_MASK)
496
                         | ((unsigned long)user_stack &
497
                            PAGE_MASK));
498
 
499
                /* Update location */
500
                user_stack += strlen(env->argv[i]) + 1;
501
        }
502
 
503
        return 0;
504
}
505
 
506
int task_prefault_range(struct tcb *task, unsigned long start,
507
                        unsigned long size, unsigned int vm_flags)
508
{
509
        struct page *p;
510
 
511
 
512
        for (unsigned long i = start;  i < start + size; i += PAGE_SIZE)
513
                if (IS_ERR(p = task_prefault_page(task, i, vm_flags)))
514
                        return (int)p;
515
        return 0;
516
}
517
 
518
 
519
/*
 * Set up the task's stack: compute the stack/args boundaries, mmap the
 * stack as private anonymous memory, temporarily map the args portion
 * into the pager, copy argv/envp onto it, and unmap it again.
 *
 * Returns 0 on success or a negative error from do_mmap() /
 * pager_validate_map_user_range2().
 */
int task_map_stack(struct vm_file *f, struct exec_file_desc *efd,
                   struct tcb *task, struct args_struct *args,
                   struct args_struct *env)
{
        unsigned long stack_used;
        unsigned long arg_pages;
        char *args_on_stack;
        void *mapped;

        /*
         * Stack contains: args, environment, argc integer,
         * 2 Null integers as terminators.
         *
         * It also needs to be 8-byte aligned.
         */
        stack_used = align_up(args->size + env->size + sizeof(int) * 3 + 8, 8);
        arg_pages = __pfn(page_align_up(stack_used));
        /* Stack occupies the top DEFAULT_STACK_SIZE bytes of task memory */
        task->stack_end = __pfn_to_addr(cont_mem_regions.task->end);
        task->stack_start = __pfn_to_addr(cont_mem_regions.task->end) - DEFAULT_STACK_SIZE;
        /* Args live at the very top of the stack */
        task->args_end = task->stack_end;
        task->args_start = task->stack_end - stack_used;

        BUG_ON(stack_used > DEFAULT_STACK_SIZE);

        /*
         * mmap task's stack as anonymous memory.
         * TODO: Add VMA_GROWSDOWN here so the stack can expand.
         */
        if (IS_ERR(mapped = do_mmap(0, 0, task, task->stack_start,
                                    VM_READ | VM_WRITE |
                                    VMA_PRIVATE | VMA_ANONYMOUS,
                                    __pfn(task->stack_end -
                                          task->stack_start)))) {
                printf("do_mmap: Mapping stack failed with %d.\n",
                       (int)mapped);
                return (int)mapped;
        }

        /* FIXME: Probably not necessary anymore. Prefault the stack for writing. */
        //BUG_ON(task_prefault_range(task, task->args_start, stack_used, VM_READ | VM_WRITE) < 0);

        /* Map the stack's part that will contain args and environment */
        if (IS_ERR(args_on_stack =
                   pager_validate_map_user_range2(task,
                                                  (void *)task->args_start,
                                                  stack_used,
                                                  VM_READ | VM_WRITE)))
                return (int)args_on_stack;

        /* Copy arguments and env */
        task_copy_args_to_user(args_on_stack,
                               task->args_start,
                               args, env);

        /* Unmap task's those stack pages from pager */
        pager_unmap_pages(args_on_stack, arg_pages);

        return 0;
}
578
 
579
/*
580
 * If bss comes consecutively after the data section, prefault the
581
 * last page of the data section and zero out the bit that contains
582
 * the beginning of bss. If bss spans into more pages, then map those
583
 * pages as anonymous pages which are mapped by the devzero file.
584
 */
585
int task_map_bss(struct vm_file *f, struct exec_file_desc *efd, struct tcb *task)
586
{
587
        unsigned long bss_mmap_start;
588
        void *mapped;
589
 
590
        /*
591
         * Test if bss starts right from the end of data,
592
         * and not on a new page boundary.
593
         */
594
        if ((task->data_end == task->bss_start) &&
595
            !is_page_aligned(task->bss_start)) {
596
                unsigned long bss_size = task->bss_end - task->bss_start;
597
                struct page *last_data_page;
598
                void *pagebuf, *bss;
599
 
600
                /* Get the page */
601
                last_data_page = task_prefault_page(task, task->data_end,
602
                                                    VM_READ | VM_WRITE);
603
 
604
                /* Map the page. FIXME: PAGE COLOR!!! */
605
                pagebuf = l4_map_helper((void *)page_to_phys(last_data_page), 1);
606
 
607
                /* Find the bss offset */
608
                bss = (void *)((unsigned long)pagebuf |
609
                               (PAGE_MASK & task->bss_start));
610
 
611
                /*
612
                 * Zero out the part that is bss. This is minimum of either
613
                 * end of bss or until the end of page, whichever is met first.
614
                 */
615
                memset((void *)bss, 0, min(TILL_PAGE_ENDS(task->data_end),
616
                       (int)bss_size));
617
 
618
                /* Unmap the page */
619
                l4_unmap_helper(pagebuf, 1);
620
 
621
                /* Push bss mmap start to next page */
622
                bss_mmap_start = page_align_up(task->bss_start);
623
        } else  /* Otherwise bss mmap start is same as bss_start */
624
                bss_mmap_start = task->bss_start;
625
 
626
        /*
627
         * Now if there are more pages covering bss,
628
         * map those as anonymous zero pages
629
         */
630
        if (task->bss_end > bss_mmap_start) {
631
                if (IS_ERR(mapped = do_mmap(0, 0, task, bss_mmap_start,
632
                                            VM_READ | VM_WRITE |
633
                                            VMA_PRIVATE | VMA_ANONYMOUS,
634
                                            __pfn(page_align_up(task->bss_end) -
635
                                                  page_align(task->bss_start))))) {
636
                        printf("do_mmap: Mapping environment failed with %d.\n",
637
                               (int)mapped);
638
                        return (int)mapped;
639
                }
640
        }
641
 
642
        return 0;
643
}
644
 
645
 
646
/*
 * Map all of a new task's segments: text and data from the executable
 * file, bss as anonymous memory, the stack (with args/env written in),
 * and finally a utcb slot.
 *
 * Returns 0 on success; on any failure the task's resources are
 * released via task_free_resources() and the error is returned.
 *
 * NOTE(review): the error path frees resources but leaves earlier
 * successful mmaps for the failed task in place — confirm the caller
 * destroys the address space.
 */
int task_mmap_segments(struct tcb *task, struct vm_file *file, struct exec_file_desc *efd,
                       struct args_struct *args, struct args_struct *env)
{
        void *mapped;
        //struct vm_file *shm;
        int err;
        int text_size, data_size;

        /* Set up task's user boundary regions */
        task->start = __pfn_to_addr(cont_mem_regions.task->start);
        task->end = __pfn_to_addr(cont_mem_regions.task->end);
        task->map_start = task->start;
        task->map_end = task->end;

        /* Segment sizes in pages, rounded out to page boundaries */
        text_size = __pfn(page_align_up(task->text_end) -
                          page_align(task->text_start));
        data_size = __pfn(page_align_up(task->data_end) -
                          page_align(task->data_start));

        /* mmap task's text to task's address space. */
        if (IS_ERR(mapped = do_mmap(file, efd->text_offset, task,
                                    task->text_start, VM_READ | VM_WRITE |
                                    VM_EXEC | VMA_PRIVATE, text_size))) {
                printf("do_mmap: failed with %d.\n", (int)mapped);
                err = (int)mapped;
                goto out_err;
        }

        /* mmap task's data to task's address space. */
        if (IS_ERR(mapped = do_mmap(file, efd->data_offset, task,
                                    task->data_start,  VM_READ | VM_WRITE |
                                    VMA_PRIVATE, data_size))) {
                printf("do_mmap: failed with %d.\n", (int)mapped);
                err = (int)mapped;
                goto out_err;
        }

        /* mmap task's bss as anonymous memory. */
        if ((err = task_map_bss(file, efd, task)) < 0) {
                printf("%s: Mapping bss has failed.\n",
                       __FUNCTION__);
                goto out_err;
        }

        /* mmap task's stack, writing in the arguments and environment */
        if ((err = task_map_stack(file, efd, task, args, env)) < 0) {
                printf("%s: Mapping task's stack has failed.\n",
                       __FUNCTION__);
                goto out_err;
        }

        /* Get a new utcb slot for new task */
        if ((err = task_setup_utcb(task)) < 0) {
                printf("%s: Mapping task's utcb has failed.\n",
                       __FUNCTION__);
                goto out_err;
        }

        return 0;

out_err:
        task_free_resources(task);
        return err;
}
710
 
711
/*
 * Program a task's initial registers via exchange_registers.
 * Any of @pc, @sp, @pager passed as 0 is replaced with a default:
 * top of stack (8-byte aligned), entry point (or text start), and
 * this pager's own tid, respectively.
 */
int task_setup_registers(struct tcb *task, unsigned int pc,
                         unsigned int sp, l4id_t pager)
{
        struct exregs_data exregs;
        int err;

        /* Fill in defaults for any unspecified values */
        if (sp == 0)
                sp = align(task->stack_end - 1, 8);
        if (pc == 0) {
                pc = task->entry;
                if (pc == 0)
                        pc = task->text_start;
        }
        if (pager == 0)
                pager = self_tid();

        /* Set up the task's thread details, (pc, sp, pager etc.) */
        exregs_set_stack(&exregs, sp);
        exregs_set_pc(&exregs, pc);
        exregs_set_pager(&exregs, pager);
        exregs_set_utcb(&exregs, task->utcb_address);

        err = l4_exchange_registers(&exregs, task->tid);
        if (err < 0) {
                printf("l4_exchange_registers failed with %d.\n", err);
                return err;
        }

        return 0;
}
739
 
740
int task_start(struct tcb *task)
741
{
742
        int err;
743
        struct task_ids ids = {
744
                .tid = task->tid,
745
                .spid = task->spid,
746
                .tgid = task->tgid,
747
        };
748
 
749
        /* Start the thread */
750
        // printf("%s: Starting task with thread id: %d, space id: %d\n",
751
        // __TASKNAME__, task->tid, task->spid);
752
        if ((err = l4_thread_control(THREAD_RUN, &ids)) < 0) {
753
                printf("l4_thread_control failed with %d\n", err);
754
                return err;
755
        }
756
 
757
        return 0;
758
}

powered by: WebSVN 2.1.0

© copyright 1999-2025 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.