c0or1k/trunk/conts/posix/mm0/mm/init.c
/*
 * Initialise the system.
 *
 * Copyright (C) 2007 - 2009 Bahadir Balban
 */
#include L4LIB_INC_ARCH(syscalls.h)
#include L4LIB_INC_ARCH(syslib.h)
#include __INC_ARCH(debug.h)
#include <l4lib/utcb.h>
#include <l4lib/exregs.h>
#include <l4/lib/list.h>
#include <l4/generic/cap-types.h>       /* TODO: Move this to API */
#include <l4/api/capability.h>

#include <stdio.h>
#include <string.h>
#include <mm/alloc_page.h>
#include <malloc/malloc.h>
#include <lib/bit.h>

#include <task.h>
#include <shm.h>
#include <file.h>
#include <init.h>
#include <test.h>
#include <utcb.h>
#include <bootm.h>
#include <vfs.h>
#include <memory.h>
#include <capability.h>
#include <linker.h>
#include <mmap.h>
#include <syscalls.h>

/* Kernel data acquired during initialisation */
__initdata struct initdata initdata;

/* Physical memory descriptors */
struct memdesc physmem;         /* Initial, primitive memory descriptor */
struct membank membank[1];      /* The memory bank */
struct page *page_array;        /* The physical page array based on mem bank */

/* Memory region capabilities */
struct container_memory_regions cont_mem_regions;

void print_pfn_range(int pfn, int size)
{
        unsigned int addr = pfn << PAGE_BITS;
        unsigned int end = (pfn + size) << PAGE_BITS;
        printf("Used: 0x%x - 0x%x\n", addr, end);
}

/*
 * This sets up the mm0 task struct and memory environment but omits
 * bits that are already done, such as creating a new thread and
 * setting registers.
 */
int pager_setup_task(void)
{
        struct tcb *task;
        struct task_ids ids;
        struct exregs_data exregs;
        void *mapped;
        int err;

        /*
         * The thread itself is already known by the kernel,
         * so we just allocate a local task structure.
         */
        if (IS_ERR(task = tcb_alloc_init(TCB_NO_SHARING))) {
                printf("FATAL: "
                       "Could not allocate tcb for pager.\n");
                BUG();
        }

        /* Set up own ids */
        l4_getid(&ids);
        task->tid = ids.tid;
        task->spid = ids.spid;
        task->tgid = ids.tgid;

        /* Initialise vfs specific fields. */
        task->fs_data->rootdir = vfs_root.pivot;
        task->fs_data->curdir = vfs_root.pivot;

        /* Text markers */
        task->text_start = (unsigned long)__start_text;
        task->text_end = (unsigned long)__end_rodata;

        /* Stack markers */
        task->stack_end = (unsigned long)__stack;
        task->stack_start = (unsigned long)__start_stack;

        /* Data markers */
        task->data_start = (unsigned long)__start_data;
        task->data_end = (unsigned long)__end_data;

        /* BSS markers */
        task->bss_start = (unsigned long)__start_bss;
        task->bss_end = (unsigned long)__end_bss;

        /* Task's region available for mmap */
        task->map_start = PAGER_MMAP_START;
        task->map_end = PAGER_MMAP_END;

        /* Task's total map boundaries */
        task->start = __pfn_to_addr(cont_mem_regions.pager->start);
        task->end = __pfn_to_addr(cont_mem_regions.pager->end);

        /*
         * Map all regions as anonymous (since no real
         * file backs them). All already-mapped areas
         * are mapped at once.
         */
        if (IS_ERR(mapped =
                   do_mmap(0, 0, task, task->start,
                           VMA_ANONYMOUS | VM_READ | VMA_FIXED |
                           VM_WRITE | VM_EXEC | VMA_PRIVATE,
                           __pfn(page_align_up(task->map_start) -
                                 task->start)))) {
                printf("FATAL: do_mmap: failed with %d.\n", (int)mapped);
                BUG();
        }
 
129
 
130
        /* Set pager as child and parent of itself */
131
        list_insert(&task->child_ref, &task->children);
132
        task->parent = task;
133
 
134
        /* Allocate and set own utcb */
135
        task_setup_utcb(task);
136
        memset(&exregs, 0, sizeof(exregs));
137
        exregs_set_utcb(&exregs, task->utcb_address);
138
        if ((err = l4_exchange_registers(&exregs, task->tid)) < 0) {
139
                printf("FATAL: Pager could not set own utcb. "
140
                       "UTCB address: 0x%lx, error: %d\n",
141
                       task->utcb_address, err);
142
                BUG();
143
        }
144
 
145
        /* Pager must prefault its utcb */
146
        task_prefault_page(task, task->utcb_address, VM_READ | VM_WRITE);
147
 
148
        /* Add the task to the global task list */
149
        global_add_task(task);
150
 
151
        return 0;
152
}
153
 
154
/*
 * Copy all necessary data from initmem to real memory,
 * release initdata and any init memory used.
 */
void release_initdata(void)
{
        /* Free and unmap init memory:
         *
         * FIXME: We can and do safely unmap the boot
         * memory here, but because we don't recycle it yet,
         * it remains marked as a used block.
         */

        l4_unmap(__start_init,
                 __pfn(page_align_up(__end_init - __start_init)),
                 self_tid());
}

static void init_page_map(struct page_bitmap *pmap,
                          unsigned long pfn_start,
                          unsigned long pfn_end)
{
        pmap->pfn_start = pfn_start;
        pmap->pfn_end = pfn_end;
        set_page_map(pmap, pfn_start,
                     pfn_end - pfn_start, 0);
}

/*
 * Marks pages in the given page_map as used or unused.
 *
 * @pfn_start = first page frame number to set, inclusive.
 * @numpages = number of pages to set.
 * @val = 1 marks the pages used, 0 marks them unused.
 */
int set_page_map(struct page_bitmap *page_map,
                 unsigned long pfn_start,
                 int numpages, int val)
{
        unsigned long pfn_end = pfn_start + numpages;
        unsigned long pfn_err = 0;

        if (page_map->pfn_start > pfn_start ||
            page_map->pfn_end < pfn_start) {
                pfn_err = pfn_start;
                goto error;
        }
        if (page_map->pfn_end < pfn_end ||
            page_map->pfn_start > pfn_end) {
                pfn_err = pfn_end;
                goto error;
        }

        /* Adjust bases so words get set from index 0 */
        pfn_start -= page_map->pfn_start;
        pfn_end -= page_map->pfn_start;

        if (val)
                for (int i = pfn_start; i < pfn_end; i++)
                        page_map->map[BITWISE_GETWORD(i)] |= BITWISE_GETBIT(i);
        else
                for (int i = pfn_start; i < pfn_end; i++)
                        page_map->map[BITWISE_GETWORD(i)] &= ~BITWISE_GETBIT(i);

        return 0;

error:
        BUG_MSG("Given page area is out of system page_map range: 0x%lx\n",
                pfn_err << PAGE_BITS);
        return -1;
}

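/*
 * A sketch of the bitmap arithmetic assumed above: with 32-bit map
 * words, BITWISE_GETWORD(i) would be (i >> 5) and BITWISE_GETBIT(i)
 * (1 << (i & 31)). For example, marking a hypothetical 16-page boot
 * image at pfn 0x100 as used:
 *
 *      set_page_map(initdata.page_map, 0x100, 16, 1);
 *
 * sets 16 consecutive bits, offset by page_map->pfn_start.
 */
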
/*
 * Allocates page descriptors and
 * initialises them using page_map information
 */
void init_physmem_secondary(struct membank *membank)
{
        struct page_bitmap *pmap = initdata.page_map;
        int npages = pmap->pfn_end - pmap->pfn_start;
        void *virtual_start;
        int err;

        /*
         * Allocation marks for the struct
         * page array; npages, start, end
         */
        int pg_npages, pg_spfn, pg_epfn;
        unsigned long ffree_addr;

        membank[0].start = __pfn_to_addr(pmap->pfn_start);
        membank[0].end = __pfn_to_addr(pmap->pfn_end);

        /* Find the first free page after the last used page */
        for (int i = 0; i < npages; i++)
                if ((pmap->map[BITWISE_GETWORD(i)] & BITWISE_GETBIT(i)))
                        membank[0].free = membank[0].start + (i + 1) * PAGE_SIZE;
        BUG_ON(membank[0].free >= membank[0].end);

        /*
         * One struct page for every physical page.
         * Calculate how many pages are needed for the
         * page structs, and the start and end pfn marks.
         */
        pg_npages = __pfn(page_align_up(sizeof(struct page) * npages));

        pg_spfn = __pfn(membank[0].free);
        pg_epfn = pg_spfn + pg_npages;

        /*
         * Use free pages from the bank as
         * the space for the struct page array
         */
        if (IS_ERR(membank[0].page_array =
                   l4_map_helper((void *)membank[0].free,
                                 pg_npages))) {
                printf("FATAL: Page array mapping failed. err=%d\n",
                       (int)membank[0].page_array);
                BUG();
        }

        /* Update free memory left */
        membank[0].free += pg_npages * PAGE_SIZE;

        /* Update page bitmap for the pages used for the page array */
        set_page_map(pmap, pg_spfn, pg_epfn - pg_spfn, 1);

        /* Initialise the page array */
        for (int i = 0; i < npages; i++) {
                link_init(&membank[0].page_array[i].list);

                /*
                 * Pages not marked used in the bitmap are
                 * free: tag them with a refcnt of -1. For
                 * used pages, track the first free address
                 * past the last one.
                 */
                if (!(pmap->map[BITWISE_GETWORD(i)]
                      & BITWISE_GETBIT(i)))
                        membank[0].page_array[i].refcnt = -1;
                else    /* Last page used +1 is free */
                        ffree_addr = membank[0].start + (i + 1) * PAGE_SIZE;
        }

        /*
         * First free address must
         * come up the same for both
         */
        BUG_ON(ffree_addr != membank[0].free);

        /* Set global page array to this bank's array */
        page_array = membank[0].page_array;

        /* Test that page/phys macros work */
        BUG_ON(phys_to_page(page_to_phys(&page_array[5]))
                            != &page_array[5]);

        /* Now map all physical pages to virtual correspondents */
        virtual_start = (void *)PAGER_VIRTUAL_START;
        if ((err = l4_map((void *)membank[0].start,
                          virtual_start,
                          __pfn(membank[0].end - membank[0].start),
                          MAP_USR_RW, self_tid())) < 0) {
                printk("FATAL: Could not map all physical pages to "
                       "virtual. err=%d\n", err);
                BUG();
        }

#if 0
        printf("Virtual offset: %p\n", virtual_start);
        printf("Physical page offset: 0x%lx\n", membank[0].start);
        printf("page address: 0x%lx\n", (unsigned long)&page_array[5]);
        printf("page_to_virt: %p\n", page_to_virt(&page_array[5]));
        printf("virt_to_phys, virtual_start: %p\n", virt_to_phys(virtual_start));
        printf("page_to_virt_to_phys: %p\n", virt_to_phys(page_to_virt(&page_array[5])));
        printf("page_to_phys: 0x%lx\n", page_to_phys(&page_array[5]));
#endif

        /* Now test that virt/phys macros work */
        BUG_ON(virt_to_phys(page_to_virt(&page_array[5]))
               != (void *)page_to_phys(&page_array[5]));
        BUG_ON(virt_to_page(page_to_virt(&page_array[5]))
               != &page_array[5]);
}

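/*
 * A sketch of the conversions the BUG_ON checks above assume, given
 * that the bank is mapped contiguously at PAGER_VIRTUAL_START:
 *
 *      page_to_phys(p) == membank[0].start + (p - page_array) * PAGE_SIZE
 *      page_to_virt(p) == PAGER_VIRTUAL_START
 *                         + (page_to_phys(p) - membank[0].start)
 *
 * so physical addresses, pager-virtual addresses and struct page
 * slots stay mutually convertible.
 */
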
/* Fills in the physmem structure with free physical memory information */
void init_physmem_primary(void)
{
        unsigned long pfn_start, pfn_end, pfn_images_end = 0;
        struct bootdesc *bootdesc = initdata.bootdesc;

        /* Allocate page map structure */
        initdata.page_map =
                alloc_bootmem(sizeof(struct page_bitmap) +
                              ((cont_mem_regions.physmem->end -
                                cont_mem_regions.physmem->start)
                               >> 5) + 1, 0);
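
        /*
         * Sizing note (an assumption): the map needs one bit per
         * page frame of physmem; the >> 5 matches the 32-bit word
         * granularity that BITWISE_GETWORD()/BITWISE_GETBIT() index
         * by, and the +1 rounds the allocation up.
         */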
 
        /* Initialise page map from physmem capability */
        init_page_map(initdata.page_map,
                      cont_mem_regions.physmem->start,
                      cont_mem_regions.physmem->end);

        /* Mark pager and other boot task areas as used */
        for (int i = 0; i < bootdesc->total_images; i++) {
                pfn_start =
                        __pfn(page_align_up(bootdesc->images[i].phys_start));
                pfn_end =
                        __pfn(page_align_up(bootdesc->images[i].phys_end));

                if (pfn_end > pfn_images_end)
                        pfn_images_end = pfn_end;
                set_page_map(initdata.page_map, pfn_start,
                             pfn_end - pfn_start, 1);
        }

        physmem.start = cont_mem_regions.physmem->start;
        physmem.end = cont_mem_regions.physmem->end;

        physmem.free_cur = pfn_images_end;
        physmem.free_end = physmem.end;
        physmem.numpages = physmem.end - physmem.start;
}

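/*
 * physmem.free_cur now points just past the highest boot image;
 * presumably early allocations carve free pages from there on.
 */
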
void init_physmem(void)
{
        init_physmem_primary();

        init_physmem_secondary(membank);

        init_page_allocator(membank[0].free, membank[0].end);
}

/*
 * To be removed later: this function copies the in-memory ELF image
 * into the initialised and formatted in-memory memfs filesystem.
 */
void copy_init_process(void)
{
        int fd;
        struct svc_image *init_img;
        unsigned long img_size;
        void *init_img_start, *init_img_end;
        struct tcb *self = find_task(self_tid());
        void *mapped;
        int err;

        /* Measure performance, if enabled */
        perfmon_reset_start_cyccnt();

        if ((fd = sys_open(self, "/test0", O_TRUNC |
                           O_RDWR | O_CREAT, 0)) < 0) {
                printf("FATAL: Could not open file "
                       "to write initial task.\n");
                BUG();
        }

        debug_record_cycles("sys_open");

        init_img = bootdesc_get_image_byname("test0");
        img_size = page_align_up(init_img->phys_end) -
                   page_align(init_img->phys_start);

        init_img_start = l4_map_helper((void *)init_img->phys_start,
                                       __pfn(img_size));
        init_img_end = init_img_start + img_size;

        /*
         * Map an anonymous region and prefault it.
         * It has to be from __end, because we haven't
         * unmapped the .init section yet (where map_start normally lies).
         */
        if (IS_ERR(mapped =
                   do_mmap(0, 0, self, page_align_up(__end),
                           VMA_ANONYMOUS | VM_READ | VMA_FIXED |
                           VM_WRITE | VM_EXEC | VMA_PRIVATE,
                           __pfn(img_size)))) {
                printf("FATAL: do_mmap: failed with %d.\n",
                       (int)mapped);
                BUG();
        }

        debug_record_cycles("Until after do_mmap");

        /* Prefault it */
        if ((err = task_prefault_range(self, (unsigned long)mapped,
                                       img_size, VM_READ | VM_WRITE))
                                       < 0) {
                printf("FATAL: Prefaulting init image failed.\n");
                BUG();
        }
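
        /*
         * The prefault above matters because the pager services page
         * faults for everyone else; it cannot fault in its own
         * anonymous pages on demand, so they must be resident before
         * the memcpy below touches them.
         */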
 
        /* Copy the raw image to the anon region */
        memcpy(mapped, init_img_start, img_size);

        debug_record_cycles("memcpy image");

        /* Write it to the real file from the anon region */
        sys_write(find_task(self_tid()), fd, mapped, img_size);

        debug_record_cycles("sys_write");

        /* Close file */
        sys_close(find_task(self_tid()), fd);

        debug_record_cycles("sys_close");

        /* Unmap anon region */
        do_munmap(self, (unsigned long)mapped, __pfn(img_size));

        /* Unmap raw virtual range for image memory */
        l4_unmap_helper(init_img_start, __pfn(img_size));

        debug_record_cycles("Final do_munmap/l4_unmap");
}

void start_init_process(void)
{
        copy_init_process();

        init_execve("/test0");
}

void init(void)
{
        setup_caps();

        pager_address_pool_init();

        read_boot_params();

        init_physmem();

        init_devzero();

        shm_pool_init();

        utcb_pool_init();

        vfs_init();

        pager_setup_task();

        start_init_process();

        release_initdata();

        mm0_test_global_vm_integrity();
}
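
/*
 * Ordering notes (as far as this file shows): setup_caps() and
 * read_boot_params() presumably populate cont_mem_regions and
 * initdata.bootdesc, which init_physmem() consumes; vfs_init() must
 * precede pager_setup_task(), which reads vfs_root.pivot; and
 * release_initdata() runs only after start_init_process() has copied
 * the boot image out of init memory.
 */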
 
