/*
 * Initialize system resource management.
 *
 * Copyright (C) 2009 Bahadir Balban
 */
#include <l4/generic/capability.h>
#include <l4/generic/cap-types.h>
#include <l4/generic/container.h>
#include <l4/generic/resource.h>
#include <l4/generic/bootmem.h>
#include <l4/generic/platform.h>
#include <l4/lib/math.h>
#include <l4/lib/memcache.h>
#include INC_GLUE(memory.h)
#include INC_GLUE(mapping.h)
#include INC_ARCH(linker.h)
#include INC_PLAT(platform.h)
#include <l4/api/errno.h>

struct kernel_resources kernel_resources;

pgd_table_t *alloc_pgd(void)
{
        return mem_cache_zalloc(kernel_resources.pgd_cache);
}

pmd_table_t *alloc_pmd(void)
{
        struct capability *cap;

        if (!(cap = capability_find_by_rtype(current,
                                             CAP_RTYPE_MAPPOOL)))
                return 0;

        if (capability_consume(cap, 1) < 0)
                return 0;

        return mem_cache_zalloc(kernel_resources.pmd_cache);
}

struct address_space *alloc_space(void)
{
        struct capability *cap;

        if (!(cap = capability_find_by_rtype(current,
                                             CAP_RTYPE_SPACEPOOL)))
                return 0;

        if (capability_consume(cap, 1) < 0)
                return 0;

        return mem_cache_zalloc(kernel_resources.space_cache);
}

struct ktcb *alloc_ktcb_use_capability(struct capability *cap)
{
        if (capability_consume(cap, 1) < 0)
                return 0;

        return mem_cache_zalloc(kernel_resources.ktcb_cache);
}

struct ktcb *alloc_ktcb(void)
{
        struct capability *cap;

        if (!(cap = capability_find_by_rtype(current,
                                             CAP_RTYPE_THREADPOOL)))
                return 0;

        if (capability_consume(cap, 1) < 0)
                return 0;

        return mem_cache_zalloc(kernel_resources.ktcb_cache);
}

/*
 * This version is boot-time only and has no capability
 * checking. Imagine the case where the initial capabilities
 * are being created and there is no capability yet against
 * which to check this allocation.
 */
struct capability *boot_alloc_capability(void)
{
        return mem_cache_zalloc(kernel_resources.cap_cache);
}

struct capability *alloc_capability(void)
{
        struct capability *cap;

        if (!(cap = capability_find_by_rtype(current,
                                             CAP_RTYPE_CAPPOOL)))
                return 0;

        if (capability_consume(cap, 1) < 0)
                return 0;

        return mem_cache_zalloc(kernel_resources.cap_cache);
}

struct container *alloc_container(void)
{
        return mem_cache_zalloc(kernel_resources.cont_cache);
}

struct mutex_queue *alloc_user_mutex(void)
{
        struct capability *cap;

        if (!(cap = capability_find_by_rtype(current,
                                             CAP_RTYPE_MUTEXPOOL)))
                return 0;

        if (capability_consume(cap, 1) < 0)
                return 0;

        return mem_cache_zalloc(kernel_resources.mutex_cache);
}
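
/*
 * Editor's note: apart from the boot-time variant, every
 * allocator above follows the same pattern: look up the current
 * task's quantitative pool capability by resource type, consume
 * one unit from its quota, then take a zeroed object from the
 * matching kernel memcache. A null return therefore means either
 * no such pool capability exists or its quota is exhausted.
 */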

void free_pgd(void *addr)
{
        BUG_ON(mem_cache_free(kernel_resources.pgd_cache, addr) < 0);
}

void free_pmd(void *addr)
{
        struct capability *cap;

        BUG_ON(!(cap = capability_find_by_rtype(current,
                                                CAP_RTYPE_MAPPOOL)));
        capability_free(cap, 1);

        BUG_ON(mem_cache_free(kernel_resources.pmd_cache, addr) < 0);
}

void free_space(void *addr, struct ktcb *task)
{
        struct capability *cap;

        BUG_ON(!(cap = capability_find_by_rtype(task,
                                                CAP_RTYPE_SPACEPOOL)));
        capability_free(cap, 1);

        BUG_ON(mem_cache_free(kernel_resources.space_cache, addr) < 0);
}

/*
 * Account the freed ktcb to the task's pager; if the pager
 * does not exist, account it to the current idle task.
 */
void free_ktcb(void *addr, struct ktcb *acc_task)
{
        struct capability *cap;

        /* Account it to the task's pager if it exists */
        BUG_ON(!(cap = capability_find_by_rtype(acc_task,
                                                CAP_RTYPE_THREADPOOL)));
        capability_free(cap, 1);

        BUG_ON(mem_cache_free(kernel_resources.ktcb_cache, addr) < 0);
}

void free_capability(void *addr)
{
        struct capability *cap;

        BUG_ON(!(cap = capability_find_by_rtype(current,
                                                CAP_RTYPE_CAPPOOL)));
        capability_free(cap, 1);

        BUG_ON(mem_cache_free(kernel_resources.cap_cache, addr) < 0);
}

void free_container(void *addr)
{
        BUG_ON(mem_cache_free(kernel_resources.cont_cache, addr) < 0);
}

void free_user_mutex(void *addr)
{
        struct capability *cap;

        BUG_ON(!(cap = capability_find_by_rtype(current,
                                                CAP_RTYPE_MUTEXPOOL)));
        capability_free(cap, 1);

        BUG_ON(mem_cache_free(kernel_resources.mutex_cache, addr) < 0);
}

/*
 * This splits a capability; the splitter region must lie in
 * the *middle* of the original capability.
 */
int memcap_split(struct capability *cap, struct cap_list *cap_list,
                 const unsigned long start,
                 const unsigned long end)
{
        struct capability *new;

        /* Allocate a capability first */
        new = alloc_bootmem(sizeof(*new), 0);

        /*
         * Some sanity checks to show that the splitter range does
         * end up producing two smaller caps.
         */
        BUG_ON(cap->start >= start || cap->end <= end);

        /* Update new and original caps */
        new->end = cap->end;
        new->start = end;
        cap->end = start;
        new->access = cap->access;

        /* Add the new one next to the original cap */
        cap_list_insert(new, cap_list);

        return 0;
}
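
/*
 * Editor's note, worked example: a memory cap covering pfns
 * [10, 50) split around the region [20, 30) shrinks the
 * original cap to [10, 20) and inserts a new cap covering
 * [30, 50). The pfn values are hypothetical.
 */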

/* This shrinks the cap from *one* end only, either start or end */
int memcap_shrink(struct capability *cap, struct cap_list *cap_list,
                  const unsigned long start, const unsigned long end)
{
        /* Shrink from the end */
        if (cap->start < start) {
                BUG_ON(start >= cap->end);
                cap->end = start;

        /* Shrink from the beginning */
        } else if (cap->end > end) {
                BUG_ON(end <= cap->start);
                cap->start = end;
        } else
                BUG();

        return 0;
}

/*
 * Given a single memory cap that definitely overlaps the given
 * range, removes the portion of pfns specified by start/end.
 */
int memcap_unmap_range(struct capability *cap,
                       struct cap_list *cap_list,
                       const unsigned long start,
                       const unsigned long end)
{
        /* Split needed? */
        if (cap->start < start && cap->end > end)
                return memcap_split(cap, cap_list, start, end);
        /* Shrink needed? */
        else if (((cap->start >= start) && (cap->end > end))
                   || ((cap->start < start) && (cap->end <= end)))
                return memcap_shrink(cap, cap_list, start, end);
        /* Destroy needed? */
        else if ((cap->start >= start) && (cap->end <= end))
                /* Simply unlink it */
                list_remove(&cap->list);
        else
                BUG();

        return 0;
}
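
/*
 * Editor's note on the three cases above: the unmapped range
 * either lies strictly inside the cap (split it into two),
 * covers exactly one end of it (shrink from that end), or
 * covers the cap entirely (unlink it). No other overlap
 * relationship is possible for an intersecting range, hence
 * the final BUG().
 */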

/*
 * Unmaps the given memory range from the list of capabilities
 * by either shrinking, splitting or destroying the
 * intersecting capability. Similar to do_munmap()
 */
int memcap_unmap(struct cap_list *used_list,
                 struct cap_list *cap_list,
                 const unsigned long unmap_start,
                 const unsigned long unmap_end)
{
        struct capability *cap, *n;
        int ret;

        /*
         * If a used list was supplied, check that the
         * range does not intersect with the used list.
         * This is an optional sanity check.
         */
        if (used_list) {
                list_foreach_removable_struct(cap, n,
                                              &used_list->caps,
                                              list) {
                        if (set_intersection(unmap_start, unmap_end,
                                             cap->start, cap->end)) {
                                ret = -EPERM;
                                goto out_err;
                        }
                }
        }

        list_foreach_removable_struct(cap, n, &cap_list->caps, list) {
                /* Check for intersection */
                if (set_intersection(unmap_start, unmap_end,
                                     cap->start, cap->end)) {
                        if ((ret = memcap_unmap_range(cap, cap_list,
                                                      unmap_start,
                                                      unmap_end))) {
                                goto out_err;
                        }
                        return 0;
                }
        }
        ret = -EEXIST;

out_err:
        if (ret == -ENOMEM)
                printk("%s: FATAL: Insufficient boot memory "
                       "to split capability\n", __KERNELNAME__);
        else if (ret == -EPERM)
                printk("%s: FATAL: %s memory capability range "
                       "overlaps with an already used range. "
                       "start=0x%lx, end=0x%lx\n", __KERNELNAME__,
                       cap_type(cap) == CAP_TYPE_MAP_VIRTMEM ?
                       "Virtual" : "Physical",
                       __pfn_to_addr(cap->start),
                       __pfn_to_addr(cap->end));
        else if (ret == -EEXIST)
                printk("%s: FATAL: %s memory capability range "
                       "does not match any available free range. "
                       "start=0x%lx, end=0x%lx\n", __KERNELNAME__,
                       cap_type(cap) == CAP_TYPE_MAP_VIRTMEM ?
                       "Virtual" : "Physical",
                       __pfn_to_addr(cap->start),
                       __pfn_to_addr(cap->end));
        BUG();
}

/*
 * Finds a device memory capability and deletes it from
 * the available device capabilities list
 */
int memcap_request_device(struct cap_list *cap_list,
                          struct cap_info *devcap)
{
        struct capability *cap, *n;

        list_foreach_removable_struct(cap, n, &cap_list->caps, list) {
                if (cap->start == devcap->start &&
                    cap->end == devcap->end &&
                    cap_is_devmem(cap)) {
                        /* Unlink only. This is boot memory */
                        list_remove(&cap->list);
                        return 0;
                }
        }
        printk("%s: FATAL: Device memory requested "
               "does not match any available device "
               "capabilities start=0x%lx, end=0x%lx "
               "attr=0x%x\n", __KERNELNAME__,
               __pfn_to_addr(devcap->start),
               __pfn_to_addr(devcap->end), devcap->attr);
        BUG();
}

/*
 * TODO: Evaluate if access bits are needed and add new cap ranges
 * only if their access bits match.
 *
 * Maps a memory range as a capability to a list of capabilities,
 * either by merging the given range into an existing capability
 * or by creating a new one.
 */
int memcap_map(struct cap_list *cap_list,
               const unsigned long map_start,
               const unsigned long map_end)
{
        struct capability *cap, *n;

        list_foreach_removable_struct(cap, n, &cap_list->caps, list) {
                if (cap->start == map_end) {
                        cap->start = map_start;
                        return 0;
                } else if (cap->end == map_start) {
                        cap->end = map_end;
                        return 0;
                }
        }

        /* No capability could be extended; create a new one */
        cap = alloc_capability();
        cap->start = map_start;
        cap->end = map_end;
        link_init(&cap->list);
        cap_list_insert(cap, cap_list);

        return 0;
}
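
/*
 * Editor's note, worked example: mapping pfns [20, 30) into a
 * list that already holds a cap covering [10, 20) simply
 * extends that cap to [10, 30); only when no existing cap is
 * adjacent at either boundary is a fresh capability allocated.
 * The pfn values are hypothetical.
 */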

/* Delete all boot memory and add it to the physical memory pool. */
int free_boot_memory(struct kernel_resources *kres)
{
        struct container *c;
        unsigned long pfn_start =
                __pfn(virt_to_phys(_start_init));
        unsigned long pfn_end =
                __pfn(page_align_up(virt_to_phys(_end_init)));
        unsigned long init_pfns = pfn_end - pfn_start;

        /* Trim the kernel used memory cap */
        memcap_unmap(0, &kres->physmem_used, pfn_start, pfn_end);

        /* Add it to unused physical memory */
        memcap_map(&kres->physmem_free, pfn_start, pfn_end);

        /* Remove the init memory from the page tables */
        for (unsigned long i = pfn_start; i < pfn_end; i++)
                remove_mapping(phys_to_virt(__pfn_to_addr(i)));

        /* Reset pointers that will remain in the system, as a precaution */
        list_foreach_struct(c, &kres->containers.list, list)
                c->pager = 0;

        printk("%s: Freed %lu KB init memory, "
               "of which %lu KB was used.\n",
               __KERNELNAME__, init_pfns * 4,
               (init_pfns -
                __pfn(page_align_up(bootmem_free_pages()))) * 4);

        return 0;
}
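
/*
 * Editor's note: the "* 4" conversions in the printk above turn
 * page counts into kilobytes, which suggests this configuration
 * uses 4 KB pages.
 */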

/*
 * Initializes kernel caplists, and sets up the total of physical
 * and virtual memory as single capabilities of the kernel.
 * They will then get split into caps of different lengths
 * during the traversal of container capabilities and memcache
 * allocations.
 */
void init_kernel_resources(struct kernel_resources *kres)
{
        struct capability *physmem, *virtmem, *kernel_area;

        /* Initialize system id pools */
        kres->space_ids.nwords = SYSTEM_IDS_MAX;
        kres->ktcb_ids.nwords = SYSTEM_IDS_MAX;
        kres->resource_ids.nwords = SYSTEM_IDS_MAX;
        kres->container_ids.nwords = SYSTEM_IDS_MAX;
        kres->mutex_ids.nwords = SYSTEM_IDS_MAX;
        kres->capability_ids.nwords = SYSTEM_IDS_MAX;

        /* Initialize container head */
        container_head_init(&kres->containers);

        /* Initialize kernel capability lists */
        cap_list_init(&kres->physmem_used);
        cap_list_init(&kres->physmem_free);
        cap_list_init(&kres->virtmem_used);
        cap_list_init(&kres->virtmem_free);
        cap_list_init(&kres->devmem_used);
        cap_list_init(&kres->devmem_free);
        cap_list_init(&kres->non_memory_caps);

        /* Set up total physical memory as a single capability */
        physmem = alloc_bootmem(sizeof(*physmem), 0);
        physmem->start = __pfn(PLATFORM_PHYS_MEM_START);
        physmem->end = __pfn(PLATFORM_PHYS_MEM_END);
        link_init(&physmem->list);
        cap_list_insert(physmem, &kres->physmem_free);

        /* Set up total virtual memory as a single capability */
        virtmem = alloc_bootmem(sizeof(*virtmem), 0);
        virtmem->start = __pfn(VIRT_MEM_START);
        virtmem->end = __pfn(VIRT_MEM_END);
        link_init(&virtmem->list);
        cap_list_insert(virtmem, &kres->virtmem_free);

        /* Set up the kernel used area as a single capability */
        kernel_area = alloc_bootmem(sizeof(*kernel_area), 0);
        kernel_area->start = __pfn(virt_to_phys(_start_kernel));
        kernel_area->end = __pfn(virt_to_phys(_end_kernel));
        link_init(&kernel_area->list);
        cap_list_insert(kernel_area, &kres->physmem_used);

        /* Unmap the kernel used area from free physical memory capabilities */
        memcap_unmap(0, &kres->physmem_free, kernel_area->start,
                     kernel_area->end);

        /* Set up platform-specific device capabilities */
        platform_setup_device_caps(kres);

        /* TODO:
         * Add all virtual memory areas used by the kernel,
         * e.g. kernel virtual area, syscall page, kip page,
         * vectors page, timer, sysctl and uart device pages
         */
}

/*
 * Copies cinfo structures to real capabilities for each pager.
 */
int copy_pager_info(struct pager *pager, struct pager_info *pinfo)
{
        struct capability *cap;
        struct cap_info *cap_info;

        pager->start_address = pinfo->start_address;
        pager->start_lma = __pfn_to_addr(pinfo->pager_lma);
        pager->start_vma = __pfn_to_addr(pinfo->pager_vma);
        pager->memsize = __pfn_to_addr(pinfo->pager_size);
        pager->rw_sections_start = pinfo->rw_sections_start;
        pager->rw_sections_end = pinfo->rw_sections_end;
        pager->rx_sections_start = pinfo->rx_sections_start;
        pager->rx_sections_end = pinfo->rx_sections_end;

        /* Copy all cinfo structures into real capabilities */
        for (int i = 0; i < pinfo->ncaps; i++) {
                cap = boot_capability_create();

                cap_info = &pinfo->caps[i];

                cap->resid = cap_info->target;
                cap->type = cap_info->type;
                cap->access = cap_info->access;
                cap->start = cap_info->start;
                cap->end = cap_info->end;
                cap->size = cap_info->size;
                cap->attr = cap_info->attr;
                cap->irq = cap_info->irq;

                cap_list_insert(cap, &pager->cap_list);
        }

        /*
         * Check if the pager has enough resources to create its caps:
         *
         * Find the pager's capability pool capability and check its
         * current use count before initializing it.
         */
        cap = cap_list_find_by_rtype(&pager->cap_list,
                                     CAP_RTYPE_CAPPOOL);

        /* Verify that we did not allocate in excess */
        if (!cap || cap->size < pinfo->ncaps) {
                printk("FATAL: Pager needs more capabilities "
                       "than allocated for initialization.\n");
                BUG();
        }

        /*
         * Initialize the used count. The rest of the spending
         * checks on this cap will be done in the cap syscall.
         */
        cap->used = pinfo->ncaps;

        return 0;
}

/*
 * Copies container info from a given compact container descriptor to
 * a real container
 */
int copy_container_info(struct container *c, struct container_info *cinfo)
{
        strncpy(c->name, cinfo->name, CONFIG_CONTAINER_NAMESIZE);
        c->npagers = cinfo->npagers;

        /* Copy pager information, including capabilities */
        for (int i = 0; i < c->npagers; i++)
                copy_pager_info(&c->pager[i], &cinfo->pager[i]);

        return 0;
}

/*
 * Copy boot-time allocated kernel capabilities to ones that
 * are allocated from the capability memcache
 */
void copy_boot_capabilities(struct cap_list *caplist)
{
        struct capability *bootcap, *n, *realcap;

        /* For every bootmem-allocated capability */
        list_foreach_removable_struct(bootcap, n,
                                      &caplist->caps,
                                      list) {
                /* Create new one from capability cache */
                realcap = capability_create();

                /* Copy all fields except id to real */
                realcap->owner = bootcap->owner;
                realcap->resid = bootcap->resid;
                realcap->type = bootcap->type;
                realcap->access = bootcap->access;
                realcap->start = bootcap->start;
                realcap->end = bootcap->end;
                realcap->size = bootcap->size;
                realcap->attr = bootcap->attr;
                realcap->irq = bootcap->irq;

                /* Unlink boot one */
                list_remove(&bootcap->list);

                /* Add real one to head */
                list_insert(&realcap->list,
                            &caplist->caps);
        }
}

/*
 * Creates capabilities allocated with a real id, and from the
 * capability cache, in place of ones allocated at boot-time.
 */
void setup_kernel_resources(struct boot_resources *bootres,
                            struct kernel_resources *kres)
{
        struct capability *cap;
        struct container *container;
        //pgd_table_t *current_pgd;

        /* First initialize the list of non-memory capabilities */
        cap = boot_capability_create();
        cap->type = CAP_TYPE_QUANTITY | CAP_RTYPE_MAPPOOL;
        cap->size = bootres->nkpmds;
        cap->owner = kres->cid;
        cap_list_insert(cap, &kres->non_memory_caps);

        cap = boot_capability_create();
        cap->type = CAP_TYPE_QUANTITY | CAP_RTYPE_SPACEPOOL;
        cap->size = bootres->nkpgds;
        cap->owner = kres->cid;
        cap_list_insert(cap, &kres->non_memory_caps);

        cap = boot_capability_create();
        cap->type = CAP_TYPE_QUANTITY | CAP_RTYPE_CAPPOOL;
        cap->size = bootres->nkcaps;
        cap->owner = kres->cid;
        cap->used = 3;
        cap_list_insert(cap, &kres->non_memory_caps);

        /* Set up a dummy current cap-list for the functions below to use */
        cap_list_move(&current->cap_list, &kres->non_memory_caps);

        copy_boot_capabilities(&kres->physmem_used);
        copy_boot_capabilities(&kres->physmem_free);
        copy_boot_capabilities(&kres->virtmem_used);
        copy_boot_capabilities(&kres->virtmem_free);
        copy_boot_capabilities(&kres->devmem_used);
        copy_boot_capabilities(&kres->devmem_free);

        /*
         * Move to real page tables, accounted by
         * pgds and pmds provided from the caches.
         *
         * We do not want to delay this too much,
         * since we want to avoid allocating an uncertain
         * amount of memory from the boot allocators.
         */
        // current_pgd = arch_realloc_page_tables();

        /* Move it back */
        cap_list_move(&kres->non_memory_caps, &current->cap_list);

        /*
         * Setting up ids used internally.
         *
         * See how many containers we have. Assign the next
         * unused container id for kernel resources.
         */
        kres->cid = id_get(&kres->container_ids, bootres->nconts + 1);
        // kres->cid = id_get(&kres->container_ids, 0); // Gets id 0

        /*
         * Assign thread and space ids to current, which will later
         * become the idle task
         */
        current->tid = id_new(&kres->ktcb_ids);
        current->space->spid = id_new(&kres->space_ids);

        /*
         * Init per-cpu zombie lists
         */
        for (int i = 0; i < CONFIG_NCPU; i++)
                init_ktcb_list(&per_cpu_byid(kres->zombie_list, i));

        /*
         * Create real containers from the compile-time created
         * cinfo structures
         */
        for (int i = 0; i < bootres->nconts; i++) {
                /* Allocate & init container */
                container = container_create();

                /* Fill in its information */
                copy_container_info(container, &cinfo[i]);

                /* Add it to the kernel resources list */
                kres_insert_container(container, kres);
        }

        /* Initialize pagers */
        container_init_pagers(kres);
}

/*
 * Given a structure size and count, initializes a memory cache
 * using free memory available from the free kernel memory
 * capabilities.
 */
struct mem_cache *init_resource_cache(int nstruct, int struct_size,
                                      struct kernel_resources *kres,
                                      int aligned)
{
        struct capability *cap;
        unsigned long bufsize;

        /* In all unused physical memory regions */
        list_foreach_struct(cap, &kres->physmem_free.caps, list) {
                /* Get the buffer size needed for the cache */
                bufsize = mem_cache_bufsize((void *)__pfn_to_addr(cap->start),
                                            struct_size, nstruct,
                                            aligned);
                /*
                 * Check if the memcap region size is enough to cover
                 * the resource allocation
                 */
                if (__pfn_to_addr(cap->end - cap->start) >= bufsize) {
                        unsigned long virtual =
                                phys_to_virt(__pfn_to_addr(cap->start));
                        /*
                         * Map the buffer as a boot mapping if the pmd
                         * caches are not initialized yet
                         */
                        if (!kres->pmd_cache) {
                                add_boot_mapping(__pfn_to_addr(cap->start),
                                                 virtual,
                                                 page_align_up(bufsize),
                                                 MAP_KERN_RW);
                        } else {
                                add_mapping_pgd(__pfn_to_addr(cap->start),
                                                virtual, page_align_up(bufsize),
                                                MAP_KERN_RW, &init_pgd);
                        }
                        /* Unmap the area from the memcap */
                        memcap_unmap_range(cap, &kres->physmem_free,
                                           cap->start, cap->start +
                                           __pfn(page_align_up(bufsize)));

                        /* TODO: Manipulate memcaps for virtual range??? */

                        /* Initialize the cache */
                        return mem_cache_init((void *)virtual, bufsize,
                                              struct_size, aligned);
                }
        }
        return 0;
}
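
/*
 * Editor's note: each cache is carved from the front of the
 * first free-physmem capability large enough to hold it. The
 * carved pages are mapped, removed from the free memcap via
 * memcap_unmap_range(), and handed to the memcache allocator,
 * so each later cache initialization sees a correspondingly
 * smaller free region.
 */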

/*
 * TODO: Initialize ID cache.
 *
 * Given kernel resources and the set of boot resources required,
 * initializes all memory caches for allocations. Once the caches
 * are initialized, earlier boot allocations are migrated to them.
 */
void init_resource_allocators(struct boot_resources *bootres,
                              struct kernel_resources *kres)
{
        /*
         * An extra space reserved for the kernel
         * in case all containers quit
         */
        bootres->nspaces++;
        bootres->nkpgds++;

        /* Initialize PGD cache */
        kres->pgd_cache =
                init_resource_cache(bootres->nspaces,
                                    PGD_SIZE, kres, 1);

        /* Initialize struct address_space cache */
        kres->space_cache =
                init_resource_cache(bootres->nspaces,
                                    sizeof(struct address_space),
                                    kres, 0);

        /* Initialize ktcb cache */
        kres->ktcb_cache =
                init_resource_cache(bootres->nthreads,
                                    PAGE_SIZE, kres, 1);

        /* Initialize umutex cache */
        kres->mutex_cache =
                init_resource_cache(bootres->nmutex,
                                    sizeof(struct mutex_queue),
                                    kres, 0);

        /* Initialize container cache */
        kres->cont_cache =
                init_resource_cache(bootres->nconts,
                                    sizeof(struct container),
                                    kres, 0);

        /*
         * Add all caps used by the kernel.
         * Two extra in case more memcaps get split after
         * the cap cache init below. Three extra for the
         * quantitative kernel caps for pmds, pgds, caps.
         */
        bootres->nkcaps += kres->virtmem_used.ncaps +
                           kres->virtmem_free.ncaps +
                           kres->physmem_used.ncaps +
                           kres->physmem_free.ncaps +
                           kres->devmem_free.ncaps  +
                           kres->devmem_used.ncaps  + 2 + 3;

        /* Add that to the count of all caps */
        bootres->ncaps += bootres->nkcaps;

        /* Initialize capability cache */
        kres->cap_cache =
                init_resource_cache(bootres->ncaps,
                                    sizeof(struct capability),
                                    kres, 0);

        /* Count boot pmds used so far and add them */
        bootres->nkpmds += pgd_count_boot_pmds();

        /*
         * Calculate the maximum possible number of pmds that may
         * be used during this pmd cache initialization and add them.
         */
        bootres->nkpmds += ((bootres->npmds * PMD_SIZE) / PMD_MAP_SIZE);
        if (!is_aligned(bootres->npmds * PMD_SIZE,
                        PMD_MAP_SIZE))
                bootres->nkpmds++;

        /* Add kernel pmds to the count of all pmds */
        bootres->npmds += bootres->nkpmds;

        /* Initialize PMD cache */
        kres->pmd_cache =
                init_resource_cache(bootres->npmds,
                                    PMD_SIZE, kres, 1);
}
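
/*
 * Editor's note, worked example: the pmd overhead above is
 * ceil(npmds * PMD_SIZE / PMD_MAP_SIZE), i.e. the extra pmds
 * needed to map the pmd cache buffer itself. With hypothetical
 * values npmds = 300, PMD_SIZE = 1 KB and PMD_MAP_SIZE = 1 MB,
 * the 300 KB buffer is unaligned to 1 MB, so one extra pmd is
 * reserved.
 */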

/*
 * Do all system accounting for a given capability info
 * structure that belongs to a container, such as
 * counting its resource requirements, removing its portion
 * from global kernel resource capabilities etc.
 */
int process_cap_info(struct cap_info *cap,
                     struct boot_resources *bootres,
                     struct kernel_resources *kres)
{
        int ret = 0;

        switch (cap_rtype(cap)) {
        case CAP_RTYPE_THREADPOOL:
                bootres->nthreads += cap->size;
                break;

        case CAP_RTYPE_SPACEPOOL:
                bootres->nspaces += cap->size;
                break;

        case CAP_RTYPE_MUTEXPOOL:
                bootres->nmutex += cap->size;
                break;

        case CAP_RTYPE_MAPPOOL:
                /* Specifies how many pmds can be mapped */
                bootres->npmds += cap->size;
                break;

        case CAP_RTYPE_CAPPOOL:
                /* Specifies how many new caps can be created */
                bootres->ncaps += cap->size;
                break;
        }

        if (cap_type(cap) == CAP_TYPE_MAP_VIRTMEM) {
                memcap_unmap(&kres->virtmem_used,
                             &kres->virtmem_free,
                             cap->start, cap->end);
        } else if (cap_type(cap) == CAP_TYPE_MAP_PHYSMEM) {
                if (!cap_is_devmem(cap))
                        memcap_unmap(&kres->physmem_used,
                                     &kres->physmem_free,
                                     cap->start, cap->end);
                else /* Delete the device from the free list */
                        memcap_request_device(&kres->devmem_free, cap);
        }

        return ret;
}

/*
 * Initializes the kernel resources by describing both virtual
 * and physical memory. Then traverses cap_info structures
 * to figure out resource requirements of containers.
 */
int setup_boot_resources(struct boot_resources *bootres,
                         struct kernel_resources *kres)
{
        struct cap_info *cap;

        init_kernel_resources(kres);

        /* Number of containers known at compile-time */
        bootres->nconts = CONFIG_CONTAINERS;

        /* Traverse all containers */
        for (int i = 0; i < bootres->nconts; i++) {
                /* Traverse all pagers */
                for (int j = 0; j < cinfo[i].npagers; j++) {
                        int ncaps = cinfo[i].pager[j].ncaps;

                        /* Count all capabilities */
                        bootres->ncaps += ncaps;

                        /* Count all resources */
                        for (int k = 0; k < ncaps; k++) {
                                cap = &cinfo[i].pager[j].caps[k];
                                process_cap_info(cap, bootres, kres);
                        }
                }
        }

        return 0;
}

/*
 * Initializes all system resources and the handling of those
 * resources. Descriptions are first built by allocating from
 * boot memory; once the memory caches are initialized, boot
 * memory allocations are migrated over to the caches.
 */
int init_system_resources(struct kernel_resources *kres)
{
        struct boot_resources bootres;

        memset(&bootres, 0, sizeof(bootres));

        setup_boot_resources(&bootres, kres);

        init_resource_allocators(&bootres, kres);

        setup_kernel_resources(&bootres, kres);

        return 0;
}