/*
 * src/arch/arm/v6/mm.c
 *
 * Copyright (C) 2007 Bahadir Balban
 */
#include <l4/lib/printk.h>
#include <l4/lib/mutex.h>
#include <l4/lib/string.h>
#include <l4/generic/scheduler.h>
#include <l4/generic/space.h>
#include <l4/generic/bootmem.h>
#include <l4/generic/resource.h>
#include <l4/api/errno.h>
#include INC_SUBARCH(mm.h)
#include INC_SUBARCH(mmu_ops.h)
#include INC_GLUE(memory.h)
#include INC_PLAT(printascii.h)
#include INC_GLUE(memlayout.h)
#include INC_ARCH(linker.h)
#include INC_ARCH(asm.h)
#include INC_API(kip.h)

/*
 * These are indices into arrays with pgd_t or pmd_t sized elements;
 * the byte offset must therefore be divided by the element size.
 */
#define PGD_INDEX(x)            (((((unsigned long)(x)) >> 18) & 0x3FFC) / sizeof(pgd_t))
/* Strip out the page offset in this megabyte from a total of 256 pages. */
#define PMD_INDEX(x)            (((((unsigned long)(x)) >> 10) & 0x3FC) / sizeof(pmd_t))
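
/*
 * A worked example (illustrative only, assuming 4-byte pgd_t/pmd_t
 * entries and the usual ARM 4KB page / 1MB section layout): for
 * vaddr = 0x12345678, PGD_INDEX() yields bits [31:20], the 1MB section
 * number, and PMD_INDEX() yields bits [19:12], the 4KB page number
 * within that section:
 *
 *      PGD_INDEX(0x12345678) = ((0x12345678 >> 18) & 0x3FFC) / 4
 *                            = 0x48C / 4 = 0x123
 *      PMD_INDEX(0x12345678) = ((0x12345678 >> 10) & 0x3FC) / 4
 *                            = 0x114 / 4 = 0x45
 */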
 
/*
 * Removes an initial mapping needed for the transition to virtual
 * memory. Used one time only.
 */
void remove_section_mapping(unsigned long vaddr)
{
        pgd_table_t *pgd = &init_pgd;
        pgd_t pgd_i = PGD_INDEX(vaddr);

        /* Hang if this is not a section mapping */
        if ((pgd->entry[pgd_i] & PGD_TYPE_MASK) != PGD_TYPE_SECTION)
                while (1);

        /* Replace the section entry with a fault (unmapped) entry */
        pgd->entry[pgd_i] = 0;
        pgd->entry[pgd_i] |= PGD_TYPE_FAULT;
        arm_invalidate_tlb();
}
 
/*
 * Maps the section-aligned @paddr to @vaddr, using as many 1MB section
 * units as needed to cover @size (given in sections). Note that this
 * overwrites any previous mapping of the same virtual address.
 */
void __add_section_mapping_init(unsigned int paddr,
                                unsigned int vaddr,
                                unsigned int size,
                                unsigned int flags)
{
        pte_t *ppte;
        unsigned int l1_ptab;
        unsigned int l1_offset;

        /* 1st level page table address */
        l1_ptab = virt_to_phys(&init_pgd);

        /* Get the section offset for this vaddr */
        l1_offset = (vaddr >> 18) & 0x3FFC;

        /* The beginning entry for the mapping */
        ppte = (pte_t *)(l1_ptab + l1_offset);
        for (int i = 0; i < size; i++) {
                *ppte = 0;                      /* Clear out the old value */
                *ppte |= paddr;                 /* Assign physical address */
                *ppte |= PGD_TYPE_SECTION;      /* Assign translation type */
                /* Domain is 0, therefore no writes. */
                /* Only kernel access allowed */
                *ppte |= (SVC_RW_USR_NONE << SECTION_AP0);
                /* Cacheability/bufferability flags */
                *ppte |= flags;
                ppte++;                         /* Next section entry */
                paddr += ARM_SECTION_SIZE;      /* Next physical section */
        }
}
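
/*
 * For reference, the ARM first-level section descriptor layout assumed
 * by the loop above (see the ARM Architecture Reference Manual for the
 * authoritative bit assignments):
 *
 *      [31:20] section base address
 *      [11:10] AP access permissions (shifted in via SECTION_AP0)
 *      [8:5]   domain
 *      [3:2]   C/B cacheable/bufferable bits
 *      [1:0]   type (0b10 = section, 0b01 = coarse table, 0b00 = fault)
 */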
 
void add_section_mapping_init(unsigned int paddr, unsigned int vaddr,
                              unsigned int size, unsigned int flags)
{
        unsigned int psection;
        unsigned int vsection;

        /* Align each address down to the section it resides in */
        psection = paddr & ~ARM_SECTION_MASK;
        vsection = vaddr & ~ARM_SECTION_MASK;

        if (size == 0)
                return;

        __add_section_mapping_init(psection, vsection, size, flags);
}
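
/*
 * A minimal usage sketch (illustrative only; the real call sites are in
 * the boot path, and phys_base/cb_flags are placeholders): mapping 2MB
 * of physical memory at the start of the kernel area would take two
 * 1MB sections, with @size counting sections rather than bytes:
 *
 *      add_section_mapping_init(phys_base, KERNEL_AREA_START,
 *                               2, cb_flags);
 *
 * Note that cb_flags here are raw section-descriptor cache/buffer bits,
 * not the generic space flags used by add_mapping() below.
 */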
 
/* TODO: Make sure to flush the tlb entry and caches */
void __add_mapping(unsigned int paddr, unsigned int vaddr,
                   unsigned int flags, pmd_table_t *pmd)
{
        unsigned int pmd_i = PMD_INDEX(vaddr);

        pmd->entry[pmd_i] = paddr;
        pmd->entry[pmd_i] |= PMD_TYPE_SMALL;    /* Small page type */
        pmd->entry[pmd_i] |= flags;

        /* TODO: Are both of these required? Investigate. */

        /* TEST:
         * I think cleaning or invalidating the cache is not required
         * here, because the cache cannot yet hold entries for the new
         * mapping. It is required when a mapping is removed, but not
         * when one is newly added.
         */
        arm_clean_invalidate_cache();

        /* TEST: the tlb must be flushed because a new mapping is present
         * in the page tables, and the tlb is inconsistent with them */
        arm_invalidate_tlb();
}
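
/*
 * For reference, the ARM second-level small page descriptor layout
 * assumed by __add_mapping() (see the ARM Architecture Reference Manual
 * for the authoritative bit assignments):
 *
 *      [31:12] page base address
 *      [11:4]  AP3-AP0 per-subpage access permissions
 *      [3:2]   C/B cacheable/bufferable bits
 *      [1:0]   type (0b10 = small page, 0b01 = large page, 0b00 = fault)
 */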
 
/*
 * Returns the pmd associated with @vaddr if one is mapped on the pgd,
 * and 0 otherwise.
 */
pmd_table_t *pmd_exists(pgd_table_t *pgd, unsigned long vaddr)
{
        unsigned int pgd_i = PGD_INDEX(vaddr);

        switch (pgd->entry[pgd_i] & PGD_TYPE_MASK) {
                case PGD_TYPE_COARSE:
                        return (pmd_table_t *)
                               phys_to_virt((pgd->entry[pgd_i] &
                                            PGD_COARSE_ALIGN_MASK));

                case PGD_TYPE_FAULT:
                        return 0;

                case PGD_TYPE_SECTION:
                        dprintk("Warning, a section is already mapped "
                                "where a coarse page mapping is attempted:",
                                (u32)(pgd->entry[pgd_i]
                                      & PGD_SECTION_ALIGN_MASK));
                        BUG();
                        break;

                case PGD_TYPE_FINE:
                        dprintk("Warning, a fine page table is already mapped "
                                "where a coarse page mapping is attempted:",
                                (u32)(pgd->entry[pgd_i]
                                      & PGD_FINE_ALIGN_MASK));
                        printk("Fine tables are unsupported. ");
                        printk("What is this doing here?");
                        BUG();
                        break;

                default:
                        dprintk("Unrecognised pgd type @ pgd index:", pgd_i);
                        BUG();
                        break;
        }
        return 0;
}
 
/* Converts a virtual address to its pte if it exists in the page tables. */
pte_t virt_to_pte_from_pgd(unsigned long virtual, pgd_table_t *pgd)
{
        pmd_table_t *pmd = pmd_exists(pgd, virtual);

        if (pmd)
                return (pte_t)pmd->entry[PMD_INDEX(virtual)];
        else
                return (pte_t)0;
}

/* Same lookup, using the currently running task's page tables. */
pte_t virt_to_pte(unsigned long virtual)
{
        return virt_to_pte_from_pgd(virtual, TASK_PGD(current));
}
 
unsigned long virt_to_phys_by_pgd(unsigned long vaddr, pgd_table_t *pgd)
{
        pte_t pte = virt_to_pte_from_pgd(vaddr, pgd);

        /* Strip the pte flag bits, leaving the physical page base */
        return pte & ~PAGE_MASK;
}

unsigned long virt_to_phys_by_task(unsigned long vaddr, struct ktcb *task)
{
        return virt_to_phys_by_pgd(vaddr, TASK_PGD(task));
}
 
void attach_pmd(pgd_table_t *pgd, pmd_table_t *pmd, unsigned int vaddr)
{
        u32 pgd_i = PGD_INDEX(vaddr);
        u32 pmd_phys = virt_to_phys(pmd);

        /* Domain is 0, therefore no writes. */
        pgd->entry[pgd_i] = (pgd_t)pmd_phys;
        pgd->entry[pgd_i] |= PGD_TYPE_COARSE;
}
 
/*
 * Same as the normal mapping path, but with some boot-time tweaks.
 */
void add_boot_mapping(unsigned int paddr, unsigned int vaddr,
                      unsigned int size, unsigned int flags)
{
        pmd_table_t *pmd;
        pgd_table_t *pgd = &init_pgd;
        unsigned int numpages = (size >> PAGE_BITS);

        if (size < PAGE_SIZE) {
                printascii("Error: Mapping size must be in bytes not pages.\n");
                while (1);
        }
        if (size & PAGE_MASK)
                numpages++;

        /* Convert generic map flags to pagetable-specific flags */
        BUG_ON(!(flags = space_flags_to_ptflags(flags)));

        /* Map all consecutive pages that cover the given size */
        for (int i = 0; i < numpages; i++) {
                /* Check if another mapping already has a pmd attached. */
                pmd = pmd_exists(pgd, vaddr);
                if (!pmd) {
                        /*
                         * If this is the first vaddr in
                         * this pmd, allocate a new pmd
                         */
                        pmd = alloc_boot_pmd();

                        /* Attach the pmd to its entry in the pgd */
                        attach_pmd(pgd, pmd, vaddr);
                }

                /* Attach paddr to this pmd */
                __add_mapping(page_align(paddr),
                              page_align(vaddr), flags, pmd);

                /* Go to the next page to be mapped */
                paddr += PAGE_SIZE;
                vaddr += PAGE_SIZE;
        }
}
 
/*
 * Maps @paddr to @vaddr, covering @size bytes, and allocates a new pmd
 * if necessary. This flavour explicitly supplies the pgd to modify,
 * which is useful when modifying the userspace of a process that is
 * not currently running. (This only makes sense for userspace mappings,
 * since kernel mappings are common to all tasks.)
 */
void add_mapping_pgd(unsigned int paddr, unsigned int vaddr,
                     unsigned int size, unsigned int flags,
                     pgd_table_t *pgd)
{
        pmd_table_t *pmd;
        unsigned int numpages = (size >> PAGE_BITS);

        if (size < PAGE_SIZE) {
                printascii("Error: Mapping size must be in bytes not pages.\n");
                while (1);
        }
        if (size & PAGE_MASK)
                numpages++;

        /* Convert generic map flags to pagetable-specific flags */
        BUG_ON(!(flags = space_flags_to_ptflags(flags)));

        /* Map all consecutive pages that cover the given size */
        for (int i = 0; i < numpages; i++) {
                /* Check if another mapping already has a pmd attached. */
                pmd = pmd_exists(pgd, vaddr);
                if (!pmd) {
                        /*
                         * If this is the first vaddr in
                         * this pmd, allocate a new pmd
                         */
                        pmd = alloc_pmd();

                        /* Attach the pmd to its entry in the pgd */
                        attach_pmd(pgd, pmd, vaddr);
                }

                /* Attach paddr to this pmd */
                __add_mapping(page_align(paddr),
                              page_align(vaddr), flags, pmd);

                /* Go to the next page to be mapped */
                paddr += PAGE_SIZE;
                vaddr += PAGE_SIZE;
        }
}
 
void add_mapping(unsigned int paddr, unsigned int vaddr,
                 unsigned int size, unsigned int flags)
{
        add_mapping_pgd(paddr, vaddr, size, flags, TASK_PGD(current));
}
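
/*
 * A minimal usage sketch (illustrative only; the physical address and
 * the flag name are placeholders, not taken from this kernel's memory
 * layout): mapping one device page into the current task's address
 * space would look like
 *
 *      add_mapping(0x10140000, USERSPACE_UART_BASE,
 *                  PAGE_SIZE, MAP_USR_IO_FLAGS);
 *
 * @size is in bytes; any partial trailing page is rounded up to a
 * whole page before mapping.
 */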
 
/*
 * Checks whether a virtual address range is mapped with flags that are
 * the same as or more permissive than the given ones. Returns 0 if
 * not, 1 if OK.
 */
int check_mapping_pgd(unsigned long vaddr, unsigned long size,
                      unsigned int flags, pgd_table_t *pgd)
{
        unsigned int npages = __pfn(align_up(size, PAGE_SIZE));
        pte_t pte;

        /* Convert generic map flags to pagetable-specific flags */
        BUG_ON(!(flags = space_flags_to_ptflags(flags)));

        for (int i = 0; i < npages; i++) {
                pte = virt_to_pte_from_pgd(vaddr + i * PAGE_SIZE, pgd);

                /* Check if the pte perms are equal to or greater
                 * than the given flags */
                if ((pte & PTE_PROT_MASK) >= (flags & PTE_PROT_MASK))
                        continue;
                else
                        return 0;
        }

        return 1;
}
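
/*
 * An illustrative consequence (the flag names are hypothetical, and
 * this assumes the numeric pte protection encoding grows with
 * permissiveness, as the comparison above requires): for a page mapped
 * user read-only,
 *
 *      check_mapping(vaddr, PAGE_SIZE, MAP_USR_RO_FLAGS);   returns 1
 *      check_mapping(vaddr, PAGE_SIZE, MAP_USR_RW_FLAGS);   returns 0
 *
 * i.e. a check passes only if the existing mapping grants at least the
 * requested permissions.
 */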
 
int check_mapping(unsigned long vaddr, unsigned long size,
                  unsigned int flags)
{
        return check_mapping_pgd(vaddr, size, flags, TASK_PGD(current));
}
 
/* FIXME: Empty pmds should be freed and returned here!!! */
int __remove_mapping(pmd_table_t *pmd, unsigned long vaddr)
{
        pmd_t pmd_i = PMD_INDEX(vaddr);
        int ret;

        switch (pmd->entry[pmd_i] & PMD_TYPE_MASK) {
                case PMD_TYPE_FAULT:
                        ret = -ENOENT;
                        break;
                case PMD_TYPE_LARGE:
                case PMD_TYPE_SMALL:
                        /* Replace the entry with a fault (unmapped) entry */
                        pmd->entry[pmd_i] = 0;
                        pmd->entry[pmd_i] |= PMD_TYPE_FAULT;
                        ret = 0;
                        break;
                default:
                        printk("Unknown page mapping in pmd. Assuming bug.\n");
                        BUG();
        }
        return ret;
}
 
/*
 * Tells whether a pgd index is a common kernel index. This is used to
 * distinguish common kernel entries in a pgd when copying page tables.
 */
int is_kern_pgdi(int i)
{
        if ((i >= PGD_INDEX(KERNEL_AREA_START) && i < PGD_INDEX(KERNEL_AREA_END)) ||
            (i >= PGD_INDEX(IO_AREA_START) && i < PGD_INDEX(IO_AREA_END)) ||
            (i == PGD_INDEX(USER_KIP_PAGE)) ||
            (i == PGD_INDEX(ARM_HIGH_VECTOR)) ||
            (i == PGD_INDEX(ARM_SYSCALL_VECTOR)) ||
            (i == PGD_INDEX(USERSPACE_UART_BASE)))
                return 1;
        else
                return 0;
}
 
/*
 * Removes all userspace mappings from a pgd, freeing any pmds that it
 * detects to be user pmds.
 */
int remove_mapping_pgd_all_user(pgd_table_t *pgd)
{
        pmd_table_t *pmd;

        /* Traverse all pgd entries */
        for (int i = 0; i < PGD_ENTRY_TOTAL; i++) {

                /* Detect a pgd entry that is not a kernel entry */
                if (!is_kern_pgdi(i)) {

                        /* Detect a pmd entry */
                        if ((pgd->entry[i] & PGD_TYPE_MASK)
                            == PGD_TYPE_COARSE) {

                                /* Obtain the user pmd handle */
                                pmd = (pmd_table_t *)
                                      phys_to_virt((pgd->entry[i] &
                                                    PGD_COARSE_ALIGN_MASK));
                                /* Free it */
                                free_pmd(pmd);
                        }

                        /* Clear the pgd entry */
                        pgd->entry[i] = PGD_TYPE_FAULT;
                }
        }

        return 0;
}
 
int remove_mapping_pgd(unsigned long vaddr, pgd_table_t *pgd)
{
        pgd_t pgd_i = PGD_INDEX(vaddr);
        pmd_table_t *pmd;
        int ret;

        /*
         * Clean the cache to main memory before removing the mapping.
         * Otherwise entries in the cache for this mapping will cause
         * translation faults if they're cleaned to main memory after
         * the mapping is removed.
         */
        arm_clean_invalidate_cache();

        /* TEST:
         * Can't think of a valid reason to flush tlbs here, but keeping
         * it just to be safe. REMOVE: Remove it if it's unnecessary.
         */
        arm_invalidate_tlb();

        switch (pgd->entry[pgd_i] & PGD_TYPE_MASK) {
                case PGD_TYPE_COARSE:
                        // printk("Removing coarse mapping @ 0x%x\n", vaddr);
                        pmd = (pmd_table_t *)
                              phys_to_virt((pgd->entry[pgd_i]
                                           & PGD_COARSE_ALIGN_MASK));
                        ret = __remove_mapping(pmd, vaddr);
                        break;

                case PGD_TYPE_FAULT:
                        ret = -1;
                        break;

                case PGD_TYPE_SECTION:
                        printk("Removing section mapping for 0x%lx\n",
                               vaddr);
                        pgd->entry[pgd_i] = 0;
                        pgd->entry[pgd_i] |= PGD_TYPE_FAULT;
                        ret = 0;
                        break;

                case PGD_TYPE_FINE:
                        printk("Table mapped is a fine page table.\n"
                               "Fine tables are unsupported. Assuming bug.\n");
                        BUG();
                        break;

                default:
                        dprintk("Unrecognised pgd type @ pgd index:", pgd_i);
                        printk("Assuming bug.\n");
                        BUG();
                        break;
        }
        /* The tlb must be invalidated here because it might have cached
         * the old translation for this mapping. */
        arm_invalidate_tlb();

        return ret;
}
 
int remove_mapping(unsigned long vaddr)
{
        return remove_mapping_pgd(vaddr, TASK_PGD(current));
}
 
int delete_page_tables(struct address_space *space)
{
        remove_mapping_pgd_all_user(space->pgd);
        free_pgd(space->pgd);
        return 0;
}
 
/*
 * Copies the userspace entries of one task to another. To do so it
 * allocates new pmds and copies the original values into the new ones.
 */
int copy_user_tables(struct address_space *new, struct address_space *orig_space)
{
        pgd_table_t *to = new->pgd, *from = orig_space->pgd;
        pmd_table_t *pmd, *orig;

        /* Allocate and copy all pmds that will be exclusive to the new task. */
        for (int i = 0; i < PGD_ENTRY_TOTAL; i++) {
                /* Detect a pmd entry that is not a kernel pmd */
                if (!is_kern_pgdi(i) &&
                    ((from->entry[i] & PGD_TYPE_MASK) == PGD_TYPE_COARSE)) {
                        /* Allocate a new pmd */
                        if (!(pmd = alloc_pmd()))
                                goto out_error;

                        /* Find the original pmd */
                        orig = (pmd_table_t *)
                                phys_to_virt((from->entry[i] &
                                              PGD_COARSE_ALIGN_MASK));

                        /* Copy the original to the new */
                        memcpy(pmd, orig, sizeof(pmd_table_t));

                        /* Replace the original pmd entry in the pgd */
                        to->entry[i] = (pgd_t)virt_to_phys(pmd);
                        to->entry[i] |= PGD_TYPE_COARSE;
                }
        }

        return 0;

out_error:
        /* Find all non-kernel pmds we have just allocated and free them */
        for (int i = 0; i < PGD_ENTRY_TOTAL; i++) {
                /* A non-kernel pmd that has just been allocated. */
                if (!is_kern_pgdi(i) &&
                    (to->entry[i] & PGD_TYPE_MASK) == PGD_TYPE_COARSE) {
                        /* Obtain the pmd handle */
                        pmd = (pmd_table_t *)
                              phys_to_virt((to->entry[i] &
                                            PGD_COARSE_ALIGN_MASK));
                        /* Free the pmd */
                        free_pmd(pmd);
                }
        }
        return -ENOMEM;
}
 
int pgd_count_pmds(pgd_table_t *pgd)
{
        int npmd = 0;

        for (int i = 0; i < PGD_ENTRY_TOTAL; i++)
                if ((pgd->entry[i] & PGD_TYPE_MASK) == PGD_TYPE_COARSE)
                        npmd++;
        return npmd;
}
 
/*
 * Allocates and copies all levels of page tables from one task to
 * another. Useful when forking.
 *
 * The copied page tables end up sharing the pmds for kernel entries
 * and having private copies of the same pmds for user entries.
 */
pgd_table_t *copy_page_tables(pgd_table_t *from)
{
        pmd_table_t *pmd, *orig;
        pgd_table_t *pgd;

        /* Allocate and copy the pgd. This includes all kernel entries. */
        if (!(pgd = alloc_pgd()))
                return PTR_ERR(-ENOMEM);

        /* First copy all pgd entries wholesale */
        memcpy(pgd, from, sizeof(pgd_table_t));

        /* Allocate and copy all pmds that will be exclusive to the new task. */
        for (int i = 0; i < PGD_ENTRY_TOTAL; i++) {
                /* Detect a pmd entry that is not a kernel pmd */
                if (!is_kern_pgdi(i) &&
                    ((pgd->entry[i] & PGD_TYPE_MASK) == PGD_TYPE_COARSE)) {
                        /* Allocate a new pmd */
                        if (!(pmd = alloc_pmd()))
                                goto out_error;

                        /* Find the original pmd */
                        orig = (pmd_table_t *)
                                phys_to_virt((pgd->entry[i] &
                                              PGD_COARSE_ALIGN_MASK));

                        /* Copy the original to the new */
                        memcpy(pmd, orig, sizeof(pmd_table_t));

                        /* Replace the original pmd entry in the pgd */
                        pgd->entry[i] = (pgd_t)virt_to_phys(pmd);
                        pgd->entry[i] |= PGD_TYPE_COARSE;
                }
        }

        return pgd;

out_error:
        /* Find all allocated non-kernel pmds and free them */
        for (int i = 0; i < PGD_ENTRY_TOTAL; i++) {
                /* A non-kernel pmd that has just been allocated. */
                if (!is_kern_pgdi(i) &&
                    (pgd->entry[i] & PGD_TYPE_MASK) == PGD_TYPE_COARSE) {
                        /* Obtain the pmd handle */
                        pmd = (pmd_table_t *)
                              phys_to_virt((pgd->entry[i] &
                                            PGD_COARSE_ALIGN_MASK));
                        /* Free the pmd */
                        free_pmd(pmd);
                }
        }
        /* Free the pgd */
        free_pgd(pgd);
        return PTR_ERR(-ENOMEM);
}
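
/*
 * A minimal sketch of the intended fork-time usage (illustrative; the
 * real call site lives in the task management code, "child" is a
 * placeholder, and IS_ERR() is assumed here as the counterpart of the
 * PTR_ERR() used above):
 *
 *      pgd_table_t *pgd = copy_page_tables(TASK_PGD(current));
 *      if (IS_ERR(pgd))
 *              return -ENOMEM;
 *      TASK_PGD(child) = pgd;
 */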
 
extern pmd_table_t *pmd_array;

/*
 * Switches from the boot pgd/pmd page tables to page tables allocated
 * from the kernel's allocation caches.
 */
pgd_table_t *realloc_page_tables(void)
{
        pgd_table_t *pgd_new = alloc_pgd();
        pgd_table_t *pgd_old = &init_pgd;
        pmd_table_t *orig, *pmd;

        /* Copy all pgd entries wholesale */
        memcpy(pgd_new, pgd_old, sizeof(pgd_table_t));

        /* Allocate and copy all pmds */
        for (int i = 0; i < PGD_ENTRY_TOTAL; i++) {
                /* Detect a pmd entry */
                if ((pgd_old->entry[i] & PGD_TYPE_MASK) == PGD_TYPE_COARSE) {
                        /* Allocate a new pmd */
                        if (!(pmd = alloc_pmd())) {
                                printk("FATAL: PMD allocation "
                                       "failed during system initialization\n");
                                BUG();
                        }

                        /* Find the original pmd */
                        orig = (pmd_table_t *)
                                phys_to_virt((pgd_old->entry[i] &
                                              PGD_COARSE_ALIGN_MASK));

                        /* Copy the original to the new */
                        memcpy(pmd, orig, sizeof(pmd_table_t));

                        /* Replace the original pmd entry in the pgd */
                        pgd_new->entry[i] = (pgd_t)virt_to_phys(pmd);
                        pgd_new->entry[i] |= PGD_TYPE_COARSE;
                }
        }

        /* Switch the virtual memory system over to the new tables */
        arm_clean_invalidate_cache();
        arm_drain_writebuffer();
        arm_invalidate_tlb();
        arm_set_ttb(virt_to_phys(pgd_new));
        arm_invalidate_tlb();

        printk("%s: Initial page tables moved from 0x%x to 0x%x physical\n",
               __KERNELNAME__, virt_to_phys(pgd_old),
               virt_to_phys(pgd_new));

        return pgd_new;
}
 
/*
 * Useful for upgrading to page-grained control over a section mapping:
 * remaps a section mapping in pages. It always allocates a new pmd
 * (there cannot be an existing pmd behind a section mapping), fills in
 * the page information, and replaces the direct section physical
 * translation with the address of the pmd. Flushes the caches/tlbs.
 */
void remap_as_pages(void *vstart, void *vend)
{
        unsigned long pstart = virt_to_phys(vstart);
        unsigned long pend = virt_to_phys(vend);
        unsigned long paddr = pstart;
        pgd_t pgd_i = PGD_INDEX(vstart);
        pmd_t pmd_i = PMD_INDEX(vstart);
        pgd_table_t *pgd = &init_pgd;
        pmd_table_t *pmd = alloc_boot_pmd();
        u32 pmd_phys = virt_to_phys(pmd);
        int numpages = __pfn(pend - pstart);

        /* Fill in the pmd first */
        for (int n = 0; n < numpages; n++) {
                pmd->entry[pmd_i + n] = paddr;
                pmd->entry[pmd_i + n] |= PMD_TYPE_SMALL; /* Small page type */
                pmd->entry[pmd_i + n] |= space_flags_to_ptflags(MAP_SVC_DEFAULT_FLAGS);
                paddr += PAGE_SIZE;
        }

        /* Fill in the type to produce a complete pmd translator entry */
        pmd_phys |= PGD_TYPE_COARSE;

        /* Make sure memory is coherent first. */
        arm_clean_invalidate_cache();
        arm_invalidate_tlb();

        /* Replace the direct section physical address with the pmd's address */
        pgd->entry[pgd_i] = (pgd_t)pmd_phys;

        printk("%s: Kernel area 0x%lx - 0x%lx remapped as %d pages\n",
               __KERNELNAME__, (unsigned long)vstart, (unsigned long)vend,
               numpages);
}
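
/*
 * A minimal usage sketch (illustrative; _start_text/_end_text are
 * placeholders for linker-script symbols such as those provided via
 * INC_ARCH(linker.h)): gaining page-grained control over the kernel
 * text area would look like
 *
 *      remap_as_pages(_start_text, _end_text);
 *
 * after which individual pages in that range can be remapped or
 * removed via add_mapping()/remove_mapping().
 */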
 
void copy_pgds_by_vrange(pgd_table_t *to, pgd_table_t *from,
                         unsigned long start, unsigned long end)
{
        unsigned long start_i = PGD_INDEX(start);
        unsigned long end_i = PGD_INDEX(end);

        /* If @end wraps around to 0, copy up to the last pgd entry */
        unsigned long irange = (end_i != 0) ? (end_i - start_i)
                               : (PGD_ENTRY_TOTAL - start_i);

        memcpy(&to->entry[start_i], &from->entry[start_i],
               irange * sizeof(pgd_t));
}
 
/* The scheduler uses this to switch address spaces on a context switch */
void arch_hardware_flush(pgd_table_t *pgd)
{
        arm_clean_invalidate_cache();
        arm_invalidate_tlb();
        arm_set_ttb(virt_to_phys(pgd));
        arm_invalidate_tlb();
}
