OpenCores
URL: https://opencores.org/ocsvn/c0or1k/c0or1k/trunk

Subversion Repositories: c0or1k
File: c0or1k/trunk/src/arch/arm/mapping-common.c (rev 2)
/*
 * Low-level page table functions that are common
 * and abstracted between v5-v7 ARM architectures
 *
 * Copyright (C) 2007 - 2010 B Labs Ltd.
 * Written by Bahadir Balban
 */

#include INC_SUBARCH(mm.h)
#include INC_SUBARCH(mmu_ops.h)
#include INC_GLUE(memory.h)
#include INC_GLUE(memlayout.h)
#include INC_ARCH(linker.h)
#include INC_GLUE(mapping.h)
#include <l4/generic/platform.h>
#include <l4/api/errno.h>
#include <l4/lib/printk.h>
#include <l4/generic/tcb.h>
#include <l4/generic/bootmem.h>
#include <l4/generic/space.h>
 
/* Find out whether a pmd exists or not and return it */
pmd_table_t *pmd_exists(pgd_table_t *task_pgd, unsigned long vaddr)
{
        pmd_t *pmd = arch_pick_pmd(task_pgd, vaddr);

        /*
         * Check that it has a valid pmd
         * (i.e. not a fault, not a section)
         */
        if ((*pmd & PMD_TYPE_MASK) == PMD_TYPE_PMD)
                return (pmd_table_t *)
                       phys_to_virt(*pmd & PMD_ALIGN_MASK);
        else if ((*pmd & PMD_TYPE_MASK) == 0)
                return 0;
        else
                BUG(); /* Anything that's not a pmd or a fault is a bug */
        return 0;
}
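/*
 * Note added for clarity (not in the original file): on the ARM short
 * descriptor format used by v5-v7, the low two bits of a first-level
 * entry encode its type: 0b00 fault, 0b01 coarse page table (what this
 * code calls a pmd), 0b10 section. pmd_exists() therefore returns the
 * page-table pointer for a pmd, returns 0 for a fault, and treats a
 * section entry as a BUG(), since the callers in this file only deal
 * with page-granular mappings.
 */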
 
/*
 * Convert virtual address to a pte from a task-specific pgd
 * FIXME: Remove this in favour of the ptep version; kept for now
 * because there is too much to test right now.
 */
pte_t virt_to_pte_from_pgd(pgd_table_t *task_pgd,
                           unsigned long virtual)
{
        pmd_table_t *pmd = pmd_exists(task_pgd, virtual);

        if (pmd)
                return (pte_t)pmd->entry[PMD_INDEX(virtual)];
        else
                return (pte_t)0;
}

/* Convert virtual address to a pte pointer from a task-specific pgd */
pte_t *virt_to_ptep_from_pgd(pgd_table_t *task_pgd,
                             unsigned long virtual)
{
        pmd_table_t *pmd = pmd_exists(task_pgd, virtual);

        if (pmd)
                return (pte_t *)&pmd->entry[PMD_INDEX(virtual)];
        else
                return (pte_t *)0;
}

/*
 * Convert a virtual address to a pte if it
 * exists in the page tables.
 */
pte_t virt_to_pte(unsigned long virtual)
{
        return virt_to_pte_from_pgd(TASK_PGD(current), virtual);
}

pte_t *virt_to_ptep(unsigned long virtual)
{
        return virt_to_ptep_from_pgd(TASK_PGD(current), virtual);
}
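/*
 * Illustrative sketch (added here for clarity, not in the original
 * file): a caller can use the lookup helpers above to test whether an
 * address is currently mapped in the running task. PTE_TYPE_MASK and
 * PTE_TYPE_FAULT are the same subarch macros used by
 * remove_mapping_pgd() further down.
 */
static inline int example_is_mapped(unsigned long vaddr)
{
        pte_t pte = virt_to_pte(vaddr);

        /* No pmd attached at all for this address */
        if (!pte)
                return 0;

        /* Otherwise mapped unless the pte is a fault entry */
        return (pte & PTE_TYPE_MASK) != PTE_TYPE_FAULT;
}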
 
unsigned long virt_to_phys_by_pgd(pgd_table_t *pgd, unsigned long vaddr)
{
        pte_t pte = virt_to_pte_from_pgd(pgd, vaddr);
        return pte & ~PAGE_MASK;
}

static inline unsigned long
virt_to_phys_by_task(struct ktcb *task, unsigned long vaddr)
{
        return virt_to_phys_by_pgd(TASK_PGD(task), vaddr);
}
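/*
 * Illustrative sketch (added here for clarity, not in the original
 * file): virt_to_phys_by_pgd() above returns only the page frame base,
 * i.e. the pte with its low PAGE_MASK bits cleared. A caller that
 * needs the exact physical address would add the in-page offset back.
 * This assumes PAGE_MASK masks the offset bits within a page, which is
 * consistent with its use in add_mapping_pgd() below.
 */
static inline unsigned long
example_exact_phys(pgd_table_t *pgd, unsigned long vaddr)
{
        return virt_to_phys_by_pgd(pgd, vaddr) | (vaddr & PAGE_MASK);
}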
 
/*
 * Attaches a pmd to either a task or the global pgd
 * depending on the virtual address passed.
 */
void attach_pmd(pgd_table_t *task_pgd, pmd_table_t *pmd_table,
                unsigned long vaddr)
{
        u32 pmd_phys = virt_to_phys(pmd_table);
        pmd_t *pmd;

        BUG_ON(!is_aligned(pmd_phys, PMD_SIZE));

        /*
         * Pick the right pmd from the right pgd.
         * It makes a difference if split tables are used.
         */
        pmd = arch_pick_pmd(task_pgd, vaddr);

        /* Write the pmd into hardware pgd */
        arch_write_pmd(pmd, pmd_phys, vaddr);
}
 
void add_mapping_pgd(unsigned long physical, unsigned long virtual,
                     unsigned int sz_bytes, unsigned int flags,
                     pgd_table_t *task_pgd)
{
        unsigned long npages = (sz_bytes >> PFN_SHIFT);
        pmd_table_t *pmd_table;

        if (sz_bytes < PAGE_SIZE) {
                print_early("Error: Mapping size less than PAGE_SIZE. "
                            "Mapping size is in bytes not pages.\n");
                BUG();
        }

        if (sz_bytes & PAGE_MASK)
                npages++;

        /* Convert generic map flags to arch specific flags */
        BUG_ON(!(flags = space_flags_to_ptflags(flags)));

        /* Map all pages that cover given size */
        for (int i = 0; i < npages; i++) {
                /* Check if a pmd was attached previously */
                if (!(pmd_table = pmd_exists(task_pgd, virtual))) {

                        /* First mapping in pmd, allocate it */
                        pmd_table = alloc_pmd();

                        /* Prepare the pte but don't sync */
                        arch_prepare_pte(physical, virtual, flags,
                                &pmd_table->entry[PMD_INDEX(virtual)]);

                        /* Attach pmd to its pgd and sync it */
                        attach_pmd(task_pgd, pmd_table, virtual);
                } else {
                        /* Prepare, write the pte and sync */
                        arch_prepare_write_pte(physical, virtual, flags,
                                &pmd_table->entry[PMD_INDEX(virtual)]);
                }

                /* Move on to the next page */
                physical += PAGE_SIZE;
                virtual += PAGE_SIZE;
        }
}
 
void add_boot_mapping(unsigned long physical, unsigned long virtual,
                      unsigned int sz_bytes, unsigned int flags)
{
        unsigned long npages = (sz_bytes >> PFN_SHIFT);
        pmd_table_t *pmd_table;

        if (sz_bytes < PAGE_SIZE) {
                print_early("Error: Mapping size less than PAGE_SIZE. "
                            "Mapping size should be in _bytes_ "
                            "not pages.\n");
                BUG();
        }

        if (sz_bytes & PAGE_MASK)
                npages++;

        /* Convert generic map flags to arch specific flags */
        BUG_ON(!(flags = space_flags_to_ptflags(flags)));

        /* Map all pages that cover given size */
        for (int i = 0; i < npages; i++) {
                /* Check if a pmd was attached previously */
                if (!(pmd_table = pmd_exists(&init_pgd, virtual))) {

                        /* First mapping in pmd, allocate it */
                        pmd_table = alloc_boot_pmd();

                        /* Prepare the pte but don't sync */
                        arch_prepare_pte(physical, virtual, flags,
                                &pmd_table->entry[PMD_INDEX(virtual)]);

                        /* Attach pmd to its pgd and sync it */
                        attach_pmd(&init_pgd, pmd_table, virtual);
                } else {
                        /* Prepare, write the pte and sync */
                        arch_prepare_write_pte(physical, virtual, flags,
                                &pmd_table->entry[PMD_INDEX(virtual)]);
                }

                /* Move on to the next page */
                physical += PAGE_SIZE;
                virtual += PAGE_SIZE;
        }
}
 
void add_mapping(unsigned long paddr, unsigned long vaddr,
                 unsigned int size, unsigned int flags)
{
        add_mapping_pgd(paddr, vaddr, size, flags, TASK_PGD(current));
}
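/*
 * Illustrative usage sketch (added here for clarity, not in the
 * original file): mapping a hypothetical two-page device window into
 * the currently running task. The physical and virtual addresses are
 * invented for the example; MAP_KERN_RW is the same generic flag used
 * by remap_as_pages() at the end of this file. Note that the size
 * argument is in bytes, not pages.
 */
void example_map_device_window(void)
{
        add_mapping(0x10100000, 0xE0100000, 2 * PAGE_SIZE, MAP_KERN_RW);
}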
 
/*
 * Checks if a virtual address range has the same or more permissive
 * flags than the given ones; returns 0 if not, and 1 if OK.
 */
int check_mapping_pgd(unsigned long vaddr, unsigned long size,
                      unsigned int flags, pgd_table_t *pgd)
{
        unsigned int npages = __pfn(align_up(size, PAGE_SIZE));
        pte_t pte;

        /* Convert generic map flags to pagetable-specific */
        BUG_ON(!(flags = space_flags_to_ptflags(flags)));

        for (int i = 0; i < npages; i++) {
                pte = virt_to_pte_from_pgd(pgd, vaddr + i * PAGE_SIZE);

                /* Check if pte perms are equal to or greater than given flags */
                if (arch_check_pte_access_perms(pte, flags))
                        continue;
                else
                        return 0;
        }

        return 1;
}
 
int check_mapping(unsigned long vaddr, unsigned long size,
                  unsigned int flags)
{
        return check_mapping_pgd(vaddr, size, flags,
                                 TASK_PGD(current));
}
 
/*
 * This can be made common for v5/v7, keeping split/page table
 * and cache flush parts in arch-specific files.
 */
int remove_mapping_pgd(pgd_table_t *task_pgd, unsigned long vaddr)
{
        pmd_table_t *pmd_table;
        int pgd_i, pmd_i;
        pmd_t *pmd;
        unsigned int pmd_type, pte_type;

        vaddr = page_align(vaddr);
        pgd_i = PGD_INDEX(vaddr);
        pmd_i = PMD_INDEX(vaddr);

        /*
         * Get the right pgd's pmd according to whether
         * the address is global or task-specific.
         */
        pmd = arch_pick_pmd(task_pgd, vaddr);

        pmd_type = *pmd & PMD_TYPE_MASK;

        if (pmd_type == PMD_TYPE_FAULT)
                return -ENOMAP;

        /* Anything else must be a proper pmd */
        BUG_ON(pmd_type != PMD_TYPE_PMD);

        /* Get the 2nd level pmd table */
        pmd_table = (pmd_table_t *)
                    phys_to_virt((unsigned long)*pmd & PMD_ALIGN_MASK);

        /* Get the pte type already there */
        pte_type = pmd_table->entry[pmd_i] & PTE_TYPE_MASK;

        /* If it's a fault we're done */
        if (pte_type == PTE_TYPE_FAULT)
                return -ENOMAP;
        /* It must be a small pte if not fault */
        else if (pte_type != PTE_TYPE_SMALL)
                BUG();

        /* Write to pte, also syncing it as required by arch */
        arch_prepare_write_pte(0, vaddr,
                               space_flags_to_ptflags(MAP_FAULT),
                               (pte_t *)&pmd_table->entry[pmd_i]);
        return 0;
}
 
int remove_mapping(unsigned long vaddr)
{
        return remove_mapping_pgd(TASK_PGD(current), vaddr);
}
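/*
 * Illustrative usage sketch (added here for clarity, not in the
 * original file): verifying and then tearing down the hypothetical
 * window mapped in the earlier example. check_mapping() walks every
 * page in the range, while remove_mapping() clears one page at a time.
 */
void example_unmap_device_window(void)
{
        if (check_mapping(0xE0100000, 2 * PAGE_SIZE, MAP_KERN_RW)) {
                remove_mapping(0xE0100000);
                remove_mapping(0xE0100000 + PAGE_SIZE);
        }
}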
 
int delete_page_tables(struct address_space *space)
{
        remove_mapping_pgd_all_user(space->pgd);
        free_pgd(space->pgd);
        return 0;
}
 
/*
 * Copies userspace entries of one task to another.
 * In order to do that, it allocates new pmds and
 * copies the original values into new ones.
 */
int copy_user_tables(struct address_space *new,
                     struct address_space *orig_space)
{
        pgd_table_t *to = new->pgd, *from = orig_space->pgd;
        pmd_table_t *pmd, *orig;

        /* Allocate and copy all pmds that will be exclusive to new task. */
        for (int i = 0; i < PGD_ENTRY_TOTAL; i++) {
                /* Detect a pmd entry that is not a global pmd */
                if (!is_global_pgdi(i) &&
                    ((from->entry[i] & PMD_TYPE_MASK) == PMD_TYPE_PMD)) {
                        /* Allocate new pmd */
                        if (!(pmd = alloc_pmd()))
                                goto out_error;

                        /* Find original pmd */
                        orig = (pmd_table_t *)
                               phys_to_virt(from->entry[i] & PMD_ALIGN_MASK);

                        /* Copy original to new */
                        memcpy(pmd, orig, sizeof(pmd_table_t));

                        /* Replace original pmd entry in pgd with new */
                        to->entry[i] = (pmd_t)(virt_to_phys(pmd)
                                               | PMD_TYPE_PMD);
                }
        }

        /* Just in case the new table is written to any ttbr
         * after here, make sure all writes on it are complete. */
        dmb();

        return 0;

out_error:
        /* Find all non-kernel pmds we have just allocated and free them */
        for (int i = 0; i < PGD_ENTRY_TOTAL; i++) {
                /* Non-kernel pmd that has just been allocated. */
                if (!is_global_pgdi(i) &&
                    (to->entry[i] & PMD_TYPE_MASK) == PMD_TYPE_PMD) {
                        /* Obtain the pmd handle */
                        pmd = (pmd_table_t *)
                              phys_to_virt(to->entry[i] & PMD_ALIGN_MASK);
                        /* Free pmd */
                        free_pmd(pmd);
                }
        }
        return -ENOMEM;
}
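/*
 * Illustrative sketch (added here for clarity, not in the original
 * file): how a fork-style path might use copy_user_tables(). On
 * failure the function has already freed the pmds it allocated, so the
 * caller only needs to dispose of the partially set up address space,
 * e.g. with free_pgd() as delete_page_tables() does above.
 */
int example_fork_tables(struct address_space *child,
                        struct address_space *parent)
{
        int err;

        if ((err = copy_user_tables(child, parent)) < 0) {
                free_pgd(child->pgd);
                return err;
        }

        return 0;
}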
 
/*
 * Useful for upgrading to page-grained control
 * over the kernel section mapping.
 *
 * Remaps a section mapping in pages. It allocates a pmd,
 * fills in the page information, and replaces the direct
 * section physical translation with the address of the
 * pmd. Syncs the caches.
 *
 * NOTE: Assumes only a single pmd is enough.
 */
void remap_as_pages(void *vstart, void *vend)
{
        unsigned long pstart = virt_to_phys(vstart);
        unsigned long pend = virt_to_phys(vend);
        unsigned long paddr = pstart;
        unsigned long vaddr = (unsigned long)vstart;
        int pmd_i = PMD_INDEX(vstart);
        pgd_table_t *pgd = &init_pgd;
        pmd_table_t *pmd = alloc_boot_pmd();
        int npages = __pfn(pend - pstart);
        int map_flags;

        /* Map the whole kernel into the pmd first */
        for (int n = 0; n < npages; n++) {
                /* Map text pages as executable */
                if ((vaddr >= (unsigned long)_start_text &&
                     vaddr < page_align_up(_end_text)) ||
                    (vaddr >= (unsigned long)_start_vectors &&
                     vaddr < page_align_up(_end_vectors)))
                        map_flags = MAP_KERN_RWX;
                else
                        map_flags = MAP_KERN_RW;

                arch_prepare_pte(paddr, vaddr,
                                 space_flags_to_ptflags(map_flags),
                                 &pmd->entry[pmd_i + n]);
                paddr += PAGE_SIZE;
                vaddr += PAGE_SIZE;
        }

        attach_pmd(pgd, pmd, (unsigned long)vstart);

        printk("%s: Kernel area 0x%lx - 0x%lx "
               "remapped as %d pages\n", __KERNELNAME__,
               (unsigned long)vstart, (unsigned long)vend,
               npages);
}
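/*
 * Illustrative call-site sketch (added here for clarity, not in the
 * original file): init code would typically pass the linker-provided
 * bounds of the already section-mapped kernel image. The symbol names
 * _start_kernel and _end_kernel are assumptions made for the example;
 * the real boundaries come from INC_ARCH(linker.h).
 */
void example_remap_kernel_image(void)
{
        remap_as_pages(_start_kernel, _end_kernel);
}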
 
