c0or1k/trunk/src/arch/arm/v6/mapping.c (rev 2)
https://opencores.org/ocsvn/c0or1k/c0or1k/trunk

/*
 * Copyright (C) 2007 Bahadir Balban
 */
#include <l4/lib/printk.h>
#include <l4/lib/mutex.h>
#include <l4/lib/string.h>
#include <l4/generic/scheduler.h>
#include <l4/generic/space.h>
#include <l4/generic/bootmem.h>
#include <l4/generic/resource.h>
#include <l4/generic/platform.h>
#include <l4/api/errno.h>
#include INC_SUBARCH(mm.h)
#include INC_SUBARCH(mmu_ops.h)
#include INC_GLUE(memory.h)
#include INC_GLUE(mapping.h)
#include INC_GLUE(memlayout.h)
#include INC_ARCH(linker.h)
#include INC_ARCH(asm.h)
#include INC_API(kip.h)
#include INC_ARCH(io.h)

/*
 * Removes the initial section mappings needed for the transition
 * to virtual memory. Used one time only, at boot.
 */
void remove_section_mapping(unsigned long vaddr)
{
        pgd_table_t *pgd = &init_pgd;
        pmd_t pgd_i = PGD_INDEX(vaddr);

        /* The entry must be a section mapping; halt if it is not. */
        if ((pgd->entry[pgd_i] & PMD_TYPE_MASK) != PMD_TYPE_SECTION)
                while (1);

        pgd->entry[pgd_i] = 0;
        pgd->entry[pgd_i] |= PMD_TYPE_FAULT;
        arm_invalidate_tlb();
}
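
/*
 * Editor's sketch (not part of the original file): with the ARM
 * short-descriptor format assumed throughout this file, each
 * first-level entry maps a 1MB section and the table has 4096
 * entries, so PGD_INDEX() conventionally reduces to the top 12 bits
 * of the virtual address. A minimal stand-alone model of that
 * assumption:
 */
#if 0   /* reference sketch only, not compiled into the kernel */
#include <assert.h>
#include <stdint.h>

#define MODEL_SECTION_SHIFT     20      /* 1MB sections */

static unsigned int model_pgd_index(uint32_t vaddr)
{
        /* Top 12 bits select one of 4096 first-level entries */
        return vaddr >> MODEL_SECTION_SHIFT;
}

static void model_pgd_index_check(void)
{
        /* 0x80100000 lives in section 0x801 */
        assert(model_pgd_index(0x80100000) == 0x801);
}
#endif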

/*
 * Maps the given section-aligned @paddr to @vaddr, using as many
 * section units as it takes to cover @size sections. Note that this
 * overwrites any existing mapping of the same virtual address.
 */
void __add_section_mapping_init(unsigned int paddr,
                                unsigned int vaddr,
                                unsigned int size,
                                unsigned int flags)
{
        pte_t *ppte;
        unsigned int l1_ptab;
        unsigned int l1_offset;

        /*
         * 1st level page table address. This runs at init time,
         * when the table's physical address is directly usable.
         */
        l1_ptab = virt_to_phys(&init_pgd);

        /* Get the section offset for this vaddr */
        l1_offset = (vaddr >> 18) & 0x3FFC;

        /* The beginning entry for the mapping */
        ppte = (pte_t *)(l1_ptab + l1_offset);

        for (int i = 0; i < size; i++) {
                *ppte = 0;                      /* Clear out the old value */
                *ppte |= paddr;                 /* Assign physical address */
                *ppte |= PMD_TYPE_SECTION;      /* Assign translation type */
                /* Domain is 0, therefore no writes. */
                /* Only kernel access allowed */
                *ppte |= (SVC_RW_USR_NONE << SECTION_AP0);
                /* Cacheability/bufferability flags */
                *ppte |= flags;
                ppte++;                         /* Next section entry */
                paddr += SECTION_SIZE;          /* Next physical section */
        }
}
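
/*
 * Editor's sketch (not part of the original file): the l1_offset
 * expression above folds the table index and the 4-byte entry size
 * into a single shift. With 1MB sections, the index is (vaddr >> 20)
 * and the byte offset into the 16KB table is that index times 4,
 * which is exactly (vaddr >> 18) with the low two bits masked off:
 */
#if 0   /* reference sketch only, not compiled into the kernel */
#include <assert.h>
#include <stdint.h>

static void model_l1_offset_check(uint32_t vaddr)
{
        uint32_t by_index = ((vaddr >> 20) & 0xFFF) * 4;
        uint32_t by_shift = (vaddr >> 18) & 0x3FFC;

        /* Both forms agree for every vaddr;
         * e.g. vaddr 0x80100000: index 0x801, byte offset 0x2004. */
        assert(by_index == by_shift);
}
#endif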

void add_section_mapping_init(unsigned int paddr, unsigned int vaddr,
                              unsigned int size, unsigned int flags)
{
        unsigned int psection;
        unsigned int vsection;

        /* Align each address down to the section it resides in */
        psection = paddr & ~SECTION_MASK;
        vsection = vaddr & ~SECTION_MASK;

        if (size == 0)
                return;

        __add_section_mapping_init(psection, vsection, size, flags);
}
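
/*
 * Hypothetical usage sketch (editor's addition, not part of the
 * original file): mapping one section's worth of a device during
 * early boot. The addresses and the flags value are made up for
 * illustration; real callers and flag constants live elsewhere in
 * the tree.
 */
#if 0   /* reference sketch only, not compiled into the kernel */
static void map_console_sketch(void)
{
        add_section_mapping_init(0x10100000,    /* paddr, hypothetical */
                                 0xF0100000,    /* vaddr, hypothetical */
                                 1,             /* size, in sections */
                                 0);            /* cache/buffer flags */
}
#endif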

void arch_prepare_pte(u32 paddr, u32 vaddr, unsigned int flags,
                      pte_t *ptep)
{
        /* They must be page-aligned at this stage */
        BUG_ON(!is_page_aligned(paddr));
        BUG_ON(!is_page_aligned(vaddr));

        /*
         * NOTE: In v5, the flags converted from the generic form by
         * space_flags_to_ptflags() can be written directly to the
         * pte; no further conversion is needed, so this function does
         * little with the flags. In contrast, on ARMv7 the flags need
         * an extra level of processing.
         */
        if (flags == __MAP_FAULT)
                *ptep = paddr | flags | PTE_TYPE_FAULT;
        else
                *ptep = paddr | flags | PTE_TYPE_SMALL;
}
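
/*
 * Editor's sketch (not part of the original file): a hypothetical
 * call showing how the pte is composed. The addresses and the 0xAA0
 * flags value are made up; real flags come from
 * space_flags_to_ptflags().
 */
#if 0   /* reference sketch only, not compiled into the kernel */
static void prepare_pte_sketch(void)
{
        pte_t pte;

        arch_prepare_pte(0x80124000, 0xF0124000, 0xAA0, &pte);
        /* pte now holds 0x80124000 | 0xAA0 | PTE_TYPE_SMALL; the flag
         * and type bits cannot collide with the address bits because
         * both inputs are page aligned. */
}
#endif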

void arch_write_pte(pte_t *ptep, pte_t pte, u32 vaddr)
{
        /* FIXME:
         * Clean the dcache and invalidate the icache
         * for the old translation first?
         *
         * The dcache is virtual, therefore the data in those entries
         * should be cleaned first, before the translation of that
         * virtual address is changed to a new physical address.
         *
         * Check that the entry was not faulty first.
         */
        arm_clean_invalidate_cache();

        *ptep = pte;

        /* FIXME: Fix this!
         * - Use vaddr to clean the dcache pte by MVA.
         * - Use the mapped area to invalidate the icache.
         * - Invalidate the tlb for the mapped area.
         */
        arm_clean_invalidate_cache();
        arm_invalidate_tlb();
}

void arch_prepare_write_pte(u32 paddr, u32 vaddr,
                            unsigned int flags, pte_t *ptep)
{
        pte_t pte = 0;

        /* They must be page-aligned at this stage */
        BUG_ON(!is_page_aligned(paddr));
        BUG_ON(!is_page_aligned(vaddr));

        arch_prepare_pte(paddr, vaddr, flags, &pte);

        arch_write_pte(ptep, pte, vaddr);
}

pmd_t *arch_pick_pmd(pgd_table_t *pgd, unsigned long vaddr)
{
        return &pgd->entry[PGD_INDEX(vaddr)];
}

/*
 * v5 pmd writes
 */
void arch_write_pmd(pmd_t *pmd_entry, u32 pmd_phys, u32 vaddr)
{
        /* FIXME: Clean the dcache if there was a valid entry */
        *pmd_entry = (pmd_t)(pmd_phys | PMD_TYPE_PMD);
        arm_clean_invalidate_cache(); /* FIXME: Write these properly! */
        arm_invalidate_tlb();
}

int arch_check_pte_access_perms(pte_t pte, unsigned int flags)
{
        if ((pte & PTE_PROT_MASK) >= (flags & PTE_PROT_MASK))
                return 1;
        else
                return 0;
}
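
/*
 * Editor's note (not part of the original file): the comparison above
 * assumes PTE_PROT_MASK isolates a protection field in which larger
 * values are at least as permissive as smaller ones, so an existing
 * pte satisfies a request whenever its field compares >=. A sketch of
 * that assumption with made-up encodings:
 */
#if 0   /* reference sketch only, not compiled into the kernel */
static void access_perms_sketch(void)
{
        /* Suppose read-only encoded as 2 and read-write as 3 within
         * PTE_PROT_MASK: an RW mapping would satisfy an RO request,
         * but an RO mapping would fail an RW request. */
        pte_t rw_pte = 3;
        unsigned int ro_flags = 2;

        BUG_ON(!arch_check_pte_access_perms(rw_pte, ro_flags));
}
#endif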

/*
 * Tell if a pgd index is a common kernel index.
 * This is used to distinguish common kernel entries
 * in a pgd, when copying page tables.
 */
int is_global_pgdi(int i)
{
        if ((i >= PGD_INDEX(KERNEL_AREA_START) &&
             i < PGD_INDEX(KERNEL_AREA_END)) ||
            (i >= PGD_INDEX(IO_AREA_START) &&
             i < PGD_INDEX(IO_AREA_END)) ||
            (i == PGD_INDEX(USER_KIP_PAGE)) ||
            (i == PGD_INDEX(ARM_HIGH_VECTOR)) ||
            (i == PGD_INDEX(ARM_SYSCALL_VECTOR)) ||
            (i == PGD_INDEX(USERSPACE_CONSOLE_VBASE)))
                return 1;
        else
                return 0;
}

extern pmd_table_t *pmd_array;

void remove_mapping_pgd_all_user(pgd_table_t *pgd)
{
        pmd_table_t *pmd;

        /* Traverse all pgd entries */
        for (int i = 0; i < PGD_ENTRY_TOTAL; i++) {
                if (!is_global_pgdi(i)) {
                        /* Detect a pmd entry */
                        if ((pgd->entry[i] & PMD_TYPE_MASK)
                            == PMD_TYPE_PMD) {
                                /* Obtain the user pmd handle */
                                pmd = (pmd_table_t *)
                                      phys_to_virt(pgd->entry[i] &
                                                   PMD_ALIGN_MASK);
                                /* Free it */
                                free_pmd(pmd);
                        }

                        /* Clear the pgd entry */
                        pgd->entry[i] = PMD_TYPE_FAULT;
                }
        }
}

int pgd_count_boot_pmds(void)
{
        int npmd = 0;
        pgd_table_t *pgd = &init_pgd;

        for (int i = 0; i < PGD_ENTRY_TOTAL; i++)
                if ((pgd->entry[i] & PMD_TYPE_MASK) == PMD_TYPE_PMD)
                        npmd++;

        return npmd;
}

/*
 * Moves from the boot pmd/pgd page tables to tables allocated
 * from the cache.
 */
pgd_table_t *arch_realloc_page_tables(void)
{
        pgd_table_t *pgd_new = alloc_pgd();
        pgd_table_t *pgd_old = &init_pgd;
        pmd_table_t *orig, *pmd;

        /* Copy whole pgd entries */
        memcpy(pgd_new, pgd_old, sizeof(pgd_table_t));

        /* Allocate and copy all pmds */
        for (int i = 0; i < PGD_ENTRY_TOTAL; i++) {
                /* Detect a pmd entry */
                if ((pgd_old->entry[i] & PMD_TYPE_MASK) == PMD_TYPE_PMD) {
                        /* Allocate new pmd */
                        if (!(pmd = alloc_pmd())) {
                                printk("FATAL: PMD allocation "
                                       "failed during system initialization\n");
                                BUG();
                        }

                        /* Find the original pmd */
                        orig = (pmd_table_t *)
                               phys_to_virt(pgd_old->entry[i] &
                                            PMD_ALIGN_MASK);

                        /* Copy the original to the new */
                        memcpy(pmd, orig, sizeof(pmd_table_t));

                        /* Replace the original pmd entry in the pgd */
                        pgd_new->entry[i] = (pmd_t)virt_to_phys(pmd);
                        pgd_new->entry[i] |= PMD_TYPE_PMD;
                }
        }

        /* Switch the virtual memory system over to the new tables */
        arm_clean_invalidate_cache();
        arm_drain_writebuffer();
        arm_invalidate_tlb();
        arm_set_ttb(virt_to_phys(pgd_new));
        arm_invalidate_tlb();

        printk("%s: Initial page tables moved from 0x%x to 0x%x physical\n",
               __KERNELNAME__, virt_to_phys(pgd_old),
               virt_to_phys(pgd_new));

        return pgd_new;
}

/*
 * Copies the global kernel entries into another pgd. Even for
 * sub-pmd ranges the associated pmd entries are copied, assuming
 * any pmds copied are applicable to all tasks in the system.
 */
void copy_pgd_global_by_vrange(pgd_table_t *to, pgd_table_t *from,
                               unsigned long start, unsigned long end)
{
        /* Extend sub-pmd ranges to their respective pmd boundaries */
        start = align(start, PMD_MAP_SIZE);

        if (end < start)
                end = 0;

        /* Aligning up would overflow if mapping the last virtual pmd */
        if (end < align(~0, PMD_MAP_SIZE) ||
            start > end) /* end may have already overflowed as input */
                end = align_up(end, PMD_MAP_SIZE);
        else
                end = 0;

        copy_pgds_by_vrange(to, from, start, end);
}

void copy_pgds_by_vrange(pgd_table_t *to, pgd_table_t *from,
                         unsigned long start, unsigned long end)
{
        unsigned long start_i = PGD_INDEX(start);
        unsigned long end_i = PGD_INDEX(end);
        unsigned long irange = (end_i != 0) ? (end_i - start_i)
                               : (PGD_ENTRY_TOTAL - start_i);

        memcpy(&to->entry[start_i], &from->entry[start_i],
               irange * sizeof(pmd_t));
}
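
/*
 * Editor's sketch (not part of the original file): worked index
 * arithmetic for the two functions above, assuming PGD_INDEX(v) is
 * (v) >> 20 and PGD_ENTRY_TOTAL is 4096.
 */
#if 0   /* reference sketch only, not compiled into the kernel */
static void vrange_copy_sketch(void)
{
        unsigned long start_i = 0xF0000000UL >> 20;     /* 0xF00 */
        unsigned long end_i = 0xF0100000UL >> 20;       /* 0xF01 */
        unsigned long irange = (end_i != 0) ? (end_i - start_i)
                               : (4096 - start_i);

        /* One pmd entry is copied for this 1MB range */
        BUG_ON(irange != 1);

        /* An end of 0 (deliberately overflowed in
         * copy_pgd_global_by_vrange when the range touches the last
         * virtual pmd) yields end_i == 0, so irange falls back to
         * "copy through the end of the table". */
}
#endif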
329
 
330
void arch_copy_pgd_kernel_entries(pgd_table_t *to)
331
{
332
        pgd_table_t *from = TASK_PGD(current);
333
 
334
        copy_pgd_global_by_vrange(to, from, KERNEL_AREA_START,
335
                                  KERNEL_AREA_END);
336
        copy_pgd_global_by_vrange(to, from, IO_AREA_START, IO_AREA_END);
337
        copy_pgd_global_by_vrange(to, from, USER_KIP_PAGE,
338
                                  USER_KIP_PAGE + PAGE_SIZE);
339
        copy_pgd_global_by_vrange(to, from, ARM_HIGH_VECTOR,
340
                                  ARM_HIGH_VECTOR + PAGE_SIZE);
341
        copy_pgd_global_by_vrange(to, from, ARM_SYSCALL_VECTOR,
342
                                  ARM_SYSCALL_VECTOR + PAGE_SIZE);
343
 
344
        /* We temporarily map uart registers to every process */
345
        copy_pgd_global_by_vrange(to, from, USERSPACE_CONSOLE_VBASE,
346
                                  USERSPACE_CONSOLE_VBASE + PAGE_SIZE);
347
}

/* The scheduler uses this to switch context */
void arch_space_switch(struct ktcb *to)
{
        pgd_table_t *pgd = TASK_PGD(to);

        arm_clean_invalidate_cache();
        arm_invalidate_tlb();
        arm_set_ttb(virt_to_phys(pgd));
        arm_invalidate_tlb();
}

void idle_task(void)
{
        printk("Idle task.\n");

        while (1);
}
