OpenCores Subversion repository: c0or1k
URL: https://opencores.org/ocsvn/c0or1k/c0or1k/trunk
File: src/arch/arm/v5/mapping.c (rev 2)

/*
 * Copyright (C) 2007 Bahadir Balban
 */
#include <l4/lib/printk.h>
#include <l4/lib/mutex.h>
#include <l4/lib/string.h>
#include <l4/generic/scheduler.h>
#include <l4/generic/space.h>
#include <l4/generic/bootmem.h>
#include <l4/generic/resource.h>
#include <l4/generic/platform.h>
#include <l4/generic/debug.h>
#include <l4/api/errno.h>
#include INC_SUBARCH(mm.h)
#include INC_SUBARCH(mmu_ops.h)
#include INC_GLUE(memory.h)
#include INC_GLUE(mapping.h)
#include INC_GLUE(memlayout.h)
#include INC_ARCH(linker.h)
#include INC_ARCH(asm.h)
#include INC_API(kip.h)
#include INC_ARCH(io.h)
 
/*
 * Removes initial mappings needed for transition to virtual memory.
 * Used one-time only.
 */
void remove_section_mapping(unsigned long vaddr)
{
        pgd_table_t *pgd = &init_pgd;
        pmd_t pgd_i = PGD_INDEX(vaddr);
        if (!((pgd->entry[pgd_i] & PMD_TYPE_MASK)
              & PMD_TYPE_SECTION))
                while (1);
        pgd->entry[pgd_i] = 0;
        pgd->entry[pgd_i] |= PMD_TYPE_FAULT;
        arm_invalidate_tlb();
}
 
/*
 * Maps the section-aligned @paddr to @vaddr, using as many 1 MB
 * section entries as needed to cover @size sections. Note this
 * overwrites any previous mapping of the same virtual address.
 */
void __add_section_mapping_init(unsigned int paddr,
                                unsigned int vaddr,
                                unsigned int size,
                                unsigned int flags)
{
        pte_t *ppte;
        unsigned int l1_ptab;
        unsigned int l1_offset;

        /* 1st level page table address */
        l1_ptab = virt_to_phys(&init_pgd);

        /* Get the section offset for this vaddr */
        l1_offset = (vaddr >> 18) & 0x3FFC;
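        /*
         * Each first-level entry maps a 1 MB section and is 4 bytes
         * wide, so the byte offset of the entry in the table is
         * (vaddr >> 20) * 4 == (vaddr >> 18), masked to stay within
         * the 4096-entry (16 KB) table.
         */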
 
        /* The beginning entry for mapping */
        ppte = (pte_t *)(l1_ptab + l1_offset);
        for (int i = 0; i < size; i++) {
                *ppte = 0;                      /* Clear out old value */
                *ppte |= paddr;                 /* Assign physical address */
                *ppte |= PMD_TYPE_SECTION;      /* Assign translation type */
                /* Domain is 0, therefore no writes. */
                /* Only kernel access allowed */
                *ppte |= (SVC_RW_USR_NONE << SECTION_AP0);
                /* Cacheability/Bufferability flags */
                *ppte |= flags;
                ppte++;                         /* Next section entry */
                paddr += SECTION_SIZE;          /* Next physical section */
        }
        return;
}

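/*
 * Illustrative use only (all values below are hypothetical): to map
 * two 1 MB sections of RAM at physical 0x100000 into the kernel's
 * virtual window during early boot, one would call something like:
 *
 *      add_section_mapping_init(0x100000, 0xF0100000, 2, cache_flags);
 */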
void add_section_mapping_init(unsigned int paddr, unsigned int vaddr,
                              unsigned int size, unsigned int flags)
{
        unsigned int psection;
        unsigned int vsection;

        /* Align each address down to the section it resides in */
        psection = paddr & ~SECTION_MASK;
        vsection = vaddr & ~SECTION_MASK;

        if (size == 0)
                return;

        __add_section_mapping_init(psection, vsection, size, flags);

        return;
}
 
void arch_prepare_pte(u32 paddr, u32 vaddr, unsigned int flags,
                      pte_t *ptep)
{
        /* They must be aligned at this stage */
        if (!is_page_aligned(paddr) || !is_page_aligned(vaddr)) {
                printk("address not aligned, phys address %x"
                       " virtual address %x\n", paddr, vaddr);
                BUG();
        }

        /*
         * NOTE: In v5, the flags converted from the generic flags
         * by space_flags_to_ptflags() can be written directly to
         * the pte; no further conversion is needed. Therefore this
         * function does little with the flags. In contrast, on
         * ARMv7 the flags need an extra level of processing.
         */
        if (flags == __MAP_FAULT)
                *ptep = paddr | flags | PTE_TYPE_FAULT;
        else
                *ptep = paddr | flags | PTE_TYPE_SMALL;
}
 
void arch_write_pte(pte_t *ptep, pte_t pte, u32 vaddr)
{
        /* FIXME:
         * Clean the dcache and invalidate the icache
         * for the old translation first?
         *
         * The dcache is virtual, therefore the data
         * in those entries should be cleaned first,
         * before the translation of that virtual
         * address is changed to a new physical address.
         *
         * Check that the entry was not faulty first.
         */
        arm_clean_invalidate_cache();

        *ptep = pte;

        /* FIXME: Fix this!
         * - Use vaddr to clean the dcache pte by MVA.
         * - Use mapped area to invalidate the icache.
         * - Invalidate the tlb for mapped area.
         */
        arm_clean_invalidate_cache();
        arm_invalidate_tlb();
}
 
void arch_prepare_write_pte(u32 paddr, u32 vaddr,
                            unsigned int flags, pte_t *ptep)
{
        pte_t pte = 0;

        /* They must be aligned at this stage */
        BUG_ON(!is_page_aligned(paddr));
        BUG_ON(!is_page_aligned(vaddr));

        arch_prepare_pte(paddr, vaddr, flags, &pte);

        arch_write_pte(ptep, pte, vaddr);
}
 
pmd_t *arch_pick_pmd(pgd_table_t *pgd, unsigned long vaddr)
{
        return &pgd->entry[PGD_INDEX(vaddr)];
}
 
/*
 * v5 pmd writes
 */
void arch_write_pmd(pmd_t *pmd_entry, u32 pmd_phys, u32 vaddr)
{
        /* FIXME: Clean the dcache if there was a valid entry */
        *pmd_entry = (pmd_t)(pmd_phys | PMD_TYPE_PMD);
        arm_clean_invalidate_cache(); /* FIXME: Write these properly! */
        arm_invalidate_tlb();
}

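/*
 * Note: this check appears to assume that the protection field
 * encodings are ordered by increasing permissiveness, so a pte whose
 * protection bits are numerically >= the requested ones already
 * grants the requested access.
 */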
int arch_check_pte_access_perms(pte_t pte, unsigned int flags)
{
        if ((pte & PTE_PROT_MASK) >= (flags & PTE_PROT_MASK))
                return 1;
        else
                return 0;
}
 
186
/*
187
 * Tell if a pgd index is a common kernel index.
188
 * This is used to distinguish common kernel entries
189
 * in a pgd, when copying page tables.
190
 */
191
int is_global_pgdi(int i)
192
{
193
        if ((i >= PGD_INDEX(KERNEL_AREA_START) &&
194
             i < PGD_INDEX(KERNEL_AREA_END)) ||
195
            (i >= PGD_INDEX(IO_AREA_START) &&
196
             i < PGD_INDEX(IO_AREA_END)) ||
197
            (i == PGD_INDEX(USER_KIP_PAGE)) ||
198
            (i == PGD_INDEX(ARM_HIGH_VECTOR)) ||
199
            (i == PGD_INDEX(ARM_SYSCALL_VECTOR)) ||
200
            (i == PGD_INDEX(USERSPACE_CONSOLE_VBASE)))
201
                return 1;
202
        else
203
                return 0;
204
}
205
 
extern pmd_table_t *pmd_array;

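/*
 * Frees the pmds referenced by all non-global (user) pgd entries and
 * resets those entries to fault type, leaving the common kernel
 * entries untouched.
 */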
void remove_mapping_pgd_all_user(pgd_table_t *pgd)
{
        pmd_table_t *pmd;

        /* Traverse through all pgd entries. */
        for (int i = 0; i < PGD_ENTRY_TOTAL; i++) {
                if (!is_global_pgdi(i)) {
                        /* Detect a pmd entry */
                        if ((pgd->entry[i] & PMD_TYPE_MASK)
                            == PMD_TYPE_PMD) {
                                /* Obtain the user pmd handle */
                                pmd = (pmd_table_t *)
                                      phys_to_virt(pgd->entry[i] &
                                                   PMD_ALIGN_MASK);
                                /* Free it */
                                free_pmd(pmd);
                        }

                        /* Clear the pgd entry */
                        pgd->entry[i] = PMD_TYPE_FAULT;
                }
        }
}
 
int pgd_count_boot_pmds(void)
{
        int npmd = 0;
        pgd_table_t *pgd = &init_pgd;

        for (int i = 0; i < PGD_ENTRY_TOTAL; i++)
                if ((pgd->entry[i] & PMD_TYPE_MASK) == PMD_TYPE_PMD)
                        npmd++;
        return npmd;
}
 
/*
 * Switches from the boot pmd/pgd page tables to tables allocated
 * from the cache.
 */
pgd_table_t *arch_realloc_page_tables(void)
{
        pgd_table_t *pgd_new = alloc_pgd();
        pgd_table_t *pgd_old = &init_pgd;
        pmd_table_t *orig, *pmd;

        /* Copy whole pgd entries */
        memcpy(pgd_new, pgd_old, sizeof(pgd_table_t));

        /* Allocate and copy all pmds */
        for (int i = 0; i < PGD_ENTRY_TOTAL; i++) {
                /* Detect a pmd entry */
                if ((pgd_old->entry[i] & PMD_TYPE_MASK) == PMD_TYPE_PMD) {
                        /* Allocate new pmd */
                        if (!(pmd = alloc_pmd())) {
                                printk("FATAL: PMD allocation "
                                       "failed during system initialization\n");
                                BUG();
                        }

                        /* Find original pmd */
                        orig = (pmd_table_t *)
                               phys_to_virt(pgd_old->entry[i] &
                                            PMD_ALIGN_MASK);

                        /* Copy original to new */
                        memcpy(pmd, orig, sizeof(pmd_table_t));

                        /* Replace original pmd entry in pgd with new */
                        pgd_new->entry[i] = (pmd_t)virt_to_phys(pmd);
                        pgd_new->entry[i] |= PMD_TYPE_PMD;
                }
        }

        /* Switch the virtual memory system over to the new tables */
        arm_clean_invalidate_cache();
        arm_drain_writebuffer();
        arm_invalidate_tlb();
        arm_set_ttb(virt_to_phys(pgd_new));
        arm_invalidate_tlb();

        printk("%s: Initial page tables moved from 0x%x to 0x%x physical\n",
               __KERNELNAME__, virt_to_phys(pgd_old),
               virt_to_phys(pgd_new));

        return pgd_new;
}
 
/*
 * Copies global kernel entries into another pgd. Even for
 * sub-pmd ranges the associated pmd entries are copied,
 * assuming any pmds copied are applicable to all tasks in
 * the system.
 */
void copy_pgd_global_by_vrange(pgd_table_t *to, pgd_table_t *from,
                               unsigned long start, unsigned long end)
{
        /* Extend sub-pmd ranges to their respective pmd boundaries */
        start = align(start, PMD_MAP_SIZE);

        if (end < start)
                end = 0;

        /* Aligning would overflow if mapping the last virtual pmd */
        if (end < align(~0, PMD_MAP_SIZE) ||
            start > end) /* end may have already overflown as input */
                end = align_up(end, PMD_MAP_SIZE);
        else
                end = 0;

        copy_pgds_by_vrange(to, from, start, end);
}

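/*
 * Note: an @end of 0 is used above to encode a range that wraps past
 * the top of the virtual address space; copy_pgds_by_vrange() then
 * copies from @start up to the last pgd entry.
 */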
void copy_pgds_by_vrange(pgd_table_t *to, pgd_table_t *from,
                         unsigned long start, unsigned long end)
{
        unsigned long start_i = PGD_INDEX(start);
        unsigned long end_i = PGD_INDEX(end);
        unsigned long irange = (end_i != 0) ? (end_i - start_i)
                               : (PGD_ENTRY_TOTAL - start_i);

        memcpy(&to->entry[start_i], &from->entry[start_i],
               irange * sizeof(pmd_t));
}
 
void arch_copy_pgd_kernel_entries(pgd_table_t *to)
{
        pgd_table_t *from = TASK_PGD(current);

        copy_pgd_global_by_vrange(to, from, KERNEL_AREA_START,
                                  KERNEL_AREA_END);
        copy_pgd_global_by_vrange(to, from, IO_AREA_START, IO_AREA_END);
        copy_pgd_global_by_vrange(to, from, USER_KIP_PAGE,
                                  USER_KIP_PAGE + PAGE_SIZE);
        copy_pgd_global_by_vrange(to, from, ARM_HIGH_VECTOR,
                                  ARM_HIGH_VECTOR + PAGE_SIZE);
        copy_pgd_global_by_vrange(to, from, ARM_SYSCALL_VECTOR,
                                  ARM_SYSCALL_VECTOR + PAGE_SIZE);

        /* We temporarily map uart registers to every process */
        copy_pgd_global_by_vrange(to, from, USERSPACE_CONSOLE_VBASE,
                                  USERSPACE_CONSOLE_VBASE + PAGE_SIZE);
}
 
void arch_update_utcb(unsigned long utcb_address)
{
        /* Update the KIP pointer */
        kip.utcb = utcb_address;
}

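/*
 * Note: the ARMv5 caches are virtually indexed and tagged, which is
 * why the whole cache is cleaned/invalidated and the TLBs invalidated
 * around the translation table base switch below.
 */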
/* Scheduler uses this to switch context */
void arch_space_switch(struct ktcb *to)
{
        pgd_table_t *pgd = TASK_PGD(to);

        system_account_space_switch();

        arm_clean_invalidate_cache();
        arm_invalidate_tlb();
        arm_set_ttb(virt_to_phys(pgd));
        arm_invalidate_tlb();
}
 
void idle_task(void)
{
        while (1) {
                /* Do maintenance */
                tcb_delete_zombies();

                schedule();
        }
}
 
