/*
 * ARM v5 specific init routines
 *
 * Copyright (C) 2007 - 2010 B Labs Ltd.
 */

#include <l4/generic/tcb.h>
#include <l4/generic/scheduler.h>
#include <l4/generic/platform.h>
#include INC_SUBARCH(mm.h)
#include INC_SUBARCH(mmu_ops.h)
#include INC_GLUE(memory.h)
#include INC_GLUE(mapping.h)
#include INC_ARCH(linker.h)

SECTION(".data.pgd") ALIGN(PGD_SIZE) pgd_table_t init_pgd;
struct address_space init_space;

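/* CPU/system identification is not implemented for this subarch;
 * kept as an empty stub. */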
void system_identify(void)
{
}

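/*
 * Load the user context saved at the start of the task's ktcb and
 * return to userspace. The offsets used below assume a context layout
 * of SPSR at offset 0, followed by r0-r14, with PC_USR at offset 64.
 * The SVC stack is reset to PAGE_SIZE above the context base (i.e. the
 * top of the ktcb page, assuming the ktcb is page-aligned), minus the
 * platform stack alignment.
 */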
void jump(struct ktcb *task)
{
        __asm__ __volatile__ (
                "mov lr, %0\n"          /* Load pointer to context area */
                "ldr r0, [lr]\n"        /* Load spsr value to r0 */
                "msr spsr, r0\n"        /* Set SPSR as ARM_MODE_USR */
                "add sp, lr, %1\n"      /* Reset SVC stack */
                "sub sp, sp, %2\n"      /* Align to stack alignment */
                "ldmib lr, {r0-r14}^\n" /* Load all USR registers */
                "nop \n"                /* Spec says don't touch banked registers
                                         * right after LDM {no-pc}^ for one instruction */
                "add lr, lr, #64\n"     /* Manually move to PC location. */
                "ldr lr, [lr]\n"        /* Load the PC_USR to LR */
                "movs pc, lr\n"         /* Jump to userspace, also switching SPSR/CPSR */
                :
                : "r" (task), "r" (PAGE_SIZE), "r" (STACK_ALIGNMENT)
        );
}

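/*
 * Switch to the given task's address space and resume it in userspace.
 * Caches are cleaned/invalidated and TLBs flushed around the TTB
 * switch, since the virtually indexed caches and the TLBs on ARM v5
 * may still hold entries belonging to the previous address space.
 */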
void switch_to_user(struct ktcb *task)
{
        arm_clean_invalidate_cache();
        arm_invalidate_tlb();
        arm_set_ttb(virt_to_phys(TASK_PGD(task)));
        arm_invalidate_tlb();
        jump(task);
}

/* Maps the early memory regions needed to bootstrap the system */
void init_kernel_mappings(void)
{
        //memset((void *)virt_to_phys(&init_pgd), 0, sizeof(pgd_table_t));

        /* Map kernel area to its virtual region */
        add_section_mapping_init(align(virt_to_phys(_start_text), SZ_1MB),
                                 align((unsigned int)_start_text, SZ_1MB), 1,
                                 cacheable | bufferable);

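        /*
         * The one-to-one mapping below keeps the physical program
         * counter valid right after the MMU is enabled;
         * start_virtual_memory() relies on it until it jumps to the
         * kernel's virtual addresses.
         */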
        /* Map kernel one-to-one to its physical region */
        add_section_mapping_init(align(virt_to_phys(_start_text), SZ_1MB),
                                 align(virt_to_phys(_start_text), SZ_1MB),
                                 1, 0);
}

/*
 * Enable virtual memory using kernel's pgd
 * and continue execution on virtual addresses.
 */
void start_virtual_memory()
{
        /*
         * TTB must be 16K aligned. This is because first level tables are
         * sized 16K.
         */
        if ((unsigned int)&init_pgd & 0x3FFF)
                dprintk("kspace not properly aligned for ttb:",
                        (u32)&init_pgd);
        // memset((void *)&kspace, 0, sizeof(pgd_table_t));
        arm_set_ttb(virt_to_phys(&init_pgd));

        /*
         * This sets all 16 domains to zero and domain 0 to 1. The outcome
         * is that page table access permissions are in effect for domain 0.
         * All other domains have no access whatsoever.
         */
        arm_set_domain(1);
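        /*
         * In DACR terms (2 bits per domain) the value written above is
         * presumably 0x1: domain 0 = 0b01 (client, page table
         * permissions checked), domains 1-15 = 0b00 (no access).
         */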

        /* Enable everything before mmu permissions are in place */
        arm_enable_caches();
        arm_enable_wbuffer();

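        /*
         * Switch to high vectors, i.e. relocate the exception vector
         * base from 0x00000000 to 0xFFFF0000 (the V bit in the ARM
         * CP15 control register).
         */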
        arm_enable_high_vectors();

        /*
         * Leave the past behind. TLBs are invalidated, write buffer is drained.
         * The whole of I + D caches are invalidated unconditionally. This is
         * important to ensure that the cache is free of previously loaded
         * values. Otherwise unpredictable data aborts may occur at arbitrary
         * times, each time a load/store operation hits one of the invalid
         * entries and those entries are cleaned to main memory.
         */
        arm_invalidate_cache();
        arm_drain_writebuffer();
        arm_invalidate_tlb();
        arm_enable_mmu();

        /* Jump to virtual memory addresses */
        __asm__ __volatile__ (
                "add sp, sp, %0 \n"     /* Update stack pointer */
                "add fp, fp, %0 \n"     /* Update frame pointer */
                /* On the next instruction below, r0 gets
                 * current PC + KOFFSET + 2 instructions after itself. */
                "add r0, pc, %0 \n"
                /* Special symbol that is extracted and included in the loader.
                 * Debuggers can break on it to load the virtual symbol table */
                ".global break_virtual;\n"
                "break_virtual:\n"
                "mov pc, r0 \n"         /* (r0 has next instruction) */
                :
                : "r" (KERNEL_OFFSET)
                : "r0"
        );

        /*
         * Restore link register (LR) for this function.
         *
         * NOTE: LR values are pushed onto the stack at each function call,
         * which means the restored return values will be physical for all
         * functions in the call stack except this function. So the caller
         * of this function must never return but initiate scheduling etc.
         */
        __asm__ __volatile__ (
                "add %0, %0, %1 \n"
                "mov pc, %0 \n"
                :: "r" (__builtin_return_address(0)), "r" (KERNEL_OFFSET)
        );

        /* should never come here */
        while (1);
}