OpenCores Subversion repository: c0or1k
URL: https://opencores.org/ocsvn/c0or1k/c0or1k/trunk
File: src/arch/arm/v6/init.c (rev 2)

/*
 * ARM v6 specific init routines
 *
 * Copyright (C) 2007 - 2010 B Labs Ltd.
 */

#include <l4/generic/tcb.h>
#include <l4/generic/scheduler.h>
#include <l4/generic/platform.h>
#include INC_SUBARCH(mm.h)
#include INC_SUBARCH(mmu_ops.h)
#include INC_GLUE(memory.h)
#include INC_GLUE(mapping.h)
#include INC_ARCH(linker.h)

SECTION(".init.pgd") ALIGN(PGD_SIZE) pgd_table_t init_pgd;
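
/*
 * Sizing note (an assumption; the actual constants live in the glue
 * headers): the ARMv6 first-level table has 4096 word-sized entries,
 * one per 1MB section of the 4GB space, i.e. 16KB, matching the 16KB
 * TTB alignment checked in start_virtual_memory() below. A commented
 * compile-time guard along these lines could make that explicit:
 *
 *	typedef char pgd_is_16k[(sizeof(pgd_table_t) == 4096 * 4) ? 1 : -1];
 */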

void jump(struct ktcb *task)
{
	__asm__ __volatile__ (
		"mov	lr,	%0\n"	/* Load pointer to context area */
		"ldr	r0,	[lr]\n"	/* Load spsr value to r0 */
		"msr	spsr,	r0\n"	/* Set SPSR as ARM_MODE_USR */
		"add	sp, lr, %1\n"	/* Reset SVC stack */
		"sub	sp, sp, %2\n"	/* Align to stack alignment */
		"ldmib	lr, {r0-r14}^\n" /* Load all USR registers */

		"nop		\n"	/* Spec says don't touch banked registers
					 * right after LDM {no-pc}^ for one instruction */
		"add	lr, lr, #64\n"	/* Manually move to PC location. */
		"ldr	lr,	[lr]\n"	/* Load PC_USR into LR */
		"movs	pc,	lr\n"	/* Jump to userspace, also switching SPSR/CPSR */
		:
		: "r" (task), "r" (PAGE_SIZE), "r" (STACK_ALIGNMENT)
	);
}
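
/*
 * A minimal sketch of the context layout the asm in jump() assumes
 * (hypothetical name; the real layout belongs to struct ktcb in tcb.h).
 * The offsets follow directly from the instructions above: spsr at +0,
 * r0-r14 at +4..+60, and PC_USR at +64.
 */
struct context_layout_sketch {
	u32 spsr;	/* +0: written to SPSR before the mode switch */
	u32 r[15];	/* +4..+60: restored by ldmib lr, {r0-r14}^ */
	u32 pc;		/* +64: PC_USR, loaded into LR, then movs pc, lr */
};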

void switch_to_user(struct ktcb *task)
{
	arm_clean_invalidate_cache();
	arm_invalidate_tlb();
	arm_set_ttb(virt_to_phys(TASK_PGD(task)));
	arm_invalidate_tlb();
	jump(task);
}
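
/*
 * Why the heavy flushing in switch_to_user(): changing the TTB changes
 * the virtual-to-physical mapping, so stale cache lines and TLB entries
 * from the previous address space are cleaned out around the switch. A
 * plausible sketch of arm_invalidate_tlb() (an assumption; the real
 * implementation lives in mmu_ops.h):
 */
static inline void invalidate_tlb_sketch(void)
{
	unsigned int zero = 0;

	/* CP15 c8, c7, 0: invalidate the entire unified TLB */
	__asm__ __volatile__ ("mcr p15, 0, %0, c8, c7, 0" : : "r" (zero));
}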

/* Maps the early memory regions needed to bootstrap the system */
void init_kernel_mappings(void)
{
	memset((void *)virt_to_phys(&init_pgd), 0, sizeof(pgd_table_t));

	/* Map kernel area to its virtual region */
	add_section_mapping_init(align(virt_to_phys(_start_text), SZ_1MB),
				 align((unsigned int)_start_text, SZ_1MB), 1,
				 cacheable | bufferable);

	/* Map kernel one-to-one to its physical region */
	add_section_mapping_init(align(virt_to_phys(_start_text), SZ_1MB),
				 align(virt_to_phys(_start_text), SZ_1MB),
				 1, 0);
}
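
/*
 * The alignment arithmetic assumed above (hypothetical definition; the
 * real align() comes from the generic headers): rounding an address
 * down to a 1MB boundary so it can serve as a first-level section base.
 *
 *	#define align(addr, size)  ((unsigned int)(addr) & ~((size) - 1))
 *
 * e.g. align(0x8032C4, SZ_1MB) == 0x800000
 */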

/*
 * Enable virtual memory using the kernel's pgd
 * and continue execution on virtual addresses.
 */
void start_virtual_memory()
{
	/*
	 * The TTB must be 16K-aligned, because first-level tables
	 * are sized 16K.
	 */
	if ((unsigned int)&init_pgd & 0x3FFF)
		dprintk("kspace not properly aligned for ttb:",
			(u32)&init_pgd);
	// memset((void *)&kspace, 0, sizeof(pgd_table_t));
	arm_set_ttb(virt_to_phys(&init_pgd));
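	/*
	 * A plausible sketch of arm_set_ttb() (an assumption; the real
	 * implementation is in mmu_ops.h): write the physical table base
	 * to the translation table base register via CP15 c2.
	 *
	 *	__asm__ __volatile__ ("mcr p15, 0, %0, c2, c0, 0"
	 *			      : : "r" (ttb_phys));
	 */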

	/*
	 * This sets the access field of domain 0 to 1 (client) and
	 * those of the other 15 domains to 0. The outcome is that page
	 * table access permissions are in effect for domain 0, while
	 * all other domains have no access whatsoever.
	 */
	arm_set_domain(1);
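
	/*
	 * Sketch of what arm_set_domain(1) presumably writes (an
	 * assumption; see mmu_ops.h for the real code): the domain
	 * access control register holds 16 two-bit fields, and the
	 * value 1 puts domain 0 in client mode (01, permissions
	 * checked) and domains 1-15 in no-access mode (00).
	 *
	 *	__asm__ __volatile__ ("mcr p15, 0, %0, c3, c0, 0"
	 *			      : : "r" (1));
	 */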

	/* Enable everything before MMU permissions are in place */
	arm_enable_caches();
	arm_enable_wbuffer();

	arm_enable_high_vectors();

	/*
	 * Leave the past behind. TLBs are invalidated and the write
	 * buffer is drained. The whole of the I + D caches is
	 * invalidated unconditionally. This is important to ensure the
	 * caches are free of previously loaded values; otherwise
	 * unpredictable data aborts may occur at arbitrary times,
	 * whenever a load/store operation hits one of the stale
	 * entries and those entries get written back to main memory.
	 */
	arm_invalidate_cache();
	arm_drain_writebuffer();
	arm_invalidate_tlb();
	arm_enable_mmu();
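
	/*
	 * Sketch of the final switch (an assumption; the real
	 * arm_enable_mmu() is in mmu_ops.h): set the M bit (bit 0) of
	 * the CP15 control register, after which all fetches and data
	 * accesses are translated through init_pgd.
	 *
	 *	mrc p15, 0, r0, c1, c0, 0
	 *	orr r0, r0, #1
	 *	mcr p15, 0, r0, c1, c0, 0
	 */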

	/* Jump to virtual memory addresses */
	__asm__ __volatile__ (
		"add	sp, sp, %0	\n"	/* Update stack pointer */
		"add	fp, fp, %0	\n"	/* Update frame pointer */
		/* On the instruction below, r0 gets the current PC plus
		 * KERNEL_OFFSET; since the ARM PC reads two instructions
		 * ahead, r0 ends up with the virtual address of the
		 * instruction right after the jump. */
		"add	r0, pc, %0	\n"
		/* Special symbol that is extracted and included in the loader.
		 * Debuggers can break on it to load the virtual symbol table. */
		".global break_virtual;\n"
		"break_virtual:\n"
		"mov	pc, r0		\n" /* (r0 has the next instruction) */
		:
		: "r" (KERNEL_OFFSET)
		: "r0"
	);

	/*
	 * Restore the link register (LR) for this function.
	 *
	 * NOTE: LR values are pushed onto the stack at each function
	 * call, which means the restored return addresses will be
	 * physical for all functions in the call stack except this one.
	 * So the caller of this function must never return, but should
	 * initiate scheduling etc.
	 */
	__asm__ __volatile__ (
		"add	%0, %0, %1	\n"
		"mov	pc, %0		\n"
		:: "r" (__builtin_return_address(0)), "r" (KERNEL_OFFSET)
	);

	/* Should never reach here */
	while (1);
}
