OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

[/] [or1k/] [trunk/] [linux/] [linux-2.4/] [include/] [asm-sh/] [mmu_context.h] - Blame information for rev 1765

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 1275 phoenix
/*
2
 * Copyright (C) 1999 Niibe Yutaka
3
 *
4
 * ASID handling idea taken from MIPS implementation.
5
 */
6
#ifndef __ASM_SH_MMU_CONTEXT_H
7
#define __ASM_SH_MMU_CONTEXT_H
8
 
9
/* The MMU "context" consists of two things:
10
     (a) TLB cache version (or round, cycle whatever expression you like)
11
     (b) ASID (Address Space IDentifier)
12
 */
13
 
14
/*
 * Cache of MMU context last used.
 */
extern unsigned long mmu_context_cache;

/* Low 8 bits of a context value hold the hardware ASID... */
#define MMU_CONTEXT_ASID_MASK           0x000000ff
/* ...and the upper 24 bits count TLB-flush "versions" (cycles). */
#define MMU_CONTEXT_VERSION_MASK        0xffffff00
/* First valid version; version 0 is skipped so that a whole
   context value of 0 stays distinguishable as NO_CONTEXT. */
#define MMU_CONTEXT_FIRST_VERSION       0x00000100
#define NO_CONTEXT                      0

/* ASID is 8-bit value, so it can't be 0x100 */
#define MMU_NO_ASID                     0x100

/*
 * Virtual Page Number mask
 */
#define MMU_VPN_MASK    0xfffff000
31
 
32
/*
33
 * Get MMU context if needed.
34
 */
35
static __inline__ void
36
get_mmu_context(struct mm_struct *mm)
37
{
38
        extern void flush_tlb_all(void);
39
        unsigned long mc = mmu_context_cache;
40
 
41
        /* Check if we have old version of context. */
42
        if (((mm->context ^ mc) & MMU_CONTEXT_VERSION_MASK) == 0)
43
                /* It's up to date, do nothing */
44
                return;
45
 
46
        /* It's old, we need to get new context with new version. */
47
        mc = ++mmu_context_cache;
48
        if (!(mc & MMU_CONTEXT_ASID_MASK)) {
49
                /*
50
                 * We exhaust ASID of this version.
51
                 * Flush all TLB and start new cycle.
52
                 */
53
                flush_tlb_all();
54
                /*
55
                 * Fix version; Note that we avoid version #0
56
                 * to distingush NO_CONTEXT.
57
                 */
58
                if (!mc)
59
                        mmu_context_cache = mc = MMU_CONTEXT_FIRST_VERSION;
60
        }
61
        mm->context = mc;
62
}
63
 
64
/*
 * Initialize the context related info for a new mm_struct
 * instance.  The mm starts with no MMU context; a real one is
 * allocated lazily by get_mmu_context().  Always succeeds.
 */
static __inline__ int init_new_context(struct task_struct *tsk,
                                       struct mm_struct *mm)
{
        mm->context = NO_CONTEXT;
        return 0;
}
74
 
75
/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.  Nothing is needed here: contexts are just
 * version+ASID stamps, reclaimed wholesale when the version
 * cycle wraps in get_mmu_context().
 */
static __inline__ void destroy_context(struct mm_struct *mm)
{
        /* Do nothing */
}
83
 
84
/* Other MMU related constants. */

/*
 * Memory-mapped MMU register addresses and TLB geometry.
 * The register layout differs between SH-3 and SH-4 parts.
 */
#if defined(__sh3__)
#define MMU_PTEH        0xFFFFFFF0      /* Page table entry register HIGH */
#define MMU_PTEL        0xFFFFFFF4      /* Page table entry register LOW */
#define MMU_TTB         0xFFFFFFF8      /* Translation table base register */
#define MMU_TEA         0xFFFFFFFC      /* TLB Exception Address */

#define MMUCR           0xFFFFFFE0      /* MMU Control Register */

#define MMU_TLB_ADDRESS_ARRAY   0xF2000000
#define MMU_PAGE_ASSOC_BIT      0x80

#define MMU_NTLB_ENTRIES        128     /* for 7708 */
#define MMU_CONTROL_INIT        0x007   /* SV=0, TF=1, IX=1, AT=1 */

#elif defined(__SH4__)
#define MMU_PTEH        0xFF000000      /* Page table entry register HIGH */
#define MMU_PTEL        0xFF000004      /* Page table entry register LOW */
#define MMU_TTB         0xFF000008      /* Translation table base register */
#define MMU_TEA         0xFF00000C      /* TLB Exception Address */
#define MMU_PTEA        0xFF000034      /* Page table entry assistance register */

#define MMUCR           0xFF000010      /* MMU Control Register */

#define MMU_ITLB_ADDRESS_ARRAY  0xF2000000
#define MMU_UTLB_ADDRESS_ARRAY  0xF6000000
#define MMU_PAGE_ASSOC_BIT      0x80

#define MMU_NTLB_ENTRIES        64      /* for 7750 */
#define MMU_CONTROL_INIT        0x205   /* SQMD=1, SV=0, TI=1, AT=1 */

#define MMU_ITLB_DATA_ARRAY     0xF3000000
#define MMU_UTLB_DATA_ARRAY     0xF7000000

/* TLB array geometry (unified/instruction TLB entry counts,
   per-entry shift and valid bit within the address arrays). */
#define MMU_UTLB_ENTRIES           64
#define MMU_U_ENTRY_SHIFT           8
#define MMU_UTLB_VALID          0x100
#define MMU_ITLB_ENTRIES            4
#define MMU_I_ENTRY_SHIFT           8
#define MMU_ITLB_VALID          0x100
#endif
126
 
127
/*
 * Program the hardware ASID: read MMU_PTEH, replace its low 8
 * bits with 'asid', and write the result back.
 * NOTE(review): assumes the caller passes an already-masked
 * 8-bit value; any high bits of 'asid' would be OR-ed into
 * PTEH unmasked.
 */
static __inline__ void set_asid(unsigned long asid)
{
        unsigned long __dummy;

        __asm__ __volatile__ ("mov.l    %2, %0\n\t"   /* load current PTEH */
                              "and      %3, %0\n\t"   /* clear ASID field */
                              "or       %1, %0\n\t"   /* insert new ASID */
                              "mov.l    %0, %2"       /* store back to PTEH */
                              : "=&r" (__dummy)
                              : "r" (asid), "m" (__m(MMU_PTEH)),
                                "r" (0xffffff00));
}
139
 
140
/*
 * Return the ASID currently programmed into MMU_PTEH
 * (the low 8 bits of the register).
 */
static __inline__ unsigned long get_asid(void)
{
        unsigned long asid;

        __asm__ __volatile__ ("mov.l    %1, %0"
                              : "=r" (asid)
                              : "m" (__m(MMU_PTEH)));
        asid &= MMU_CONTEXT_ASID_MASK;
        return asid;
}
150
 
151
/*
152
 * After we have set current->mm to a new value, this activates
153
 * the context for the new mm so we see the new mappings.
154
 */
155
static __inline__ void activate_context(struct mm_struct *mm)
156
{
157
        get_mmu_context(mm);
158
        set_asid(mm->context & MMU_CONTEXT_ASID_MASK);
159
}
160
 
161
/* MMU_TTB can be used for optimizing the fault handling.
   (Currently not used) */
/*
 * Switch the CPU to 'next': point the translation table base
 * register at next's page directory, then activate its context.
 * Skipped entirely when prev == next (same address space).
 */
static __inline__ void switch_mm(struct mm_struct *prev,
                                 struct mm_struct *next,
                                 struct task_struct *tsk, unsigned int cpu)
{
        if (prev != next) {
                unsigned long __pgdir = (unsigned long)next->pgd;

                /* Store the new pgd address into MMU_TTB. */
                __asm__ __volatile__("mov.l     %0, %1"
                                     : /* no output */
                                     : "r" (__pgdir), "m" (__m(MMU_TTB)));
                activate_context(next);
        }
}
176
 
177
/* Activate 'next' as the current address space on this CPU. */
#define activate_mm(prev, next) \
        switch_mm((prev),(next),NULL,smp_processor_id())

/*
 * Hook called when entering lazy-TLB mode (kernel thread
 * borrowing an mm).  Nothing to do on SH.
 */
static __inline__ void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
{
}
184
 
185
#endif /* __ASM_SH_MMU_CONTEXT_H */

powered by: WebSVN 2.1.0

© copyright 1999-2025 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.