OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

or1k/trunk/linux/linux-2.4/include/asm-alpha/mmu_context.h (rev 1774)

#ifndef __ALPHA_MMU_CONTEXT_H
#define __ALPHA_MMU_CONTEXT_H

/*
 * get a new mmu context..
 *
 * Copyright (C) 1996, Linus Torvalds
 */

#include <linux/config.h>
#include <asm/system.h>
#include <asm/machvec.h>

/*
 * Force a context reload. This is needed when we change the page
 * table pointer or when we update the ASN of the current process.
 */

/* Don't get into trouble with dueling __EXTERN_INLINEs.  */
#ifndef __EXTERN_INLINE
#include <asm/io.h>
#endif

extern inline unsigned long
__reload_thread(struct thread_struct *pcb)
{
        register unsigned long a0 __asm__("$16");
        register unsigned long v0 __asm__("$0");

        a0 = virt_to_phys(pcb);
        __asm__ __volatile__(
                "call_pal %2 #__reload_thread"
                : "=r"(v0), "=r"(a0)
                : "i"(PAL_swpctx), "r"(a0)
                : "$1", "$22", "$23", "$24", "$25");

        return v0;
}

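/* A note on the PALcode call above: swpctx takes the physical address
   of the new HWPCB in a0 ($16) and returns the physical address of the
   outgoing one in v0 ($0), which is why the PCB pointer is run through
   virt_to_phys() first.  A (hypothetical) caller switching to another
   task's context would look something like

        unsigned long old_pcb = __reload_thread(&next->thread);

   where `next' is a struct task_struct *.  */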
 
/*
 * The maximum ASN's the processor supports.  On the EV4 this is 63
 * but the PAL-code doesn't actually use this information.  On the
 * EV5 this is 127, and EV6 has 255.
 *
 * On the EV4, the ASNs are more-or-less useless anyway, as they are
 * only used as an icache tag, not for TB entries.  On the EV5 and EV6,
 * ASN's also validate the TB entries, and thus make a lot more sense.
 *
 * The EV4 ASN's don't even match the architecture manual, ugh.  And
 * I quote: "If a processor implements address space numbers (ASNs),
 * and the old PTE has the Address Space Match (ASM) bit clear (ASNs
 * in use) and the Valid bit set, then entries can also effectively be
 * made coherent by assigning a new, unused ASN to the currently
 * running process and not reusing the previous ASN before calling the
 * appropriate PALcode routine to invalidate the translation buffer (TB)".
 *
 * In short, the EV4 has a "kind of" ASN capability, but it doesn't actually
 * work correctly and can thus not be used (explaining the lack of PAL-code
 * support).
 */
#define EV4_MAX_ASN 63
#define EV5_MAX_ASN 127
#define EV6_MAX_ASN 255

#ifdef CONFIG_ALPHA_GENERIC
# define MAX_ASN        (alpha_mv.max_asn)
#else
# ifdef CONFIG_ALPHA_EV4
#  define MAX_ASN       EV4_MAX_ASN
# elif defined(CONFIG_ALPHA_EV5)
#  define MAX_ASN       EV5_MAX_ASN
# else
#  define MAX_ASN       EV6_MAX_ASN
# endif
#endif

/*
 * cpu_last_asn(processor):
 * 63                                            0
 * +-------------+----------------+--------------+
 * | asn version | this processor | hardware asn |
 * +-------------+----------------+--------------+
 */

#ifdef CONFIG_SMP
#include <asm/smp.h>
#define cpu_last_asn(cpuid)     (cpu_data[cpuid].last_asn)
#else
extern unsigned long last_asn;
#define cpu_last_asn(cpuid)     last_asn
#endif /* CONFIG_SMP */

#define WIDTH_HARDWARE_ASN      8
#define ASN_FIRST_VERSION (1UL << WIDTH_HARDWARE_ASN)
#define HARDWARE_ASN_MASK ((1UL << WIDTH_HARDWARE_ASN) - 1)
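
/* Worked example of the split above (the code only ever separates on
   HARDWARE_ASN_MASK): with WIDTH_HARDWARE_ASN == 8, ASN_FIRST_VERSION
   is 0x100 and HARDWARE_ASN_MASK is 0xff, so a context value of 0x305
   decomposes as

        0x305 & HARDWARE_ASN_MASK    == 0x05    hardware ASN
        0x305 & ~HARDWARE_ASN_MASK   == 0x300   ASN version 3

   A zeroed mm->context carries version 0, which never matches a live
   cpu_last_asn (see the NOTE below), so it is effectively invalid.  */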
 
/*
 * NOTE! The way this is set up, the high bits of the "asn_cache" (and
 * the "mm->context") are the ASN _version_ code. A version of 0 is
 * always considered invalid, so to invalidate another process you only
 * need to do "p->mm->context = 0".
 *
 * If we need more ASN's than the processor has, we invalidate the old
 * user TLB's (tbiap()) and start a new ASN version. That will automatically
 * force a new asn for any other processes the next time they want to
 * run.
 */

#ifndef __EXTERN_INLINE
#define __EXTERN_INLINE extern inline
#define __MMU_EXTERN_INLINE
#endif

static inline unsigned long
__get_new_mm_context(struct mm_struct *mm, long cpu)
{
        unsigned long asn = cpu_last_asn(cpu);
        unsigned long next = asn + 1;

        if ((asn & HARDWARE_ASN_MASK) >= MAX_ASN) {
                tbiap();
                imb();
                next = (asn & ~HARDWARE_ASN_MASK) + ASN_FIRST_VERSION;
        }
        cpu_last_asn(cpu) = next;
        return next;
}
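
/* Illustrative walk-through on an EV5 (MAX_ASN == 127): starting from
   cpu_last_asn == 0x27f (version 2, hardware ASN 127), the hardware
   bits have hit MAX_ASN, so the user TB and icache are flushed and the
   counter rolls over to

        next = (0x27f & ~0xff) + 0x100 == 0x300

   i.e. version 3, hardware ASN 0.  Every mm still carrying a version-2
   context now fails the version check in ev5_switch_mm below and is
   handed a fresh ASN before it runs again.  */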
 
__EXTERN_INLINE void
ev5_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
              struct task_struct *next, long cpu)
{
        /* Check if our ASN is of an older version, and thus invalid. */
        unsigned long asn;
        unsigned long mmc;

#ifdef CONFIG_SMP
        cpu_data[cpu].asn_lock = 1;
        barrier();
#endif
        asn = cpu_last_asn(cpu);
        mmc = next_mm->context[cpu];
        if ((mmc ^ asn) & ~HARDWARE_ASN_MASK) {
                mmc = __get_new_mm_context(next_mm, cpu);
                next_mm->context[cpu] = mmc;
        }
#ifdef CONFIG_SMP
        else
                cpu_data[cpu].need_new_asn = 1;
#endif

        /* Always update the PCB ASN.  Another thread may have allocated
           a new mm->context (via flush_tlb_mm) without the ASN serial
           number wrapping.  We have no way to detect when this is needed.  */
        next->thread.asn = mmc & HARDWARE_ASN_MASK;
}
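
/* The version test above is an XOR trick: (mmc ^ asn) has bits set
   outside HARDWARE_ASN_MASK exactly when the two values disagree in
   their version field.  For example, mmc == 0x205 against
   cpu_last_asn == 0x312 gives 0x205 ^ 0x312 == 0x117, and
   0x117 & ~0xff == 0x100 is nonzero, so the context is stale and a
   new ASN is allocated.  */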
 
__EXTERN_INLINE void
ev4_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
              struct task_struct *next, long cpu)
{
        /* As described, ASN's are broken for TLB usage.  But we can
           optimize for switching between threads -- if the mm is
           unchanged from current we needn't flush.  */
        /* ??? May not be needed because EV4 PALcode recognizes that
           ASN's are broken and does a tbiap itself on swpctx, under
           the "Must set ASN or flush" rule.  At least this is true
           for a 1992 SRM, reports Joseph Martin (jmartin@hlo.dec.com).
           I'm going to leave this here anyway, just to Be Sure.  -- r~  */
        if (prev_mm != next_mm)
                tbiap();

        /* Do continue to allocate ASNs, because we can still use them
           to avoid flushing the icache.  */
        ev5_switch_mm(prev_mm, next_mm, next, cpu);
}

extern void __load_new_mm_context(struct mm_struct *);

#ifdef CONFIG_SMP
#define check_mmu_context()                                     \
do {                                                            \
        int cpu = smp_processor_id();                           \
        cpu_data[cpu].asn_lock = 0;                             \
        barrier();                                              \
        if (cpu_data[cpu].need_new_asn) {                       \
                struct mm_struct * mm = current->active_mm;     \
                cpu_data[cpu].need_new_asn = 0;                 \
                if (!mm->context[cpu])                          \
                        __load_new_mm_context(mm);              \
        }                                                       \
} while(0)
#else
#define check_mmu_context()  do { } while(0)
#endif
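
/* A sketch of the SMP protocol implied above: ev5_switch_mm raises
   asn_lock for the duration of the switch and sets need_new_asn when
   it keeps an existing context.  A concurrent flush on another CPU
   invalidates that context by zeroing mm->context[cpu] (see the NOTE
   above).  check_mmu_context, run once the switch has completed,
   drops the lock and reloads the context via __load_new_mm_context
   if it was zeroed in the window.  */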
 
__EXTERN_INLINE void
ev5_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
{
        __load_new_mm_context(next_mm);
}

__EXTERN_INLINE void
ev4_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
{
        __load_new_mm_context(next_mm);
        tbiap();
}
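
/* ev4_activate_mm needs the extra tbiap() because, as described in
   the big comment above, EV4 ASNs do not validate TB entries; loading
   a new context alone would leave stale user translations live.  On
   EV5/EV6 the ASN itself keeps old entries from matching, so
   ev5_activate_mm can skip the flush.  */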
 
#ifdef CONFIG_ALPHA_GENERIC
# define switch_mm(a,b,c,d)     alpha_mv.mv_switch_mm((a),(b),(c),(d))
# define activate_mm(x,y)       alpha_mv.mv_activate_mm((x),(y))
#else
# ifdef CONFIG_ALPHA_EV4
#  define switch_mm(a,b,c,d)    ev4_switch_mm((a),(b),(c),(d))
#  define activate_mm(x,y)      ev4_activate_mm((x),(y))
# else
#  define switch_mm(a,b,c,d)    ev5_switch_mm((a),(b),(c),(d))
#  define activate_mm(x,y)      ev5_activate_mm((x),(y))
# endif
#endif

extern inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
        int i;

        for (i = 0; i < smp_num_cpus; i++)
                mm->context[cpu_logical_map(i)] = 0;
        tsk->thread.ptbr = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
        return 0;
}
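
/* The ptbr computation above converts mm->pgd from a kernel
   direct-mapped virtual address into a page frame number: subtracting
   IDENT_ADDR yields the physical address, and shifting right by
   PAGE_SHIFT (13 on Alpha, i.e. 8KB pages) yields the PFN that the
   HWPCB's PTBR field expects.  enter_lazy_tlb below does the same
   for the lazy-TLB case.  */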
 
extern inline void
destroy_context(struct mm_struct *mm)
{
        /* Nothing to do.  */
}

static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
{
        tsk->thread.ptbr = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
}

#ifdef __MMU_EXTERN_INLINE
#undef __EXTERN_INLINE
#undef __MMU_EXTERN_INLINE
#endif

#endif /* __ALPHA_MMU_CONTEXT_H */
