OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

or1k/trunk/linux/linux-2.4/include/asm-ppc64/mmu_context.h (rev 1765)

#ifndef __PPC64_MMU_CONTEXT_H
#define __PPC64_MMU_CONTEXT_H

#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/mmu.h>
#include <asm/ppcdebug.h>
#ifdef CONFIG_ALTIVEC
#include <asm/cputable.h>
#endif

/*
 * Copyright (C) 2001 PPC 64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define NO_CONTEXT              0
#define FIRST_USER_CONTEXT      0x10    /* First 16 reserved for kernel */
#define LAST_USER_CONTEXT       0x8000  /* Same as PID_MAX for now... */
#define NUM_USER_CONTEXT        (LAST_USER_CONTEXT-FIRST_USER_CONTEXT)
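/* NUM_USER_CONTEXT works out to 0x8000 - 0x10 = 0x7ff0, i.e. 32752
 * allocatable user context numbers. */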

/* Choose whether we want to implement our context
 * number allocator as a LIFO or FIFO queue.
 */
#if 1
#define MMU_CONTEXT_LIFO
#else
#define MMU_CONTEXT_FIFO
#endif
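
/* With MMU_CONTEXT_LIFO, destroy_context() below pushes a freed context
 * number back at the head, so the very next init_new_context() reuses it;
 * the FIFO variant appends at the tail, cycling through the whole range
 * before any number comes around again.
 */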

struct mmu_context_queue_t {
        spinlock_t lock;
        long head;
        long tail;
        long size;
        mm_context_t elements[LAST_USER_CONTEXT];
};

extern struct mmu_context_queue_t mmu_context_queue;
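
/* The free-list ring itself is populated by the ppc64 boot code, which is
 * not part of this header.  A minimal sketch of that setup, assuming it
 * runs once before the first user mm is created (illustrative only; the
 * real routine lives in the arch init code and may differ):
 */
#if 0   /* illustrative sketch, not part of this header */
static void example_mmu_context_init(void)
{
        long i;

        mmu_context_queue.lock = SPIN_LOCK_UNLOCKED;
        mmu_context_queue.head = 0;
        mmu_context_queue.tail = NUM_USER_CONTEXT - 1;
        mmu_context_queue.size = NUM_USER_CONTEXT;

        /* Seed the ring with every allocatable context number. */
        for (i = 0; i < NUM_USER_CONTEXT; i++)
                mmu_context_queue.elements[i] = i + FIRST_USER_CONTEXT;
}
#endif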

/* Nothing to do on ppc64 when a CPU enters lazy-TLB mode. */
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
{
}

/* Flushes the user entries from the segment table (STAB); defined in the
 * ppc64 mm code. */
extern void flush_stab(void);

/*
 * The context number queue has underflowed.
 * Meaning: we tried to push a context number that was freed
 * back onto the context queue and the queue was already full.
 */
static inline void
mmu_context_underflow(void)
{
        printk(KERN_DEBUG "mmu_context_underflow\n");
        panic("mmu_context_underflow");
}

/*
 * Set up the context for a new address space: pop a free context
 * number off the head of the ring.
 */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
        long head, size;
        unsigned long flags;

        spin_lock_irqsave(&mmu_context_queue.lock, flags);

        if ((size = mmu_context_queue.size) <= 0) {
                spin_unlock_irqrestore(&mmu_context_queue.lock, flags);
                return -ENOMEM;
        }

        head = mmu_context_queue.head;
        mm->context = mmu_context_queue.elements[head];

        head = (head < LAST_USER_CONTEXT-1) ? head+1 : 0;
        mmu_context_queue.head = head;
        mmu_context_queue.size = size-1;

        spin_unlock_irqrestore(&mmu_context_queue.lock, flags);

        return 0;
}
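
/*
 * (init_new_context() is invoked from the arch-independent fork path when
 * a new mm_struct is created; destroy_context() below is its counterpart
 * when the address space is torn down.)
 */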

/*
 * We're finished using the context for an address space.
 */
static inline void
destroy_context(struct mm_struct *mm)
{
        long index, size;
        unsigned long flags;

        spin_lock_irqsave(&mmu_context_queue.lock, flags);

        if ((size = mmu_context_queue.size) >= NUM_USER_CONTEXT) {
                spin_unlock_irqrestore(&mmu_context_queue.lock, flags);
                mmu_context_underflow();        /* panics, does not return */
        }

#ifdef MMU_CONTEXT_LIFO
        /* Push the freed number back at the head so it is reused next. */
        index = mmu_context_queue.head;
        index = (index > 0) ? index-1 : LAST_USER_CONTEXT-1;
        mmu_context_queue.head = index;
#else
        /* Append the freed number at the tail to defer its reuse. */
        index = mmu_context_queue.tail;
        index = (index < LAST_USER_CONTEXT-1) ? index+1 : 0;
        mmu_context_queue.tail = index;
#endif

        mmu_context_queue.size = size+1;
        mmu_context_queue.elements[index] = mm->context;

        spin_unlock_irqrestore(&mmu_context_queue.lock, flags);
}
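
/*
 * A standalone model of the allocator above (illustrative only; names are
 * hypothetical and the kernel locking and mm_struct plumbing are stripped
 * out) so the head/size ring arithmetic can be watched in isolation.  It
 * demonstrates the LIFO policy: a freed context number is handed straight
 * back by the next allocation.
 */
#if 0   /* illustrative sketch, not part of this header */
#include <stdio.h>

#define FIRST_CTX  0x10
#define LAST_CTX   0x8000
#define NUM_CTX    (LAST_CTX - FIRST_CTX)

static unsigned long q_elems[LAST_CTX];
static long q_head, q_size;

static unsigned long model_alloc(void)          /* cf. init_new_context() */
{
        unsigned long ctx = q_elems[q_head];

        q_head = (q_head < LAST_CTX-1) ? q_head+1 : 0;
        q_size--;
        return ctx;
}

static void model_free(unsigned long ctx)       /* cf. destroy_context() */
{
        q_head = (q_head > 0) ? q_head-1 : LAST_CTX-1;
        q_size++;
        q_elems[q_head] = ctx;
}

int main(void)
{
        unsigned long a, b;
        long i;

        for (i = 0; i < NUM_CTX; i++)           /* seed the free ring */
                q_elems[i] = FIRST_CTX + i;
        q_size = NUM_CTX;

        a = model_alloc();
        model_free(a);
        b = model_alloc();
        printf("freed 0x%lx, next alloc got 0x%lx\n", a, b);
        return 0;
}
#endif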

/*
 * switch_mm is the entry point called from the architecture-independent
 * code in kernel/sched.c.
 */
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
          struct task_struct *tsk, int cpu)
{
#ifdef CONFIG_ALTIVEC
        /* Stop all outstanding AltiVec data streams (dssall) and wait for
         * that to complete (sync) before the address space changes. */
        __asm__ __volatile__(
                BEGIN_FTR_SECTION
                "\tdssall\n"
                "\tsync\n"
                END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
                ::);
#endif
        flush_stab();
}

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
#define activate_mm(active_mm, mm) \
        switch_mm(active_mm, mm, current, smp_processor_id())

#define VSID_RANDOMIZER 42470972311     /* 0x9e3779b97: large odd multiplier */
#define VSID_MASK       0xfffffffff     /* VSIDs are 36 bits wide */

/* This is only valid for kernel (including vmalloc, imalloc and bolted) EAs.
 */
static inline unsigned long
get_kernel_vsid( unsigned long ea )
{
        unsigned long ordinal, vsid;

        ordinal = (((ea >> 28) & 0x1fffff) * LAST_USER_CONTEXT) | (ea >> 60);
        vsid = (ordinal * VSID_RANDOMIZER) & VSID_MASK;

        ifppcdebug(PPCDBG_HTABSTRESS) {
                /* For debug, this path creates a very poor vsid distribution.
                 * A user program can access virtual addresses in the form
                 * 0x0yyyyxxxx000 where yyyy = xxxx to cause multiple mappings
                 * to hash to the same page table group.
                 */
                ordinal = ((ea >> 28) & 0x1fff) | (ea >> 44);
                vsid = ordinal & VSID_MASK;
        }

        return vsid;
}
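
/*
 * Worked example: for ea = 0xC000000000000000 (the base of the kernel's
 * linear mapping), (ea >> 28) & 0x1fffff == 0 and ea >> 60 == 0xc, so
 * ordinal == 0xc and vsid == (0xc * 0x9e3779b97) & 0xfffffffff
 *                         == 0x76a99b4b14 & 0xfffffffff == 0x6a99b4b14.
 */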

/* This is only valid for user EAs (user EAs do not exceed 2^41 (EADDR_SIZE)).
 */
static inline unsigned long
get_vsid( unsigned long context, unsigned long ea )
{
        unsigned long ordinal, vsid;

        ordinal = (((ea >> 28) & 0x1fffff) * LAST_USER_CONTEXT) | context;
        vsid = (ordinal * VSID_RANDOMIZER) & VSID_MASK;

        ifppcdebug(PPCDBG_HTABSTRESS) {
                /* See comment above. */
                ordinal = ((ea >> 28) & 0x1fff) | (context << 16);
                vsid = ordinal & VSID_MASK;
        }

        return vsid;
}
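
/*
 * Standalone illustration (names hypothetical, not part of this header):
 * the same multiplicative hash lifted out so it can be compiled and run
 * on its own, assuming a 64-bit unsigned long as on ppc64.  It shows one
 * user EA getting a different VSID under each context, which is what
 * keeps address spaces apart in the hashed page table.
 */
#if 0   /* illustrative sketch */
#include <stdio.h>

#define LAST_CTX   0x8000UL
#define RANDOMIZER 42470972311UL
#define MASK       0xfffffffffUL

static unsigned long model_get_vsid(unsigned long context, unsigned long ea)
{
        unsigned long ordinal;

        ordinal = (((ea >> 28) & 0x1fffff) * LAST_CTX) | context;
        return (ordinal * RANDOMIZER) & MASK;
}

int main(void)
{
        unsigned long ea = 0x10000000UL;        /* same user EA, segment 1 */

        printf("ctx 0x10: vsid 0x%09lx\n", model_get_vsid(0x10, ea));
        printf("ctx 0x11: vsid 0x%09lx\n", model_get_vsid(0x11, ea));
        return 0;
}
#endif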

#endif /* __PPC64_MMU_CONTEXT_H */
