/*
 *  linux/arch/or32/mm/tlb.c
 *
 *  or32 version
 *    author(s): Matjaz Breskvar (phoenix@bsemi.com)
 *
 *  derived from cris, i386, m68k, ppc, sh ports.
 *
 *  changes:
 *  18. 11. 2003: Matjaz Breskvar (phoenix@bsemi.com)
 *    initial port to or32 architecture
 *
 *  based on: linux/arch/cris/mm/tlb.c
 *    Copyright (C) 2000, 2001  Axis Communications AB
 *    Authors:   Bjorn Wesen (bjornw@axis.com)
 *
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/init.h>

#include <asm/system.h>
#include <asm/segment.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/or32-hf.h>
#include <asm/spr_defs.h>

#define D(x)

#define NUM_TLB_ENTRIES 64
#define TLB_OFFSET(add) (((add) >> PAGE_SHIFT) & (NUM_TLB_ENTRIES-1))

#define NO_CONTEXT -1
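
/*
 * Worked example (added note, not in the original source): the or32
 * TLB is direct mapped with 64 sets per side, so TLB_OFFSET() is just
 * the virtual page number modulo NUM_TLB_ENTRIES.  Assuming the or32
 * default of 8 KiB pages (PAGE_SHIFT == 13):
 *
 *   TLB_OFFSET(0x00002000) == (0x00002000 >> 13) & 63 == 1
 *   TLB_OFFSET(0x00102000) == (0x00102000 >> 13) & 63 == 1
 *
 * The two pages are 128 page frames apart, a multiple of 64, so they
 * compete for the same TLB set.
 */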

/* works around certain problems: when defined, every flush primitive
 * below falls back to flush_tlb_all() */
#define CONFIG_OR32_FLUSH_ALL
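
/*
 * Added explanatory note: the two macros below toggle the instruction
 * MMU from C.  Each loads the address of its local label 99 into EPCR,
 * writes a status-register image with the IME bit cleared (or set)
 * into ESR, and then issues l.rfe, which jumps to EPCR while loading
 * SR from ESR.  or32_disable_immu() additionally rebases the label
 * address by -KERNELBASE, so execution resumes at the physical,
 * untranslated copy of the code; __t2 is only a scratch register used
 * while building that address.
 */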

#define or32_disable_immu()                \
{                                          \
        unsigned long __t1, __t2;          \
        __asm__ __volatile__(              \
                "l.movhi  %6,hi(99f)    ;" \
                "l.ori    %6,%6,lo(99f) ;" \
                "l.movhi  %0,%5         ;" \
                "l.add    %0,%0,%6      ;" \
                "l.mtspr  r0,%0,%1      ;" \
                "l.mfspr  %0,r0,%2      ;" \
                "l.andi   %0,%0,lo(%3)  ;" \
                "l.mtspr  r0,%0,%4      ;" \
                "l.rfe   ;l.nop;l.nop;l.nop;l.nop;l.nop               ;" \
                "99:                     " \
                : "=r"(__t1)               \
                : "K"(SPR_EPCR_BASE), "K"(SPR_SR),        \
                  "K"(0x0000ffff&(~(SPR_SR_IME))), "K"(SPR_ESR_BASE), \
                  "K"((-KERNELBASE)>>16), "r"(__t2)); \
}

#define or32_enable_immu()            \
{                                     \
        unsigned long __t1;           \
        __asm__ __volatile__(         \
                "l.movhi  %0,hi(99f);" \
                "l.ori    %0,%0,lo(99f);" \
                "l.mtspr  r0,%0,%1  ;" \
                "l.mfspr  %0,r0,%2  ;" \
                "l.ori    %0,%0,lo(%3) ;" \
                "l.mtspr  r0,%0,%4  ;" \
                "l.rfe              ;" \
                "l.nop;l.nop;l.nop;l.nop;" \
                "99:                " \
                : "=r"(__t1)     \
                : "K"(SPR_EPCR_BASE), "K"(SPR_SR),         \
                  "K"(SPR_SR_IME), "K"(SPR_ESR_BASE));     \
}

/* invalidate all TLB entries */

void flush_tlb_all(void)
{
        int i;
        unsigned long flags;

        D(printk("tlb: flushed all\n"));

        local_irq_save(flags); /* flush needs to be atomic */
//      or32_disable_immu();

        for(i = 0; i < NUM_TLB_ENTRIES; i++) {
                mtspr(SPR_DTLBMR_BASE(0) + i, 0);
                mtspr(SPR_ITLBMR_BASE(0) + i, 0);
        }

//      or32_enable_immu();
        local_irq_restore(flags);
}
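
/*
 * Illustrative sketch (added, not part of the original file): a debug
 * helper showing how the per-set match/translate register pair can be
 * read back with the same accessors the flush routines use.  The names
 * SPR_DTLBMR_V and SPR_DTLBTR_BASE are assumed to be provided by
 * <asm/spr_defs.h>; treat this as a sketch, not an established API.
 */
static void __attribute__((unused)) dump_dtlb(void)
{
        int i;

        for (i = 0; i < NUM_TLB_ENTRIES; i++) {
                unsigned long mr = mfspr(SPR_DTLBMR_BASE(0) + i);

                /* report only sets whose valid bit is still set */
                if (mr & SPR_DTLBMR_V)
                        printk("dtlb set %02d: vpn 0x%05lx tr 0x%08lx\n",
                               i, mr >> PAGE_SHIFT,
                               mfspr(SPR_DTLBTR_BASE(0) + i));
        }
}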

/* invalidate the selected mm context only */

void flush_tlb_mm(struct mm_struct *mm)
{
#ifdef CONFIG_OR32_FLUSH_ALL
        flush_tlb_all();
#else
        D(printk("tlb: flush mm (%p)\n", mm));

        if(mm->map_count) {
                struct vm_area_struct *mp;
                for(mp = mm->mmap; mp != NULL; mp = mp->vm_next)
                        flush_tlb_range(mp, mp->vm_start, mp->vm_end);
        }
#endif
}

/* invalidate a single page */

void flush_tlb_page(struct vm_area_struct *vma,
                    unsigned long addr)
{
#ifdef CONFIG_OR32_FLUSH_ALL
        flush_tlb_all();
#else
        unsigned long tlb_offset, flags;

        D(printk("tlb: flush page 0x%lx\n", addr));

        addr &= PAGE_MASK; /* perhaps not necessary */
        tlb_offset = TLB_OFFSET(addr);

        local_irq_save(flags);  /* flush needs to be atomic */

        if((mfspr(SPR_DTLBMR_BASE(0) + tlb_offset) & PAGE_MASK) == addr)
                mtspr(SPR_DTLBMR_BASE(0) + tlb_offset, 0);

        if((mfspr(SPR_ITLBMR_BASE(0) + tlb_offset) & PAGE_MASK) == addr)
                mtspr(SPR_ITLBMR_BASE(0) + tlb_offset, 0);

        local_irq_restore(flags);
#endif
}
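
/*
 * Worked example (added note): with 8 KiB pages, flushing
 * addr == 0x30006000 probes set (0x30006000 >> 13) & 63 == 3.  The set
 * is cleared only if its match register still translates that exact
 * page; an entry for a different page that happens to map to set 3 is
 * left alone.
 */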

/* invalidate a page range */

void flush_tlb_range(struct vm_area_struct *vma,
                     unsigned long start,
                     unsigned long end)
{
#ifdef CONFIG_OR32_FLUSH_ALL
        flush_tlb_all();
#else
        unsigned long vpn, flags;

        D(printk("tlb: flush range 0x%lx<->0x%lx (%p)\n",
                 start, end, vma));

        start = start >> PAGE_SHIFT;
        end   = end   >> PAGE_SHIFT;

        local_irq_save(flags);  /* flush needs to be atomic */

        for (vpn = start; vpn < end; vpn++) {
                unsigned long slot = vpn % NUM_TLB_ENTRIES;

                if (vpn == (mfspr(SPR_DTLBMR_BASE(0) + slot) >> PAGE_SHIFT)) {
                        mtspr(SPR_DTLBMR_BASE(0) + slot, 0);
                        D(printk("DTLB invalidate :: vpn 0x%lx, set %ld\n", vpn, slot));
                }

                if (vpn == (mfspr(SPR_ITLBMR_BASE(0) + slot) >> PAGE_SHIFT)) {
                        mtspr(SPR_ITLBMR_BASE(0) + slot, 0);
                        D(printk("ITLB invalidate :: vpn 0x%lx, set %ld\n", vpn, slot));
                }
        }
        local_irq_restore(flags);
#endif
}
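
/*
 * Added note: the loop above does one D-side and one I-side probe per
 * page in the range, even when the range is larger than the TLB.  For
 * example, a 1 MiB flush with 8 KiB pages walks 128 VPNs and so visits
 * each of the 64 sets twice; beyond NUM_TLB_ENTRIES pages it is never
 * cheaper than flush_tlb_all().
 */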

/* called in schedule() just before actually doing the switch_to */

void switch_mm(struct mm_struct *prev, struct mm_struct *next,
               struct task_struct *next_tsk)
{
        /* remember the pgd for the fault handlers
         * this is similar to the pgd register in some other CPUs.
         * we need our own copy of it because current and active_mm
         * might be invalid at points where we still need to dereference
         * the pgd.
         */
        current_pgd = next->pgd;

        /* We don't have context support implemented, so flush all
         * entries belonging to the previous map
         */

/*
        phx_mmu("prev_mm %p, next_mm %p, next_tsk %p, "
                "next_tsk->mm %p, current %p",
                prev, next, next_tsk, next_tsk ? next_tsk->mm : 0, current);
*/

#ifdef CONFIG_OR32_FLUSH_ALL
        flush_tlb_all();
#else
        if (prev != next)
                flush_tlb_mm(prev);
#endif
}

/*
 * Initialize the context-related info for a new mm_struct
 * instance.
 */

int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
        mm->context = NO_CONTEXT;
        return 0;
}

/* called by __exit_mm to destroy the used MMU context if any before
 * destroying the mm itself. this is only called when the last user of the mm
 * drops it.
 */

void destroy_context(struct mm_struct *mm)
{
        D(printk("destroy_context %d (%p)\n", mm->context, mm));

#ifdef CONFIG_OR32_FLUSH_ALL
        flush_tlb_all();
#else
        flush_tlb_mm(mm);
#endif
}

/* called once during VM initialization, from init.c */

void __init tlb_init(void)
{
        /* invalidate the entire TLB */
        flush_tlb_all();
}
