OpenCores
URL https://opencores.org/ocsvn/mips_enhanced/mips_enhanced/trunk

Subversion Repositories mips_enhanced

mips_enhanced/trunk/grlib-gpl-1.0.19-b3188/software/leon3/mmu.h (rev 2)

#ifndef _MMU_H
#define _MMU_H

/* Page table parameters */
#define PAGE_SHIFT   12
#define PAGE_SIZE    (1 << PAGE_SHIFT)

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define SRMMU_PMD_SHIFT         18
#define SRMMU_PMD_SIZE          (1UL << SRMMU_PMD_SHIFT)
#define SRMMU_PMD_MASK          (~(SRMMU_PMD_SIZE-1))
#define SRMMU_PMD_ALIGN(addr)   (((addr)+SRMMU_PMD_SIZE-1)&SRMMU_PMD_MASK)

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define SRMMU_PGDIR_SHIFT       24
#define SRMMU_PGDIR_SIZE        (1UL << SRMMU_PGDIR_SHIFT)
#define SRMMU_PGDIR_MASK        (~(SRMMU_PGDIR_SIZE-1))
#define SRMMU_PGDIR_ALIGN(addr) (((addr)+SRMMU_PGDIR_SIZE-1)&SRMMU_PGDIR_MASK)

#define SRMMU_PTRS_PER_PTE      64
#define SRMMU_PTRS_PER_PMD      64
#define SRMMU_PTRS_PER_PGD      256
#define SRMMU_PTRS_PER_CTX      256

#define SRMMU_PTE_TABLE_SIZE    0x100 /* 64 entries, 4 bytes apiece */
#define SRMMU_PMD_TABLE_SIZE    0x100 /* 64 entries, 4 bytes apiece */
#define SRMMU_PGD_TABLE_SIZE    0x400 /* 256 entries, 4 bytes apiece */

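/*
 * Illustrative sketch (not part of the original header): with the constants
 * above, a 32-bit virtual address splits into an 8-bit level-1 (PGD) index,
 * a 6-bit level-2 (PMD) index, a 6-bit level-3 (PTE) index and a 12-bit page
 * offset.  The macros below only demonstrate that decomposition; their names
 * are hypothetical.
 */
#define SRMMU_PGD_INDEX(vaddr)   (((vaddr) >> SRMMU_PGDIR_SHIFT) & (SRMMU_PTRS_PER_PGD - 1))
#define SRMMU_PMD_INDEX(vaddr)   (((vaddr) >> SRMMU_PMD_SHIFT)   & (SRMMU_PTRS_PER_PMD - 1))
#define SRMMU_PTE_INDEX(vaddr)   (((vaddr) >> PAGE_SHIFT)        & (SRMMU_PTRS_PER_PTE - 1))
#define SRMMU_PAGE_OFFSET(vaddr) ((vaddr) & (PAGE_SIZE - 1))
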
/* Definition of the values in the ET field of PTD's and PTE's */
#define SRMMU_ET_MASK         0x3
#define SRMMU_ET_INVALID      0x0
#define SRMMU_ET_PTD          0x1
#define SRMMU_ET_PTE          0x2
#define SRMMU_ET_REPTE        0x3 /* AIEEE, SuperSparc II reverse endian page! */

/* Physical page extraction from PTP's and PTE's. */
#define SRMMU_CTX_PMASK    0xfffffff0
#define SRMMU_PTD_PMASK    0xfffffff0
#define SRMMU_PTE_PMASK    0xffffff00

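/*
 * Illustrative example (not part of the original header): the PPN field of a
 * PTE holds the physical page base shifted right by 4 (as srmmu_mk_pte_phys()
 * below shows), so the physical page base is recovered with a left shift of 4,
 * the same shift that srmmu_pgd_page()/srmmu_pmd_page() apply further down.
 * The macro name is hypothetical.
 */
#define SRMMU_PTE_PADDR(pte) (((pte) & SRMMU_PTE_PMASK) << 4)
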
/* The pte non-page bits.  Some notes:
 * 1) cache, dirty, valid, and ref are frobbable
 *    for both supervisor and user pages.
 * 2) exec and write will only give the desired effect
 *    on user pages
 * 3) use priv and priv_readonly for changing the
 *    characteristics of supervisor ptes
 */
#define SRMMU_CACHE        0x80
#define SRMMU_DIRTY        0x40
#define SRMMU_REF          0x20
#define SRMMU_EXEC         0x08
#define SRMMU_WRITE        0x04
#define SRMMU_VALID        0x02 /* SRMMU_ET_PTE */
#define SRMMU_PRIV         0x1c
#define SRMMU_PRIV_RDONLY  0x18

#define SRMMU_CHG_MASK    (0xffffff00 | SRMMU_REF | SRMMU_DIRTY)

/* Some day I will implement true fine grained access bits for
 * user pages because the SRMMU gives us the capabilities to
 * enforce all the protection levels that vma's can have.
 * XXX But for now...
 */
#define SRMMU_PAGE_NONE    __pgprot(SRMMU_VALID | SRMMU_CACHE | \
                                    SRMMU_PRIV | SRMMU_REF)
#define SRMMU_PAGE_SHARED  __pgprot(SRMMU_VALID | SRMMU_CACHE | \
                                    SRMMU_EXEC | SRMMU_WRITE | SRMMU_REF)
#define SRMMU_PAGE_COPY    __pgprot(SRMMU_VALID | SRMMU_CACHE | \
                                    SRMMU_EXEC | SRMMU_REF)
#define SRMMU_PAGE_RDONLY  __pgprot(SRMMU_VALID | SRMMU_CACHE | \
                                    SRMMU_EXEC | SRMMU_REF)
#define SRMMU_PAGE_KERNEL  __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
                                    SRMMU_DIRTY | SRMMU_REF)

/* MMU ASI spaces */
#define ASI_M_FLUSH_PROBE  0x18
#define ASI_M_MMUREGS      0x19
#define ASI_MMU_BP         0x1c

/* MMU register addresses, used with ASI_M_MMUREGS by the accessors below */
#define SRMMU_CTRL_REG           0x00000000
#define SRMMU_CTXTBL_PTR         0x00000100
#define SRMMU_CTX_REG            0x00000200
#define SRMMU_FAULT_STATUS       0x00000300
#define SRMMU_FAULT_ADDR         0x00000400

#ifndef __ASSEMBLER__

/* Write the MMU control register (address 0 = SRMMU_CTRL_REG in ASI_M_MMUREGS). */
static inline void srmmu_set_mmureg(unsigned long regval)
{
        asm volatile("sta %0, [%%g0] %1\n\t" : :
                     "r" (regval), "i" (ASI_M_MMUREGS) : "memory");
}

/* Read the MMU control register. */
static inline int srmmu_get_mmureg(void)
{
        register int retval;
        asm volatile("lda [%%g0] %1, %0\n\t" :
                     "=r" (retval) :
                     "i" (ASI_M_MMUREGS));
        return retval;
}

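/*
 * Illustrative sketch (not part of the original header): the fault status and
 * fault address registers live in the same ASI_M_MMUREGS space as the control
 * register, so a trap handler could read them with the same lda pattern used
 * by the accessors in this file.  The function name is hypothetical.
 */
static inline unsigned long srmmu_get_fault_status_example(void)
{
        unsigned long retval;
        asm volatile("lda [%1] %2, %0\n\t" :
                     "=r" (retval) :
                     "r" (SRMMU_FAULT_STATUS),
                     "i" (ASI_M_MMUREGS));
        return retval;
}
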
static inline void srmmu_set_ctable_ptr(unsigned long paddr)
{
        paddr = ((paddr >> 4) & SRMMU_CTX_PMASK);
        asm volatile("sta %0, [%1] %2\n\t" : :
                     "r" (paddr), "r" (SRMMU_CTXTBL_PTR),
                     "i" (ASI_M_MMUREGS) :
                     "memory");
}

static inline unsigned long srmmu_get_ctable_ptr(void)
{
        unsigned int retval;
        asm volatile("lda [%1] %2, %0\n\t" :
                     "=r" (retval) :
                     "r" (SRMMU_CTXTBL_PTR),
                     "i" (ASI_M_MMUREGS));
        return (retval & SRMMU_CTX_PMASK) << 4;
}

static inline void srmmu_set_context(int context)
{
        asm volatile("sta %0, [%1] %2\n\t" : :
                     "r" (context), "r" (SRMMU_CTX_REG),
                     "i" (ASI_M_MMUREGS) : "memory");
}

static inline int srmmu_get_context(void)
{
        register int retval;
        asm volatile("lda [%1] %2, %0\n\t" :
                     "=r" (retval) :
                     "r" (SRMMU_CTX_REG),
                     "i" (ASI_M_MMUREGS));
        return retval;
}

static inline void srmmu_flush_whole_tlb(void)
{
        __asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
                             "r" (0x400),        /* Flush entire TLB!! */
                             "i" (ASI_M_FLUSH_PROBE) : "memory");
}

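/*
 * Illustrative bring-up sketch (not part of the original header): assuming a
 * context table has already been built at physical address ctxtbl_pa and that
 * context 0 points to a valid level-1 table, the MMU could be enabled roughly
 * as below.  The function name is hypothetical; bit 0 of the SRMMU control
 * register is the enable (E) bit.
 */
static inline void srmmu_enable_example(unsigned long ctxtbl_pa)
{
        srmmu_set_ctable_ptr(ctxtbl_pa);            /* physical base of the context table */
        srmmu_set_context(0);                       /* select context 0 */
        srmmu_flush_whole_tlb();                    /* discard stale translations */
        srmmu_set_mmureg(srmmu_get_mmureg() | 0x1); /* set the enable bit */
}
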
typedef unsigned long pte_t;
typedef unsigned long iopte_t;
typedef unsigned long pmd_t;
typedef unsigned long pgd_t;
typedef unsigned long ctxd_t;
typedef unsigned long pgprot_t;
typedef unsigned long iopgprot_t;

#define pte_val(x)      (x)
#define iopte_val(x)    (x)
#define pmd_val(x)      (x)
#define pgd_val(x)      (x)
#define ctxd_val(x)     (x)
#define pgprot_val(x)   (x)
#define iopgprot_val(x) (x)

#define __pte(x)        (x)
#define __iopte(x)      (x)
#define __pmd(x)        (x)
#define __pgd(x)        (x)
#define __ctxd(x)       (x)
#define __pgprot(x)     (x)
#define __iopgprot(x)   (x)

/*
 * In general all page table modifications should use the V8 atomic
 * swap instruction.  This ensures the mmu and the cpu are in sync
 * with respect to ref/mod bits in the page tables.
 */
static unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
{
  #ifndef IMAGE_CREATE
        __asm__ __volatile__("swap [%2], %0" : "=&r" (value) : "0" (value), "r" (addr));
  #else
        /* Non-atomic fallback for IMAGE_CREATE builds. */
        unsigned long old = *addr;
        *addr = value;
        value = old;
  #endif
        return value;
}

static void srmmu_set_pte(pte_t *ptep, pte_t pteval)
{
        srmmu_swap((unsigned long *)ptep, pte_val(pteval));
}

/* The very generic SRMMU page table operations. */
static int srmmu_device_memory(unsigned long x)
{
        return ((x & 0xF0000000) != 0);
}

int srmmu_cache_pagetables;

/* XXX Make this dynamic based on ram size - Anton */
#define SRMMU_NOCACHE_BITMAP_SIZE (SRMMU_NOCACHE_NPAGES * 16)
#define SRMMU_NOCACHE_BITMAP_SHIFT (PAGE_SHIFT - 4)

void *srmmu_nocache_pool;
void *srmmu_nocache_bitmap;
int srmmu_nocache_low;
int srmmu_nocache_used;
//spinlock_t srmmu_nocache_spinlock;

/* This makes sense. Honest it does - Anton */
#define __nocache_pa(VADDR) VADDR // (((unsigned long)VADDR) - SRMMU_NOCACHE_VADDR + __pa((unsigned long)srmmu_nocache_pool))
#define __nocache_va(PADDR) PADDR // (__va((unsigned long)PADDR) - (unsigned long)srmmu_nocache_pool + SRMMU_NOCACHE_VADDR)
#define __nocache_fix(VADDR) VADDR // __va(__nocache_pa(VADDR))

static unsigned long srmmu_pgd_page(pgd_t pgd)
{ return srmmu_device_memory(pgd_val(pgd)) ? ~0 : (unsigned long)__nocache_va((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); }

static unsigned long srmmu_pmd_page(pmd_t pmd)
{ return srmmu_device_memory(pmd_val(pmd)) ? ~0 : (unsigned long)__nocache_va((pmd_val(pmd) & SRMMU_PTD_PMASK) << 4); }

//static struct page *srmmu_pte_page(pte_t pte)
//{ return (mem_map + (unsigned long)(srmmu_device_memory(pte_val(pte))?~0:(((pte_val(pte) & SRMMU_PTE_PMASK) << 4) >> PAGE_SHIFT))); }

static int srmmu_pte_none(pte_t pte)
{ return !(pte_val(pte) & 0xFFFFFFF); }

static int srmmu_pte_present(pte_t pte)
{ return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE); }

static void srmmu_pte_clear(pte_t *ptep)
{ srmmu_set_pte(ptep, __pte(0)); }

static int srmmu_pmd_none(pmd_t pmd)
{ return !(pmd_val(pmd) & 0xFFFFFFF); }

static int srmmu_pmd_bad(pmd_t pmd)
{ return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; }

static int srmmu_pmd_present(pmd_t pmd)
{ return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }

static void srmmu_pmd_clear(pmd_t *pmdp)
{ srmmu_set_pte((pte_t *)pmdp, __pte(0)); }

static int srmmu_pgd_none(pgd_t pgd)
{ return !(pgd_val(pgd) & 0xFFFFFFF); }

static int srmmu_pgd_bad(pgd_t pgd)
{ return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; }

static int srmmu_pgd_present(pgd_t pgd)
{ return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }

static void srmmu_pgd_clear(pgd_t *pgdp)
{ srmmu_set_pte((pte_t *)pgdp, __pte(0)); }

static int srmmu_pte_write(pte_t pte)
{ return pte_val(pte) & SRMMU_WRITE; }

static int srmmu_pte_dirty(pte_t pte)
{ return pte_val(pte) & SRMMU_DIRTY; }

static int srmmu_pte_young(pte_t pte)
{ return pte_val(pte) & SRMMU_REF; }

static pte_t srmmu_pte_wrprotect(pte_t pte)
{ return __pte(pte_val(pte) & ~SRMMU_WRITE); }

static pte_t srmmu_pte_mkclean(pte_t pte)
{ return __pte(pte_val(pte) & ~SRMMU_DIRTY); }

static pte_t srmmu_pte_mkold(pte_t pte)
{ return __pte(pte_val(pte) & ~SRMMU_REF); }

static pte_t srmmu_pte_mkwrite(pte_t pte)
{ return __pte(pte_val(pte) | SRMMU_WRITE); }

static pte_t srmmu_pte_mkdirty(pte_t pte)
{ return __pte(pte_val(pte) | SRMMU_DIRTY); }

static pte_t srmmu_pte_mkyoung(pte_t pte)
{ return __pte(pte_val(pte) | SRMMU_REF); }

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
//static pte_t srmmu_mk_pte(struct page *page, pgprot_t pgprot)
//{ return __pte((((page - mem_map) << PAGE_SHIFT) >> 4) | pgprot_val(pgprot)); }

static pte_t srmmu_mk_pte_phys(unsigned long page, pgprot_t pgprot)
{ return __pte(((page) >> 4) | pgprot_val(pgprot)); }

static pte_t srmmu_mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
{ return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot)); }

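/*
 * Illustrative usage (not part of the original header): installing a
 * cacheable kernel mapping of physical page paddr into the PTE slot ptep
 * combines the helpers above roughly as
 *
 *     srmmu_set_pte(ptep, srmmu_mk_pte_phys(paddr, SRMMU_PAGE_KERNEL));
 */
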
/* XXX should we hyper_flush_whole_icache here - Anton */
static void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
{ srmmu_set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pgdp) >> 4))); }

static void srmmu_pgd_set(pgd_t *pgdp, pmd_t *pmdp)
{ srmmu_set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pmdp) >> 4))); }

static void srmmu_pmd_set(pmd_t *pmdp, pte_t *ptep)
{ srmmu_set_pte((pte_t *)pmdp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) ptep) >> 4))); }

static pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot)
{ return __pte((pte_val(pte) & SRMMU_CHG_MASK) | pgprot_val(newprot)); }

/* To find an entry in the top-level page table... */
//extern pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address)
//{ return mm->pgd + (address >> SRMMU_PGDIR_SHIFT); }

/* Find an entry in the second-level page table. */
static pmd_t *srmmu_pmd_offset(pgd_t *dir, unsigned long address)
{ return (pmd_t *) srmmu_pgd_page(*dir) + ((address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1)); }

/* Find an entry in the third-level page table. */
static pte_t *srmmu_pte_offset(pmd_t *dir, unsigned long address)
{ return (pte_t *) srmmu_pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1)); }

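/*
 * Illustrative sketch (not part of the original header): a software walk of
 * the three-level table rooted at pgd_base, returning the PTE that maps
 * vaddr, or a zero PTE if an intermediate level is missing.  The function
 * name is hypothetical.
 */
static pte_t srmmu_lookup_pte_example(pgd_t *pgd_base, unsigned long vaddr)
{
        pgd_t *pgdp = pgd_base + (vaddr >> SRMMU_PGDIR_SHIFT);
        pmd_t *pmdp;
        pte_t *ptep;

        if (!srmmu_pgd_present(*pgdp))
                return __pte(0);
        pmdp = srmmu_pmd_offset(pgdp, vaddr);
        if (!srmmu_pmd_present(*pmdp))
                return __pte(0);
        ptep = srmmu_pte_offset(pmdp, vaddr);
        return *ptep;
}
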
/* Do a physical address bypass write, e.g. for 0x80000000 */
static __inline__ void leon_store_bp(unsigned long paddr, unsigned long value)
{
        __asm__ __volatile__("sta %0, [%1] %2\n\t": :
                             "r" (value), "r" (paddr),
                             "i" (ASI_MMU_BP) : "memory");
}

/* Do a physical address bypass load, e.g. for 0x80000000 */
static __inline__ unsigned long leon_load_bp(unsigned long paddr)
{
        unsigned long retval;
        __asm__ __volatile__("lda [%1] %2, %0\n\t" :
                             "=r" (retval) :
                             "r" (paddr), "i" (ASI_MMU_BP));
        return retval;
}

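/*
 * Illustrative usage (not part of the original header): the bypass ASI allows
 * access by physical address even while the MMU is enabled, e.g. for the
 * 0x80000000 area mentioned in the comments above:
 *
 *     unsigned long v = leon_load_bp(0x80000000);
 *     leon_store_bp(0x80000000, v);
 */
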
#endif /* __ASSEMBLER__ */

#endif /* _MMU_H */
