/* $Id: pgtable.h,v 1.1.1.1 2004-04-15 03:01:08 phoenix Exp $
 * pgtable.h: SpitFire page table operations.
 *
 * Copyright 1996,1997 David S. Miller (davem@caip.rutgers.edu)
 * Copyright 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#ifndef _SPARC64_PGTABLE_H
#define _SPARC64_PGTABLE_H

/* This file contains the functions and defines necessary to modify and use
 * the SpitFire page tables.
 */

#include <asm/spitfire.h>
#include <asm/asi.h>
#include <asm/mmu_context.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/processor.h>

/* The kernel image occupies 0x400000 to 0x1000000 (4MB --> 16MB).
 * The page copy blockops use 0x1000000 to 0x1800000 (16MB --> 24MB).
 * The PROM resides in an area spanning 0xf0000000 to 0x100000000.
 * The vmalloc area spans 0x140000000 to 0x200000000.
 * There is a single static kernel PMD which maps from 0x0 to address
 * 0x400000000.
 */
#define TLBTEMP_BASE            0x0000000001000000
#define MODULES_VADDR           0x0000000002000000
#define MODULES_LEN             0x000000007e000000
#define MODULES_END             0x0000000080000000
#define VMALLOC_START           0x0000000140000000
#define VMALLOC_VMADDR(x)       ((unsigned long)(x))
#define VMALLOC_END             0x0000000200000000
#define LOW_OBP_ADDRESS         0x00000000f0000000
#define HI_OBP_ADDRESS          0x0000000100000000

/* XXX All of this needs to be rethought so we can take advantage of
 * XXX cheetah's full 64-bit virtual address space, ie. no more hole
 * XXX in the middle like on spitfire. -DaveM
 */
/*
 * Given a virtual address, the lowest PAGE_SHIFT bits determine offset
 * into the page; the next higher PAGE_SHIFT-3 bits determine the pte#
 * in the proper pagetable (the -3 is from the 8 byte ptes, and each page
 * table is a single page long). The next higher PMD_BITS determine pmd#
 * in the proper pmdtable (where we must have PMD_BITS <= (PAGE_SHIFT-2)
 * since the pmd entries are 4 bytes, and each pmd page is a single page
 * long). Finally, the higher few bits determine pgde#.
 */

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT       (PAGE_SHIFT + (PAGE_SHIFT-3))
#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE-1))
#define PMD_BITS        11

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT     (PAGE_SHIFT + (PAGE_SHIFT-3) + PMD_BITS)
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))
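
/* For example, assuming the default 8K pages (PAGE_SHIFT == 13):
 *
 *   PMD_SHIFT   = 13 + (13 - 3) = 23, so PMD_SIZE   is 8MB
 *   PGDIR_SHIFT = 23 + 11       = 34, so PGDIR_SIZE is 16GB
 *
 * i.e. a virtual address splits into page offset bits [12:0], pte index
 * bits [22:13], pmd index bits [33:23], and pgd index above that.
 */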
63
 
64
#ifndef __ASSEMBLY__
65
 
66
/* Certain architectures need to do special things when pte's
67
 * within a page table are directly modified.  Thus, the following
68
 * hook is made available.
69
 */
70
#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
71
 
72
/* Entries per page directory level. */
73
#define PTRS_PER_PTE            (1UL << (PAGE_SHIFT-3))
74
 
75
/* We the first one in this file, what we export to the kernel
76
 * is different so we can optimize correctly for 32-bit tasks.
77
 */
78
#define REAL_PTRS_PER_PMD       (1UL << PMD_BITS)
79
#define PTRS_PER_PMD            ((const int)((current->thread.flags & SPARC_FLAG_32BIT) ? \
80
                                 (1UL << (32 - (PAGE_SHIFT-3) - PAGE_SHIFT)) : (REAL_PTRS_PER_PMD)))
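
/* For example, assuming 8K pages (PAGE_SHIFT == 13): a 32-bit task only
 * ever needs 1UL << (32 - 10 - 13) = 512 pmd entries (13 offset bits plus
 * 10 pte index bits leave 9 pmd index bits for a 32-bit address space),
 * whereas REAL_PTRS_PER_PMD is 1UL << 11 = 2048.
 */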

/*
 * We cannot use the top address range because VPTE table lives there. This
 * formula finds the total legal virtual space in the processor, subtracts the
 * vpte size, then aligns it to the number of bytes mapped by one pgde, and
 * thus calculates the number of pgdes needed.
 */
#define PTRS_PER_PGD    (((1UL << VA_BITS) - VPTE_SIZE + (1UL << (PAGE_SHIFT + \
                        (PAGE_SHIFT-3) + PMD_BITS)) - 1) / (1UL << (PAGE_SHIFT + \
                        (PAGE_SHIFT-3) + PMD_BITS)))
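
/* For example, assuming 8K pages, VA_BITS == 44 and VPTE_SIZE == (1UL << 34)
 * (one 8-byte pte for each of the 2^31 pages in a 44-bit address space),
 * this works out to ((1UL << 44) - (1UL << 34) + (1UL << 34) - 1) / (1UL << 34)
 * = 1023 pgd entries: one per 16GB, minus the top slot where the VPTE
 * table itself lives.
 */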
91
 
92
/* Kernel has a separate 44bit address space. */
93
#define USER_PTRS_PER_PGD       ((const int)((current->thread.flags & SPARC_FLAG_32BIT) ? \
94
                                 (1) : (PTRS_PER_PGD)))
95
#define FIRST_USER_PGD_NR       0
96
 
97
#define pte_ERROR(e)    __builtin_trap()
98
#define pmd_ERROR(e)    __builtin_trap()
99
#define pgd_ERROR(e)    __builtin_trap()
100
 
101
#endif /* !(__ASSEMBLY__) */
102
 
/* Spitfire/Cheetah TTE bits. */
#define _PAGE_VALID     0x8000000000000000      /* Valid TTE                          */
#define _PAGE_R         0x8000000000000000      /* Used to keep ref bit up to date    */
#define _PAGE_SZ4MB     0x6000000000000000      /* 4MB Page                           */
#define _PAGE_SZ512K    0x4000000000000000      /* 512K Page                          */
#define _PAGE_SZ64K     0x2000000000000000      /* 64K Page                           */
#define _PAGE_SZ8K      0x0000000000000000      /* 8K Page                            */
#define _PAGE_NFO       0x1000000000000000      /* No Fault Only                      */
#define _PAGE_IE        0x0800000000000000      /* Invert Endianness                  */
#define _PAGE_SN        0x0000800000000000      /* (Cheetah) Snoop                    */
#define _PAGE_PADDR_SF  0x000001FFFFFFE000      /* (Spitfire) Phys Address [40:13]    */
#define _PAGE_PADDR     0x000007FFFFFFE000      /* (Cheetah) Phys Address [42:13]     */
#define _PAGE_SOFT      0x0000000000001F80      /* Software bits                      */
#define _PAGE_L         0x0000000000000040      /* Locked TTE                         */
#define _PAGE_CP        0x0000000000000020      /* Cacheable in Physical Cache        */
#define _PAGE_CV        0x0000000000000010      /* Cacheable in Virtual Cache         */
#define _PAGE_E         0x0000000000000008      /* side-Effect                        */
#define _PAGE_P         0x0000000000000004      /* Privileged Page                    */
#define _PAGE_W         0x0000000000000002      /* Writable                           */
#define _PAGE_G         0x0000000000000001      /* Global                             */

/* Here are the SpitFire software bits we use in the TTE's. */
#define _PAGE_MODIFIED  0x0000000000000800      /* Modified Page (ie. dirty)          */
#define _PAGE_ACCESSED  0x0000000000000400      /* Accessed Page (ie. referenced)     */
#define _PAGE_READ      0x0000000000000200      /* Readable SW Bit                    */
#define _PAGE_WRITE     0x0000000000000100      /* Writable SW Bit                    */
#define _PAGE_PRESENT   0x0000000000000080      /* Present Page (ie. not swapped out) */

#if PAGE_SHIFT == 13
#define _PAGE_SZBITS    _PAGE_SZ8K
#elif PAGE_SHIFT == 16
#define _PAGE_SZBITS    _PAGE_SZ64K
#elif PAGE_SHIFT == 19
#define _PAGE_SZBITS    _PAGE_SZ512K
#elif PAGE_SHIFT == 22
#define _PAGE_SZBITS    _PAGE_SZ4MB
#else
#error Wrong PAGE_SHIFT specified
#endif

#define _PAGE_CACHE     (_PAGE_CP | _PAGE_CV)

#define __DIRTY_BITS    (_PAGE_MODIFIED | _PAGE_WRITE | _PAGE_W)
#define __ACCESS_BITS   (_PAGE_ACCESSED | _PAGE_READ | _PAGE_R)
#define __PRIV_BITS     _PAGE_P

#define PAGE_NONE       __pgprot (_PAGE_PRESENT | _PAGE_ACCESSED)

/* Don't set the TTE _PAGE_W bit here, else the dirty bit never gets set. */
#define PAGE_SHARED     __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
                                  __ACCESS_BITS | _PAGE_WRITE)

#define PAGE_COPY       __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
                                  __ACCESS_BITS)

#define PAGE_READONLY   __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
                                  __ACCESS_BITS)

#define PAGE_KERNEL     __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
                                  __PRIV_BITS | __ACCESS_BITS | __DIRTY_BITS)

#define PAGE_INVALID    __pgprot (0)

#define _PFN_MASK       _PAGE_PADDR

#define _PAGE_CHG_MASK  (_PFN_MASK | _PAGE_MODIFIED | _PAGE_ACCESSED | _PAGE_PRESENT | _PAGE_SZBITS)

#define pg_iobits (_PAGE_VALID | _PAGE_PRESENT | __DIRTY_BITS | __ACCESS_BITS | _PAGE_E)

#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY
#define __P101  PAGE_READONLY
#define __P110  PAGE_COPY
#define __P111  PAGE_COPY

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED
#define __S100  PAGE_READONLY
#define __S101  PAGE_READONLY
#define __S110  PAGE_SHARED
#define __S111  PAGE_SHARED

#ifndef __ASSEMBLY__

extern unsigned long phys_base;

extern struct page *mem_map_zero;
#define ZERO_PAGE(vaddr)        (mem_map_zero)

/* Warning: These take pointers to page structs now... */
#define mk_pte(page, pgprot)            \
        __pte((((page - mem_map) << PAGE_SHIFT)+phys_base) | pgprot_val(pgprot) | _PAGE_SZBITS)
#define page_pte_prot(page, prot)       mk_pte(page, prot)
#define page_pte(page)                  page_pte_prot(page, __pgprot(0))

#define mk_pte_phys(physpage, pgprot)   (__pte((physpage) | pgprot_val(pgprot) | _PAGE_SZBITS))

extern inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
{
        pte_t __pte;

        pte_val(__pte) = (pte_val(orig_pte) & _PAGE_CHG_MASK) |
                pgprot_val(new_prot);

        return __pte;
}
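
/* pmd_set()/pgd_set() below store the physical address of the next-level
 * table shifted right by 11 bits: the tables are page aligned, so the
 * dropped low bits are always zero, and after the shift even a Cheetah
 * physical address (up to bit 42) fits in the 4-byte entries described
 * in the layout comment near the top of this file.
 */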
#define pmd_set(pmdp, ptep)     \
        (pmd_val(*(pmdp)) = (__pa((unsigned long) (ptep)) >> 11UL))
#define pgd_set(pgdp, pmdp)     \
        (pgd_val(*(pgdp)) = (__pa((unsigned long) (pmdp)) >> 11UL))
#define pmd_page(pmd)                   ((unsigned long) __va((pmd_val(pmd)<<11UL)))
#define pgd_page(pgd)                   ((unsigned long) __va((pgd_val(pgd)<<11UL)))
#define pte_none(pte)                   (!pte_val(pte))
#define pte_present(pte)                (pte_val(pte) & _PAGE_PRESENT)
#define pte_clear(pte)                  (pte_val(*(pte)) = 0UL)
#define pmd_none(pmd)                   (!pmd_val(pmd))
#define pmd_bad(pmd)                    (0)
#define pmd_present(pmd)                (pmd_val(pmd) != 0UL)
#define pmd_clear(pmdp)                 (pmd_val(*(pmdp)) = 0UL)
#define pgd_none(pgd)                   (!pgd_val(pgd))
#define pgd_bad(pgd)                    (0)
#define pgd_present(pgd)                (pgd_val(pgd) != 0UL)
#define pgd_clear(pgdp)                 (pgd_val(*(pgdp)) = 0UL)

/* The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
#define pte_read(pte)           (pte_val(pte) & _PAGE_READ)
#define pte_exec(pte)           pte_read(pte)
#define pte_write(pte)          (pte_val(pte) & _PAGE_WRITE)
#define pte_dirty(pte)          (pte_val(pte) & _PAGE_MODIFIED)
#define pte_young(pte)          (pte_val(pte) & _PAGE_ACCESSED)
#define pte_wrprotect(pte)      (__pte(pte_val(pte) & ~(_PAGE_WRITE|_PAGE_W)))
#define pte_rdprotect(pte)      (__pte(((pte_val(pte)<<1UL)>>1UL) & ~_PAGE_READ))
#define pte_mkclean(pte)        (__pte(pte_val(pte) & ~(_PAGE_MODIFIED|_PAGE_W)))
#define pte_mkold(pte)          (__pte(((pte_val(pte)<<1UL)>>1UL) & ~_PAGE_ACCESSED))

/* Permanent address of a page. */
#define __page_address(page)    page_address(page)

#define pte_page(x) (mem_map+(((pte_val(x)&_PAGE_PADDR)-phys_base)>>PAGE_SHIFT))

/* Be very careful when you change these three, they are delicate. */
#define pte_mkyoung(pte)        (__pte(pte_val(pte) | _PAGE_ACCESSED | _PAGE_R))
#define pte_mkwrite(pte)        (__pte(pte_val(pte) | _PAGE_WRITE))
#define pte_mkdirty(pte)        (__pte(pte_val(pte) | _PAGE_MODIFIED | _PAGE_W))

/* to find an entry in a page-table-directory. */
#define pgd_index(address)      (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD))
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* Find an entry in the second-level page table.. */
#define pmd_offset(dir, address)        ((pmd_t *) pgd_page(*(dir)) + \
                                        ((address >> PMD_SHIFT) & (REAL_PTRS_PER_PMD-1)))

/* Find an entry in the third-level page table.. */
#define pte_offset(dir, address)        ((pte_t *) pmd_page(*(dir)) + \
                                        ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))

extern pgd_t swapper_pg_dir[1];

/* These do nothing with the way I have things setup. */
#define mmu_lockarea(vaddr, len)                (vaddr)
#define mmu_unlockarea(vaddr, len)              do { } while(0)

extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);

#define flush_icache_page(vma, pg)      do { } while(0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)

/* Make a non-present pseudo-TTE. */
extern inline pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space)
{
        pte_t pte;
        pte_val(pte) = ((page) | pgprot_val(prot) | _PAGE_E) & ~(unsigned long)_PAGE_CACHE;
        pte_val(pte) |= (((unsigned long)space) << 32);
        return pte;
}

/* Encode and de-code a swap entry */
#define SWP_TYPE(entry)         (((entry).val >> PAGE_SHIFT) & 0xffUL)
#define SWP_OFFSET(entry)       ((entry).val >> (PAGE_SHIFT + 8UL))
#define SWP_ENTRY(type, offset) \
        ( (swp_entry_t) \
          { \
                (((long)(type) << PAGE_SHIFT) | \
                 ((long)(offset) << (PAGE_SHIFT + 8UL))) \
          } )
#define pte_to_swp_entry(pte)           ((swp_entry_t) { pte_val(pte) })
#define swp_entry_to_pte(x)             ((pte_t) { (x).val })
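
/* For example, assuming 8K pages (PAGE_SHIFT == 13), a swap entry keeps
 * bits [12:0] clear (so pte_present() is never true for it), stores the
 * 8-bit swap type in bits [20:13] and the swap offset from bit 21 upwards.
 */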

extern unsigned long prom_virt_to_phys(unsigned long, int *);
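
/* sun4u_get_pte() translates a kernel virtual address to a physical one:
 * addresses in the kernel's linear mapping are translated by masking,
 * addresses in the OBP range are resolved through the PROM, and anything
 * else is looked up by walking the kernel page tables.
 */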

extern __inline__ unsigned long
sun4u_get_pte (unsigned long addr)
{
        pgd_t *pgdp;
        pmd_t *pmdp;
        pte_t *ptep;

        if (addr >= PAGE_OFFSET)
                return addr & _PAGE_PADDR;
        if ((addr >= LOW_OBP_ADDRESS) && (addr < HI_OBP_ADDRESS))
                return prom_virt_to_phys(addr, 0);
        pgdp = pgd_offset_k (addr);
        pmdp = pmd_offset (pgdp, addr);
        ptep = pte_offset (pmdp, addr);
        return pte_val (*ptep) & _PAGE_PADDR;
}

extern __inline__ unsigned long
__get_phys (unsigned long addr)
{
        return sun4u_get_pte (addr);
}

extern __inline__ int
__get_iospace (unsigned long addr)
{
        return ((sun4u_get_pte (addr) & 0xf0000000) >> 28);
}

extern unsigned long *sparc64_valid_addr_bitmap;

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr)   \
        (test_bit(__pa((unsigned long)(addr))>>22, sparc64_valid_addr_bitmap))
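/* Each bit in sparc64_valid_addr_bitmap covers a 4MB (1UL << 22) chunk of
 * physical memory, which is why the physical address is shifted down by
 * 22 bits before the test_bit().
 */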

extern int io_remap_page_range(unsigned long from, unsigned long offset,
                               unsigned long size, pgprot_t prot, int space);

#include <asm-generic/pgtable.h>

/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA

/* We provide a special get_unmapped_area for framebuffer mmaps to try and use
 * the largest alignment possible such that larger PTEs can be used.
 */
extern unsigned long get_fb_unmapped_area(struct file *filp, unsigned long, unsigned long, unsigned long, unsigned long);
#define HAVE_ARCH_FB_UNMAPPED_AREA

#endif /* !(__ASSEMBLY__) */

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()    do { } while (0)

#endif /* !(_SPARC64_PGTABLE_H) */
