/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000 by Ralf Baechle at alii
 * Copyright (C) 1999 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/config.h>
#include <asm/addrspace.h>
#include <asm/page.h>

#ifndef _LANGUAGE_ASSEMBLY

#include <linux/linkage.h>
#include <asm/cachectl.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/types.h>

/*
 * This flag is used to indicate that the page pointed to by a pte
 * is dirty and requires cleaning before returning it to the user.
 */
#define PG_dcache_dirty		PG_arch_1

#define Page_dcache_dirty(page)		\
	test_bit(PG_dcache_dirty, &(page)->flags)
#define SetPageDcacheDirty(page)	\
	set_bit(PG_dcache_dirty, &(page)->flags)
#define ClearPageDcacheDirty(page)	\
	clear_bit(PG_dcache_dirty, &(page)->flags)
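
/*
 * Hedged sketch of how the flag above is typically consumed; the real
 * consumer lives in arch/mips/mm, not in this header, and
 * __flush_dcache_page() here is a hypothetical flush primitive used
 * only for illustration.
 */
#if 0
static inline void lazy_dcache_flush(struct page *page)
{
	/* Flush only pages that were written while not mapped to user space. */
	if (Page_dcache_dirty(page)) {
		__flush_dcache_page(page);	/* hypothetical primitive */
		ClearPageDcacheDirty(page);
	}
}
#endif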

/*
 * - add_wired_entry() adds a fixed TLB entry and advances the wired
 *   register, so the entry is never considered for random replacement.
 */
extern void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
			    unsigned long entryhi, unsigned long pagemask);

/*
 * - add_temporary_entry() adds a temporary TLB entry.  We use TLB entries
 *   starting at the top and working down.  This is for populating the
 *   TLB before trap_init() puts the TLB miss handler in place.  It
 *   should be used only for entries matching the actual page tables,
 *   to prevent inconsistencies.
 */
extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
			       unsigned long entryhi, unsigned long pagemask);
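
/*
 * Hedged usage sketch (an assumed caller, not part of this header):
 * wiring a single 4K page early in boot.  The EntryLo encoding below is
 * schematic for an R4k-style TLB -- PFN at bit 6, cache attribute in
 * bits 3..5, D = bit 2, V = bit 1, G = bit 0 -- and a PageMask of 0
 * selects 4K pages.
 */
#if 0
static void __init wire_one_page(unsigned long phys, unsigned long virt)
{
	unsigned long lo0 = ((phys >> 12) << 6) | (3 << 3) | 0x7; /* C=3, D|V|G */
	unsigned long lo1 = 0x1;	/* odd page of the pair: invalid, global */

	add_wired_entry(lo0, lo1, virt & ~0x1fffUL, 0 /* PageMask 0 => 4K */);
}
#endif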

/* Basically we have the same two-level page tables as the i386; that is,
 * the logical three-level Linux page table layout with the middle level
 * folded.  Some day, when we have proper page colouring support, we can
 * have a slightly (on the order of 1%) quicker TLB refill handler; for
 * now it is a bit slower, but it works even with the cache aliasing
 * problem that the R4k and above have.
 */

/* PMD_SHIFT determines the size of the area a second-level page table can map */

#ifdef CONFIG_64BIT_PHYS_ADDR
extern int remap_page_range_high(unsigned long from, phys_t to, unsigned long size, pgprot_t prot);
#endif
#endif /* !defined (_LANGUAGE_ASSEMBLY) */

#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	PMD_SHIFT
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#define USER_PTRS_PER_PGD	(0x80000000UL/PGDIR_SIZE)
#define FIRST_USER_PGD_NR	0
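
/*
 * Worked example, assuming the usual 32-bit layout in which the included
 * <asm/pgtable-32.h> sets PMD_SHIFT to 22 with 4K pages (an assumption;
 * the real value comes from that header): PGDIR_SIZE = 1UL << 22 = 4MB,
 * so USER_PTRS_PER_PGD = 0x80000000 / 0x400000 = 512 pgd entries cover
 * the 2GB user address space.
 */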

#define VMALLOC_START		KSEG2
#define VMALLOC_VMADDR(x)	((unsigned long)(x))

#ifdef CONFIG_HIGHMEM
# define VMALLOC_END	(PKMAP_BASE-2*PAGE_SIZE)
#else
# define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)
#endif

#include <asm/pgtable-bits.h>

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
			PAGE_CACHABLE_DEFAULT)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
			PAGE_CACHABLE_DEFAULT)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
			PAGE_CACHABLE_DEFAULT)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
			_PAGE_GLOBAL | PAGE_CACHABLE_DEFAULT)
#define PAGE_USERIO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
			PAGE_CACHABLE_DEFAULT)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
			__WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)

/*
 * MIPS can't do page protection for execute and treats it the same as
 * read.  Also, write permission implies read permission.  This is the
 * closest we can get by reasonable means.
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED
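
/*
 * Worked example: the digit positions are [exec][write][read], matching
 * the VM_EXEC/VM_WRITE/VM_READ bits used to index the generic
 * protection_map[].  A private PROT_READ|PROT_WRITE mapping selects
 * __P011 == PAGE_COPY, which deliberately lacks _PAGE_WRITE: the first
 * store faults and is resolved by copy-on-write.  The shared equivalent
 * __S011 == PAGE_SHARED is writable outright.
 */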

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
#include <asm/pgtable-64.h>
#else
#include <asm/pgtable-32.h>
#endif

#if !defined (_LANGUAGE_ASSEMBLY)

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask)))
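
/*
 * Worked example (sizes assumed for illustration): if boot code
 * replicated the zero page four times with 4K PAGE_SIZE, zero_page_mask
 * is 0x3000 and ZERO_PAGE(vaddr) picks the replica whose cache colour
 * matches vaddr, so read faults on untouched anonymous memory map a
 * zero page that cannot virtually alias with the faulting address.
 */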

extern void load_pgd(unsigned long pg_dir);

extern pmd_t invalid_pte_table[PAGE_SIZE/sizeof(pmd_t)];

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline unsigned long pmd_page(pmd_t pmd)
{
	return pmd_val(pmd);
}

static inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
{
	pmd_val(*pmdp) = (((unsigned long) ptep) & PAGE_MASK);
}

static inline int pte_present(pte_t pte) { return pte_val(pte) & _PAGE_PRESENT; }

/*
 * (pmds are folded into pgds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
#define set_pgd(pgdptr, pgdval) (*(pgdptr) = pgdval)

/*
 * Empty pgd/pmd entries point to the invalid_pte_table (never to NULL),
 * so the TLB refill handler can walk the tables without a null check.
 */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == (unsigned long) invalid_pte_table;
}

static inline int pmd_bad(pmd_t pmd)
{
	return ((pmd_page(pmd) > (unsigned long) high_memory) ||
	        (pmd_page(pmd) < PAGE_OFFSET));
}

static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) != (unsigned long) invalid_pte_table);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
}

/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry).
 */
static inline int pgd_none(pgd_t pgd)		{ return 0; }
static inline int pgd_bad(pgd_t pgd)		{ return 0; }
static inline int pgd_present(pgd_t pgd)	{ return 1; }
static inline void pgd_clear(pgd_t *pgdp)	{ }

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_read(pte_t pte)	{ return (pte).pte_low & _PAGE_READ; }
static inline int pte_write(pte_t pte)	{ return (pte).pte_low & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return (pte).pte_low & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return (pte).pte_low & _PAGE_ACCESSED; }

#define PGD_T_LOG2	ffz(~sizeof(pgd_t))
#define PMD_T_LOG2	ffz(~sizeof(pmd_t))
#define PTE_T_LOG2	ffz(~sizeof(pte_t))
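
/*
 * Worked example of the ffz(~x) idiom: for sizeof(pte_t) == 4, ~4 ends
 * in binary ...11111011, whose lowest clear bit is bit 2, so
 * ffz(~4) == 2 == log2(4).  It converts a power-of-two size into a
 * shift count without a division.
 */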

#define PTRS_PER_PGD	((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
#define PTRS_PER_PMD	1
#define PTRS_PER_PTE	((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))

#define page_pte(page) page_pte_prot(page, __pgprot(0))

#define __pgd_offset(address)	pgd_index(address)
#define __pmd_offset(address) \
	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#define pgd_index(address)	((address) >> PGDIR_SHIFT)

/* to find an entry in a page-table-directory */
static inline pgd_t *pgd_offset(struct mm_struct *mm, unsigned long address)
{
	return mm->pgd + pgd_index(address);
}

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
{
	return (pmd_t *) dir;
}

/* Find an entry in the third-level page table.. */
static inline pte_t *pte_offset(pmd_t * dir, unsigned long address)
{
	return (pte_t *) (pmd_page(*dir)) +
	       ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}
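
/*
 * Hedged sketch of a full software walk built from the helpers above;
 * purely illustrative, not an interface of this header.  Callers would
 * normally hold the mm's page_table_lock while the result is in use.
 */
#if 0
static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd = pgd_offset(mm, address);
	pmd_t *pmd;

	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return NULL;		/* never true on the folded layout */
	pmd = pmd_offset(pgd, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;		/* empty slot: invalid_pte_table */
	return pte_offset(pmd, address);
}
#endif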

extern int do_check_pgt_cache(int, int);

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init(void);

extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
	pte_t pte);
extern void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte);

static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t pte)
{
	__update_tlb(vma, address, pte);
	__update_cache(vma, address, pte);
}

/* Swap entries must have VALID and GLOBAL bits cleared. */
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

#define SWP_TYPE(x)		(((x).val >> 1) & 0x7f)
#define SWP_OFFSET(x)		((x).val >> 10)
#define SWP_ENTRY(type,offset)	((swp_entry_t) { ((type) << 1) | ((offset) << 10) })

#else

#define SWP_TYPE(x)		(((x).val >> 1) & 0x1f)
#define SWP_OFFSET(x)		((x).val >> 8)
#define SWP_ENTRY(type,offset)	((swp_entry_t) { ((type) << 1) | ((offset) << 8) })

#endif

#define pte_to_swp_entry(pte)	((swp_entry_t) { (pte).pte_low })
#define swp_entry_to_pte(x)	((pte_t) { (x).val })
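
/*
 * Worked example for the non-R3000 layout above: SWP_ENTRY(3, 0x123)
 * packs to (3 << 1) | (0x123 << 8) == 0x12306, and unpacks as
 * SWP_TYPE == (0x12306 >> 1) & 0x1f == 3 and
 * SWP_OFFSET == 0x12306 >> 8 == 0x123.  The gap the encoding leaves
 * between the type and offset fields is exactly where the hardware
 * VALID and GLOBAL bits live, so a swapped-out pte can never produce a
 * TLB hit.
 */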

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define PageSkip(page)		(0)
#define kern_addr_valid(addr)	(1)

#include <asm-generic/pgtable.h>

#endif /* !defined (_LANGUAGE_ASSEMBLY) */

/*
 * We provide our own get_unmapped_area() to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA

#ifdef CONFIG_64BIT_PHYS_ADDR
#define io_remap_page_range remap_page_range_high
#else
#define io_remap_page_range remap_page_range
#endif

/*
 * No page table caches to initialise.
 */
#define pgtable_cache_init()	do { } while (0)

#endif /* _ASM_PGTABLE_H */