#ifndef __ASM_SH64_PGTABLE_H
#define __ASM_SH64_PGTABLE_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * include/asm-sh64/pgtable.h
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003  Paul Mundt
 * Copyright (C) 2003  Richard Curnow
 *
 * This file contains the functions and defines necessary to modify and use
 * the SuperH page table tree.
 */

#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <asm/page.h>
#include <linux/threads.h>
#include <linux/config.h>

extern void paging_init(void);

extern void flush_cache_all(void);
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct mm_struct *mm, unsigned long start,
                              unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr);

extern void flush_page_to_ram(struct page *page);

extern void flush_icache_range(unsigned long start, unsigned long end);
extern void flush_icache_page(struct vm_area_struct *vma, struct page *pg);

extern void flush_dcache_page(struct page *pg);

extern void flush_cache_sigtramp(unsigned long start, unsigned long end);

#ifdef CONFIG_DCACHE_DISABLED

#define sh64_dcache_purge_sets(base,sets) do { } while (0)
#define sh64_dcache_purge_all() do { } while (0)
#define sh64_dcache_purge_kernel_range(start,end) do { } while (0)
#define sh64_dcache_purge_coloured_phy_page(addr,eaddr) do { } while (0)
#define sh64_dcache_purge_phy_page(pg) do { } while (0)
#define sh64_dcache_purge_virt_page(mm,eaddr) do { } while (0)
#define sh64_dcache_purge_user_page(mm,eaddr) do { } while (0)
#define sh64_dcache_purge_user_range(mm,start,end) do { } while (0)
#define sh64_dcache_wback_current_user_range(start,end) do { } while (0)

#define copy_user_page(to, from, addr) memcpy(to, from, PAGE_SIZE)
#define clear_user_page(to, addr) memset(to, 0, PAGE_SIZE)

#endif /* CONFIG_DCACHE_DISABLED */

#ifdef CONFIG_ICACHE_DISABLED

#define sh64_icache_inv_all() do { } while (0)
#define sh64_icache_inv_kernel_range(start,end) do { } while (0)
#define sh64_icache_inv_user_page(vma,eaddr) do { } while (0)
#define sh64_icache_inv_user_page_range(mm,start,end) do { } while (0)
#define sh64_icache_inv_user_small_range(mm,start,len) do { } while (0)
#define sh64_icache_inv_current_user_range(start,end) do { } while (0)

#endif /* CONFIG_ICACHE_DISABLED */

/* We provide our own get_unmapped_area to avoid cache synonym issues */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * Basically we have the same two-level page tables as the i386 (i.e. the
 * logical three-level Linux page table layout, folded).
 */

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas, etc.
 */
extern unsigned char empty_zero_page[PAGE_SIZE];
#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page))

#endif /* !__ASSEMBLY__ */

/*
 * NEFF and NPHYS related defines.
 * FIXME : These need to be model-dependent.  For now this is OK, SH5-101
 * and SH5-103 implement 32 bits effective and 32 bits physical.  But future
 * implementations may extend beyond this.
 */
#define NEFF 32
#define NEFF_SIGN (1LL << (NEFF - 1))
#define NEFF_MASK (-1LL << NEFF)

#define NPHYS 32
#define NPHYS_SIGN (1LL << (NPHYS - 1))
#define NPHYS_MASK (-1LL << NPHYS)

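/* For illustration: with NEFF == NPHYS == 32, NEFF_SIGN == NPHYS_SIGN ==
   0x80000000 and NEFF_MASK == NPHYS_MASK == 0xffffffff00000000 as 64-bit
   values, i.e. the sign bit of the 32-bit space and a mask of everything
   above it, ready for the sign extension done in set_pte() below. */
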
/* Typically 2-level is sufficient up to 32 bits of virtual address space,
   beyond that 3-level would be appropriate. */
#if defined(CONFIG_SH64_PGTABLE_2_LEVEL)
/* For 4k pages, this contains 512 entries, i.e. 9 bits worth of address. */
#define PTRS_PER_PTE ((1<<PAGE_SHIFT)/sizeof(unsigned long long))
#define PTE_MAGNITUDE 3 /* log2(sizeof(unsigned long long)) */
#define PTE_SHIFT PAGE_SHIFT
#define PTE_BITS (PAGE_SHIFT - PTE_MAGNITUDE)

/* top level: PGD. */
#define PGDIR_SHIFT (PTE_SHIFT + PTE_BITS)
#define PGD_BITS (NEFF - PGDIR_SHIFT)
#define PTRS_PER_PGD (1<<PGD_BITS)

/* middle level: PMD. This doesn't do anything for the 2-level case. */
#define PTRS_PER_PMD (1)

#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
#define PMD_SHIFT PGDIR_SHIFT
#define PMD_SIZE PGDIR_SIZE
#define PMD_MASK PGDIR_MASK

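/* For illustration, assuming 4 KB pages (PAGE_SHIFT == 12) and NEFF == 32:
   PTE_BITS == 12 - 3 == 9, so PTRS_PER_PTE == 512 and one PTE page maps
   512 * 4 KB == 2 MB; PGDIR_SHIFT == 21 and PGD_BITS == 32 - 21 == 11, so
   PTRS_PER_PGD == 2048, and 2048 * 2 MB covers the full 4 GB of effective
   address space. */
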
#elif defined(CONFIG_SH64_PGTABLE_3_LEVEL)
/*
 * three-level asymmetric paging structure: PGD is top level.
 * The asymmetry comes from 32-bit pointers and 64-bit PTEs.
 */
/* bottom level: PTE. It's 9 bits = 512 pointers */
#define PTRS_PER_PTE ((1<<PAGE_SHIFT)/sizeof(unsigned long long))
#define PTE_MAGNITUDE 3 /* log2(sizeof(unsigned long long)) */
#define PTE_SHIFT PAGE_SHIFT
#define PTE_BITS (PAGE_SHIFT - PTE_MAGNITUDE)

/* middle level: PMD. It's 10 bits = 1024 pointers */
#define PTRS_PER_PMD ((1<<PAGE_SHIFT)/sizeof(unsigned long long *))
#define PMD_MAGNITUDE 2 /* log2(sizeof(unsigned long long *)) */
#define PMD_SHIFT (PTE_SHIFT + PTE_BITS)
#define PMD_BITS (PAGE_SHIFT - PMD_MAGNITUDE)

/* top level: PGD. It's 1 bit = 2 pointers */
#define PGDIR_SHIFT (PMD_SHIFT + PMD_BITS)
#define PGD_BITS (NEFF - PGDIR_SHIFT)
#define PTRS_PER_PGD (1<<PGD_BITS)

#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))

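/* For illustration, again with 4 KB pages and NEFF == 32: the levels split
   the 32-bit effective address as 1 (PGD) + 10 (PMD) + 9 (PTE) + 12 (page
   offset); PMD_SHIFT == 21 and PGDIR_SHIFT == 31, so a PMD entry maps 2 MB,
   a PGD entry maps 2 GB, and the two PGD slots cover the whole space. */
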
#else
#error "No defined number of page table levels"
#endif

/*
 * Error outputs.
 */
#define pte_ERROR(e) \
        printk("%s:%d: bad pte %016Lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
        printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
        printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * Table setting routines. Used within arch/mm only.
 */
#define set_pgd(pgdptr, pgdval) (*(pgdptr) = pgdval)
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)

static __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
{
        unsigned long long x = ((unsigned long long) pteval.pte);
        unsigned long long *xp = (unsigned long long *) pteptr;
        /*
         * Sign-extend based on NPHYS.
         */
        *(xp) = (x & NPHYS_SIGN) ? (x | NPHYS_MASK) : x;
}
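
/* For illustration: with NPHYS == 32, a value whose top physical bit is set,
   e.g. x == 0x80001000, has (x & NPHYS_SIGN) != 0 and is stored as
   x | NPHYS_MASK == 0xffffffff80001000, i.e. sign-extended to the 64-bit
   PTE width; a value such as 0x40001000 is stored unchanged. */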

static __inline__ void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
        pmd_val(*pmdp) = (unsigned long) ptep;
}

/*
 * PGD defines. Top level.
 */

/* To find an entry in a generic PGD. */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define __pgd_offset(address) pgd_index(address)
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))

/* To find an entry in a kernel PGD. */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/*
 * PGD level access routines.
 *
 * Note1:
 * There's no need to use physical addresses since the tree walk is all
 * performed in software, up until the PTE translation.
 *
 * Note 2:
 * A PGD entry can be uninitialized (_PGD_UNUSED), generically bad,
 * clear (_PGD_EMPTY), or present.  When present, the lower 3 nibbles
 * contain _KERNPG_TABLE, and, being a kernel virtual pointer, bit 31
 * must also be 1.  Taking an arbitrary clear value of bit 31 set to 0
 * and the lower 3 nibbles set to 0xFFF (_PGD_EMPTY), any other value
 * is a bad pgd that must be reported via printk().
 *
 */
#define _PGD_EMPTY 0x0

#if defined(CONFIG_SH64_PGTABLE_2_LEVEL)
static inline int pgd_none(pgd_t pgd) { return 0; }
static inline int pgd_bad(pgd_t pgd) { return 0; }
static inline int pgd_present(pgd_t pgd) { return 1; }
#define pgd_clear(xx) do { } while(0)

#elif defined(CONFIG_SH64_PGTABLE_3_LEVEL)
#define pgd_present(pgd_entry) (1)
#define pgd_none(pgd_entry) (pgd_val((pgd_entry)) == _PGD_EMPTY)
/* TODO: Think later about what a useful definition of 'bad' would be now. */
#define pgd_bad(pgd_entry) (0)
#define pgd_clear(pgd_entry_p) (set_pgd((pgd_entry_p), __pgd(_PGD_EMPTY)))

#endif


#define pgd_page(pgd_entry) ((unsigned long) (pgd_val(pgd_entry) & PAGE_MASK))

/*
 * PMD defines. Middle level.
 */

/* PGD to PMD dereferencing */
#if defined(CONFIG_SH64_PGTABLE_2_LEVEL)
static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
        return (pmd_t *) dir;
}
#elif defined(CONFIG_SH64_PGTABLE_3_LEVEL)
#define __pmd_offset(address) \
        (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pmd_offset(dir, addr) \
        ((pmd_t *) ((pgd_val(*(dir))) & PAGE_MASK) + __pmd_offset((addr)))
#endif

/*
 * PMD level access routines. Same notes as above.
 */
#define _PMD_EMPTY 0x0
/* Either the PMD is empty or present, it's not paged out */
#define pmd_present(pmd_entry) (1)
#define pmd_clear(pmd_entry_p) (set_pmd((pmd_entry_p), __pmd(_PMD_EMPTY)))
#define pmd_none(pmd_entry) (pmd_val((pmd_entry)) == _PMD_EMPTY)
/* TODO: Think later about what a useful definition of 'bad' would be now. */
#define pmd_bad(pmd_entry) (0)
#define pmd_page(pmd_entry) ((unsigned long) (pmd_val(pmd_entry) & PAGE_MASK))

/* PMD to PTE dereferencing */
#define __pte_offset(address) \
        ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset(dir, addr) \
        ((pte_t *) ((pmd_val(*(dir))) & PAGE_MASK) + __pte_offset((addr)))

/* Round it up! */
#define USER_PTRS_PER_PGD ((TASK_SIZE+PGDIR_SIZE-1)/PGDIR_SIZE)
#define FIRST_USER_PGD_NR 0

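/* For illustration, the full software walk these macros implement (a sketch,
   assuming a mapping that is actually present at each level):

        pgd_t *pgd = pgd_offset(mm, addr);
        pmd_t *pmd = pmd_offset(pgd, addr);
        pte_t *pte = pte_offset(pmd, addr);
        pte_t entry = *pte;

   Only the final PTE value is ever handed to the hardware; the intermediate
   levels hold kernel virtual pointers, as per Note1 above. */
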
#ifndef __ASSEMBLY__
#define VMALLOC_END 0xff000000
#define VMALLOC_START 0xf0000000
#define VMALLOC_VMADDR(x) ((unsigned long)(x))

#define IOBASE_VADDR 0xff000000
#define IOBASE_END 0xffffffff

/*
 * PTEL coherent flags.
 * See Chapter 17 ST50 CPU Core Volume 1, Architecture.
 */
/* The bits that are required in the SH-5 TLB are placed in the h/w-defined
   positions, to avoid expensive bit shuffling on every refill.  The remaining
   bits are used for s/w purposes and masked out on each refill.

   Note, the PTE slots are used to hold data of type swp_entry_t when a page is
   swapped out.  Only the _PAGE_PRESENT flag is significant when the page is
   swapped out, and it must be placed so that it doesn't overlap either the
   type or offset fields of swp_entry_t.  For x86, offset is at [31:8] and type
   at [6:1], with _PAGE_PRESENT at bit 0 for both pte_t and swp_entry_t.  This
   scheme doesn't map to SH-5 because bit [0] controls cacheability.  So bit
   [2] is used for _PAGE_PRESENT and the type field of swp_entry_t is split
   into 2 pieces.  That is handled by SWP_ENTRY and SWP_TYPE below. */
#define _PAGE_WT       0x001  /* CB0: if cacheable, 1->write-thru, 0->write-back */
#define _PAGE_DEVICE   0x001  /* CB0: if uncacheable, 1->device (i.e. no write-combining or reordering at bus level) */
#define _PAGE_CACHABLE 0x002  /* CB1: uncachable/cachable */
#define _PAGE_PRESENT  0x004  /* software: if allocated */
#define _PAGE_SIZE0    0x008  /* SZ0-bit : size of page */
#define _PAGE_SIZE1    0x010  /* SZ1-bit : size of page */
#define _PAGE_SHARED   0x020  /* software: reflects PTEH's SH */
#define _PAGE_READ     0x040  /* PR0-bit : read access allowed */
#define _PAGE_EXECUTE  0x080  /* PR1-bit : execute access allowed */
#define _PAGE_WRITE    0x100  /* PR2-bit : write access allowed */
#define _PAGE_USER     0x200  /* PR3-bit : user space access allowed */
#define _PAGE_DIRTY    0x400  /* software: page accessed in write */
#define _PAGE_ACCESSED 0x800  /* software: page referenced */

/* Mask which drops software flags */
#define _PAGE_FLAGS_HARDWARE_MASK 0xfffffffffffff3dbLL
/* Flags default: 4KB, Read, Not write, Not execute, Not user */
#define _PAGE_FLAGS_HARDWARE_DEFAULT 0x0000000000000040LL

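/* For illustration: the software-only flags above are _PAGE_PRESENT (0x004),
   _PAGE_SHARED (0x020), _PAGE_DIRTY (0x400) and _PAGE_ACCESSED (0x800),
   which together make 0xc24; ~0xc24 is the ...f3db hardware mask, so the
   mask clears exactly those four bits on each TLB refill. */
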
/*
 * Default flags for a Kernel page.
 * This is fundamentally also SHARED because the main use of this define
 * (other than for PGD/PMD entries) is for the VMALLOC pool which is
 * contextless.
 *
 * _PAGE_EXECUTE is required for modules.
 *
 */
#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
                       _PAGE_EXECUTE | \
                       _PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_DIRTY | \
                       _PAGE_SHARED)

/* Default flags for a User page */
#define _PAGE_TABLE (_KERNPG_TABLE | _PAGE_USER)

#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#define PAGE_NONE __pgprot(_PAGE_CACHABLE | _PAGE_ACCESSED)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
                             _PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_USER | \
                             _PAGE_SHARED)
/* We need to include _PAGE_EXECUTE in PAGE_COPY because it is the default
 * protection mode for the stack. */
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_CACHABLE | \
                           _PAGE_ACCESSED | _PAGE_USER | _PAGE_EXECUTE)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_CACHABLE | \
                               _PAGE_ACCESSED | _PAGE_USER)
#define PAGE_KERNEL __pgprot(_KERNPG_TABLE)


/*
 * In ST50 we have full permissions (Read/Write/Execute/Shared).
 * Just match'em all.  These are for mmap(), therefore all at least
 * User/Cachable/Present/Accessed.  No point in making Fault on Write.
 */
#define __MMAP_COMMON (_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | _PAGE_ACCESSED)
/* sxwr */
#define __P000 __pgprot(__MMAP_COMMON)
#define __P001 __pgprot(__MMAP_COMMON | _PAGE_READ)
#define __P010 __pgprot(__MMAP_COMMON)
#define __P011 __pgprot(__MMAP_COMMON | _PAGE_READ)
#define __P100 __pgprot(__MMAP_COMMON | _PAGE_EXECUTE)
#define __P101 __pgprot(__MMAP_COMMON | _PAGE_EXECUTE | _PAGE_READ)
#define __P110 __pgprot(__MMAP_COMMON | _PAGE_EXECUTE)
#define __P111 __pgprot(__MMAP_COMMON | _PAGE_EXECUTE | _PAGE_READ)

#define __S000 __pgprot(__MMAP_COMMON | _PAGE_SHARED)
#define __S001 __pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_READ)
#define __S010 __pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_WRITE)
#define __S011 __pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_READ | _PAGE_WRITE)
#define __S100 __pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_EXECUTE)
#define __S101 __pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_EXECUTE | _PAGE_READ)
#define __S110 __pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_EXECUTE | _PAGE_WRITE)
#define __S111 __pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_EXECUTE | _PAGE_READ | _PAGE_WRITE)

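/* Note that the private (__Pxxx) rows never set _PAGE_WRITE, even for the
   writable combinations (__P010/__P011 use the read-only encodings): the
   first store to a MAP_PRIVATE page therefore faults, letting the generic
   mm code perform copy-on-write, while the shared __S01x rows grant
   _PAGE_WRITE directly. */
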
/* Make it a device mapping for maximum safety (e.g. for mapping device
   registers into user-space via /dev/map). */
#define pgprot_noncached(x) __pgprot(((x).pgprot & ~(_PAGE_CACHABLE)) | _PAGE_DEVICE)

/*
 * Handling allocation failures during page table setup.
 */
extern void __handle_bad_pmd_kernel(pmd_t * pmd);
#define __handle_bad_pmd(x) __handle_bad_pmd_kernel(x)

/*
 * PTE level access routines.
 *
 * Note1:
 * It's the tree walk leaf.  This is the physical address to be stored.
 *
 * Note 2:
 * Regarding the choice of _PTE_EMPTY:

   We must choose a bit pattern that cannot be valid, whether or not the page
   is present.  bit[2]==1 => present, bit[2]==0 => swapped out.  If swapped
   out, bits [31:8], [6:3], [1:0] are under swapper control, so only bit[7] is
   left for us to select.  If we force bit[7]==0 when swapped out, we could use
   the combination bit[7,2]=2'b10 to indicate an empty PTE.  Alternatively, if
   we force bit[7]==1 when swapped out, we can use all zeroes to indicate
   empty.  This is convenient, because the page tables get cleared to zero
   when they are allocated.

 */
#define _PTE_EMPTY 0x0
#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
#define pte_clear(xp) (set_pte(xp, __pte(_PTE_EMPTY)))
#define pte_none(x) (pte_val(x) == _PTE_EMPTY)

/*
 * Some definitions to translate between mem_map, PTEs, and page
 * addresses:
 */

/*
 * Given a PTE, return the index of the mem_map[] entry corresponding
 * to the page frame the PTE refers to.  Get the absolute physical
 * address, make a relative physical address and translate it to an
 * index.
 */
#define pte_pagenr(x) (((unsigned long) (pte_val(x)) - \
                        __MEMORY_START) >> PAGE_SHIFT)

/*
 * Given a PTE, return the "struct page *".
 */
#define pte_page(x) (mem_map + pte_pagenr(x))

/*
 * Return number of (down rounded) MB corresponding to x pages.
 */
#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))


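/* For illustration (with a hypothetical __MEMORY_START of 0x80000000 and
   4 KB pages): a PTE whose physical address bits hold 0x80042000 yields
   pte_pagenr == (0x80042000 - 0x80000000) >> 12 == 0x42, so pte_page()
   returns mem_map + 0x42. */
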
/*
 * The following only have defined behavior if pte_present() is true.
 */
extern inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_READ; }
extern inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXECUTE; }
extern inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
extern inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }

extern inline pte_t pte_rdprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_READ)); return pte; }
extern inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_WRITE)); return pte; }
extern inline pte_t pte_exprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_EXECUTE)); return pte; }
extern inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
extern inline pte_t pte_mkold(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }

extern inline pte_t pte_mkread(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_READ)); return pte; }
extern inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_WRITE)); return pte; }
extern inline pte_t pte_mkexec(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_EXECUTE)); return pte; }
extern inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
extern inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }

/*
 * Conversion functions: convert a page and protection to a page entry.
 *
 * extern pte_t mk_pte(struct page *page, pgprot_t pgprot)
 */
#define mk_pte(page,pgprot) \
({ \
        pte_t __pte; \
 \
        set_pte(&__pte, __pte((((page)-mem_map) << PAGE_SHIFT) | \
                __MEMORY_START | pgprot_val((pgprot)))); \
        __pte; \
})

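/* Typical use (a sketch of the generic fault-path idiom, not code from this
   file): build the entry from a struct page and the VMA's protections, then
   install it through set_pte():

        pte_t entry = mk_pte(page, vma->vm_page_prot);
        set_pte(ptep, entry);
*/
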
/*
 * This takes an (absolute) physical page address that is used
 * by the remapping functions.
 */
#define mk_pte_phys(physpage, pgprot) \
({ pte_t __pte; set_pte(&__pte, __pte(physpage | pgprot_val(pgprot))); __pte; })

extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))); return pte; }

#define page_pte_prot(page, prot) mk_pte(page, prot)
#define page_pte(page) page_pte_prot(page, __pgprot(0))

#define pte_same(A,B) (pte_val(A) == pte_val(B))


extern void update_mmu_cache(struct vm_area_struct * vma,
                             unsigned long address, pte_t pte);


/* Swap-related things */
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* Encode and de-code a swap entry */
#define SWP_TYPE(x) (((x).val & 3) + (((x).val >> 1) & 0x3c))
#define SWP_OFFSET(x) ((x).val >> 8)

/* Avoid bit 2 for type */
static inline swp_entry_t SWP_ENTRY(unsigned long type, unsigned long offset)
{
        unsigned long result;
        /* Assert bit[7], to make swapped out page table entries distinct
           from unused/uninitialised ones */
        result = (offset << 8) + 0x80 + ((type & 0x3c) << 1) + (type & 3);
        return (swp_entry_t) {result};
}
#define pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define swp_entry_to_pte(x) ((pte_t) { (x).val })
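
/* Worked example: SWP_ENTRY(0x15, 0x1000) gives
   (0x1000 << 8) + 0x80 + ((0x15 & 0x3c) << 1) + (0x15 & 3) == 0x1000a9:
   type bits [1:0] land at val[1:0], type bits [5:2] at val[6:3], bit 7 is
   forced to 1 and bit 2 (_PAGE_PRESENT) stays 0.  Decoding recovers both
   fields: SWP_TYPE == (0xa9 & 3) + ((0x1000a9 >> 1) & 0x3c) == 1 + 0x14
   == 0x15, and SWP_OFFSET == 0x1000a9 >> 8 == 0x1000. */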

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define PageSkip(page) (0)
#define kern_addr_valid(addr) (1)

#define io_remap_page_range remap_page_range
#endif /* !__ASSEMBLY__ */

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init() do { } while (0)

/* This must be implemented for ptrace() */
#if 0
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
#endif


#endif /* __ASM_SH64_PGTABLE_H */