/*
 *  include/asm-s390/pgtable.h
 *
 *  S390 64bit version
 *  Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *  Author(s): Hartmut Penner (hp@de.ibm.com)
 *             Ulrich Weigand (weigand@de.ibm.com)
 *             Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

/*
 * The Linux memory management assumes a three-level page table setup. On
 * the S390, we use that, but "fold" the mid level into the top-level page
 * table, so that we physically have the same two-level page table as the
 * S390 mmu expects.
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <linux/threads.h>

extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);

/* Caches aren't brain-dead on S390. */
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_range(mm, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr)		do { } while (0)
#define flush_page_to_ram(page)			do { } while (0)
#define flush_dcache_page(page)			do { } while (0)
#define flush_icache_range(start, end)		do { } while (0)
#define flush_icache_page(vma,pg)		do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, pte)	do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern char empty_zero_page[PAGE_SIZE];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
#endif /* !__ASSEMBLY__ */

/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map
 */
#define PMD_SHIFT	21
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	30
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * entries per page directory level: the S390 is two to five-level,
 * currently we use a 3 level lookup
 */
#define PTRS_PER_PTE	512
#define PTRS_PER_PMD	512
#define PTRS_PER_PGD	2048
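
/*
 * Illustrative arithmetic (assuming the usual 4KB pages, i.e. a
 * PAGE_SHIFT of 12): 512 ptes * 4KB = 2MB per page table, matching
 * PMD_SIZE (1 << 21); 512 pmds * 2MB = 1GB per segment table,
 * matching PGDIR_SIZE (1 << 30); and 2048 pgd entries * 1GB = 2TB
 * of address space, matching the VMALLOC_END value (0x20000000000
 * = 2^41) further down.
 */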

/*
 * pgd entries used up by user/kernel:
 */
#define USER_PTRS_PER_PGD  2048
#define USER_PGD_PTRS      2048
#define KERNEL_PGD_PTRS    2048
#define FIRST_USER_PGD_NR  0

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

#ifndef __ASSEMBLY__
/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts. That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET	(8*1024*1024)
#define VMALLOC_START	(((unsigned long) high_memory + VMALLOC_OFFSET) \
			 & ~(VMALLOC_OFFSET-1))
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
#define VMALLOC_END	(0x20000000000L)
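
/*
 * A worked example of the VMALLOC_START rounding, with a hypothetical
 * high_memory of 0x1f800000 (504MB): adding VMALLOC_OFFSET gives
 * 0x20000000, and masking with ~(VMALLOC_OFFSET-1) rounds down to the
 * 8MB boundary 0x20000000, leaving exactly the intended 8MB hole
 * between the end of physical memory and the start of vmalloc space.
 */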

/*
 * A page table entry of S390 has the following format:
 * |                     PFRA                         |0IP0|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A segment table entry of S390 has the following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit: Segment is not available for address-translation
 * C Common-Segment Bit:  Segment is not private (PoP 3-30)
 * P Page-Protection Bit: Store access not possible for page
 * TT Type 00
 *
 * A region table entry of S390 has the following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit: Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The region table origin of S390 has the following format:
 * |     region table origin                           |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

/* Bits in the page table entry */
#define _PAGE_PRESENT   0x001          /* Software                        */
#define _PAGE_ISCLEAN   0x004          /* Software                        */
#define _PAGE_RO        0x200          /* HW read-only                    */
#define _PAGE_INVALID   0x400          /* HW invalid                      */

/* Bits in the segment table entry */
#define _PMD_ENTRY_INV  0x20           /* invalid segment table entry     */
#define _PMD_ENTRY      0x00

/* Bits in the region third table entry */
#define _PGD_ENTRY_INV    0x20         /* region table entry invalid bit  */
#define _PGD_ENTRY_MASK   0x04         /* region third table entry mask   */
#define _PGD_ENTRY_LEN(x) ((x)&3)      /* region table length bits        */
#define _PGD_ENTRY_OFF(x) (((x)&3)<<6) /* region table offset bits        */

/*
 * User and kernel page directory
 */
#define _REGION_THIRD       0x4
#define _REGION_THIRD_LEN   0x1
#define _REGION_TABLE       (_REGION_THIRD|_REGION_THIRD_LEN|0x40|0x100)
#define _KERN_REGION_TABLE  (_REGION_THIRD|_REGION_THIRD_LEN)

/* Bits in the storage key */
#define _PAGE_CHANGED    0x02          /* HW changed bit                  */
#define _PAGE_REFERENCED 0x04          /* HW referenced bit               */
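
/*
 * Read together with the storage key layout sketched above
 * (| ACC |F|R|C|0|): in the key byte returned by iske, the referenced
 * bit R has the value 0x04 and the changed bit C the value 0x02,
 * which is exactly what the two masks above select.
 */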

/*
 * No mapping available
 */
#define PAGE_INVALID	  __pgprot(_PAGE_INVALID)
#define PAGE_NONE_SHARED  __pgprot(_PAGE_PRESENT|_PAGE_INVALID)
#define PAGE_NONE_PRIVATE __pgprot(_PAGE_PRESENT|_PAGE_INVALID|_PAGE_ISCLEAN)
#define PAGE_RO_SHARED	  __pgprot(_PAGE_PRESENT|_PAGE_RO)
#define PAGE_RO_PRIVATE	  __pgprot(_PAGE_PRESENT|_PAGE_RO|_PAGE_ISCLEAN)
#define PAGE_COPY	  __pgprot(_PAGE_PRESENT|_PAGE_RO|_PAGE_ISCLEAN)
#define PAGE_SHARED	  __pgprot(_PAGE_PRESENT)
#define PAGE_KERNEL	  __pgprot(_PAGE_PRESENT)

/*
 * The S390 can't do page protection for execute, and considers that
 * the same as a read. Also, write permissions imply read permissions.
 * This is the closest we can get..
 */
#define __P000	PAGE_NONE_PRIVATE
#define __P001	PAGE_RO_PRIVATE
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_RO_PRIVATE
#define __P101	PAGE_RO_PRIVATE
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE_SHARED
#define __S001	PAGE_RO_SHARED
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_RO_SHARED
#define __S101	PAGE_RO_SHARED
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED
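
/*
 * The three digits in these names are conventionally read as the xwr
 * permission bits of the mapping. For example __P011, a private
 * read/write mapping, maps to PAGE_COPY: the pte stays read-only at
 * the hardware level so that the first store faults and can be
 * resolved as copy-on-write.
 */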

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
extern inline void set_pte(pte_t *pteptr, pte_t pteval)
{
	*pteptr = pteval;
}

#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
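
/*
 * For example, with 4KB pages this is a shift by 8:
 * pages_to_mb(262144) == 1024, i.e. 262144 pages * 4KB == 1024MB.
 */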

/*
 * pgd/pmd/pte query functions
 */
extern inline int __pgd_present(pgd_t *pgd)
{
	unsigned long addr = (unsigned long) pgd;
	unsigned long *pgd_slot = (unsigned long *) (addr & -8);
	unsigned long offset = (addr & 4) >> 1;

	if (*pgd_slot & _PGD_ENTRY_INV)
		return 0;
	if ((*pgd_slot & _PGD_ENTRY_OFF(3)) > _PGD_ENTRY_OFF(offset))
		return 0;
	if ((*pgd_slot & _PGD_ENTRY_LEN(3)) < _PGD_ENTRY_LEN(offset))
		return 0;
	return 1;
}
#define pgd_present(pgd) __pgd_present(&(pgd))
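
/*
 * One way to read the checks above: two 4-byte pgd slots share an
 * 8-byte region-third-table entry, whose TF (offset) and TL (length)
 * fields say which quarters of the attached segment table are valid.
 * The lower slot needs quarter 0 and the upper slot quarter 2, so a
 * slot counts as present when the entry is valid and its [TF, TL]
 * range covers that slot's quarter.
 */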

extern inline int __pgd_none(pgd_t *pgd)
{
	return !__pgd_present(pgd);
}
#define pgd_none(pgd) __pgd_none(&(pgd))

extern inline int __pgd_bad(pgd_t *pgd)
{
	unsigned long addr = (unsigned long) pgd;
	unsigned long *pgd_slot = (unsigned long *) (addr & -8);

	return (*pgd_slot & (~PAGE_MASK & ~_PGD_ENTRY_INV & ~_PGD_ENTRY_MASK &
			     ~_PGD_ENTRY_LEN(3) & ~_PGD_ENTRY_OFF(3))) != 0;
}
#define pgd_bad(pgd) __pgd_bad(&(pgd))

extern inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & ~PAGE_MASK) == _PMD_ENTRY;
}

extern inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) & _PMD_ENTRY_INV;
}

extern inline int pmd_bad(pmd_t pmd)
{
	return (pmd_val(pmd) & (~PAGE_MASK & ~_PMD_ENTRY_INV)) != _PMD_ENTRY;
}

extern inline int pte_present(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

extern inline int pte_none(pte_t pte)
{
	return ((pte_val(pte) &
		 (_PAGE_INVALID | _PAGE_RO | _PAGE_PRESENT)) == _PAGE_INVALID);
}

#define pte_same(a,b) (pte_val(a) == pte_val(b))

/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not..
 */
extern inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_RO) == 0;
}

extern inline int pte_dirty(pte_t pte)
{
	int skey;

	if (pte_val(pte) & _PAGE_ISCLEAN)
		return 0;
	asm volatile ("iske %0,%1" : "=d" (skey) : "a" (pte_val(pte)));
	return skey & _PAGE_CHANGED;
}

extern inline int pte_young(pte_t pte)
{
	int skey;

	asm volatile ("iske %0,%1" : "=d" (skey) : "a" (pte_val(pte)));
	return skey & _PAGE_REFERENCED;
}

/*
 * pgd/pmd/pte modification functions
 */
extern inline void pgd_clear(pgd_t * pgdp)
{
	unsigned long addr = (unsigned long) pgdp;
	unsigned long *pgd_slot = (unsigned long *) (addr & -8);
	unsigned long offset = addr & 4;

	if (*pgd_slot & _PGD_ENTRY_INV) {
		*pgd_slot = _PGD_ENTRY_INV;
		return;
	}
	if (offset == 0 && (*pgd_slot & _PGD_ENTRY_LEN(2)) != 0) {
		/* Clear lower pmd, upper pmd still used. */
		*pgd_slot = (*pgd_slot & PAGE_MASK) | _PGD_ENTRY_MASK |
			    _PGD_ENTRY_OFF(2) | _PGD_ENTRY_LEN(3);
		return;
	}
	if (offset == 4 && (*pgd_slot & _PGD_ENTRY_OFF(2)) == 0) {
		/* Clear upper pmd, lower pmd still used. */
		*pgd_slot = (*pgd_slot & PAGE_MASK) | _PGD_ENTRY_MASK |
			    _PGD_ENTRY_OFF(0) | _PGD_ENTRY_LEN(1);
		return;
	}
	*pgd_slot = _PGD_ENTRY_INV;
}

extern inline void pmd_clear(pmd_t * pmdp)
{
	pmd_val(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY;
	pmd_val1(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY;
}

extern inline void pte_clear(pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_INVALID;
}

#define PTE_INIT(x) pte_clear(x)

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not..
 */
extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= PAGE_MASK | _PAGE_ISCLEAN;
	pte_val(pte) |= pgprot_val(newprot) & ~_PAGE_ISCLEAN;
	return pte;
}

extern inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) |= _PAGE_RO;
	return pte;
}

extern inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_RO | _PAGE_ISCLEAN);
	return pte;
}

extern inline pte_t pte_mkclean(pte_t pte)
{
	/* The only user of pte_mkclean is the fork() code.
	   We must *not* clear the *physical* page dirty bit
	   just because fork() wants to clear the dirty bit in
	   *one* of the page's mappings. So we just do nothing. */
	return pte;
}

extern inline pte_t pte_mkdirty(pte_t pte)
{
	/* We do not explicitly set the dirty bit because the
	 * sske instruction is slow. It is faster to let the
	 * next instruction set the dirty bit.
	 */
	pte_val(pte) &= ~_PAGE_ISCLEAN;
	return pte;
}

extern inline pte_t pte_mkold(pte_t pte)
{
	asm volatile ("rrbe 0,%0" : : "a" (pte_val(pte)) : "cc" );
	return pte;
}

extern inline pte_t pte_mkyoung(pte_t pte)
{
	/* To set the referenced bit we read the first word from the real
	 * page with a special instruction: load using real address (lura).
	 * Isn't S/390 a nice architecture ?! */
	asm volatile ("lura 0,%0" : : "a" (pte_val(pte) & PAGE_MASK) : "0" );
	return pte;
}

static inline int ptep_test_and_clear_young(pte_t *ptep)
{
	int ccode;

	asm volatile ("rrbe 0,%1\n\t"
		      "ipm  %0\n\t"
		      "srl  %0,28\n\t"
		      : "=d" (ccode) : "a" (pte_val(*ptep)) : "cc" );
	return ccode & 2;
}
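
/*
 * In the sequence above, rrbe resets the referenced bit and encodes
 * its old state in the condition code; ipm copies the condition code
 * into the high bits of the register and the shift by 28 moves it to
 * the low two bits, so "ccode & 2" is non-zero exactly when the page
 * had been referenced.
 */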

static inline int ptep_test_and_clear_dirty(pte_t *ptep)
{
	int skey;

	if (pte_val(*ptep) & _PAGE_ISCLEAN)
		return 0;
	asm volatile ("iske %0,%1" : "=d" (skey) : "a" (*ptep));
	if ((skey & _PAGE_CHANGED) == 0)
		return 0;
	/* We can't clear the changed bit atomically. For now we
	 * clear (!) the page referenced bit. */
	asm volatile ("sske %0,%1"
		      : : "d" (0), "a" (*ptep));
	return 1;
}

static inline pte_t ptep_get_and_clear(pte_t *ptep)
{
	pte_t pte = *ptep;
	pte_clear(ptep);
	return pte;
}

static inline void ptep_set_wrprotect(pte_t *ptep)
{
	pte_t old_pte = *ptep;
	set_pte(ptep, pte_wrprotect(old_pte));
}

static inline void ptep_mkdirty(pte_t *ptep)
{
	pte_mkdirty(*ptep);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
extern inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return __pte;
}

#define mk_pte(pg, pgprot)                                                \
({                                                                        \
	struct page *__page = (pg);                                       \
	pgprot_t __pgprot = (pgprot);                                     \
	unsigned long __physpage = __pa((__page-mem_map) << PAGE_SHIFT);  \
	pte_t __pte = mk_pte_phys(__physpage, __pgprot);                  \
	__pte;                                                            \
})

#define arch_set_page_uptodate(__page)                                    \
	do {                                                              \
		asm volatile ("sske %0,%1" : : "d" (0),                   \
			      "a" (__pa((__page-mem_map) << PAGE_SHIFT)));\
	} while (0)
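
/*
 * A minimal usage sketch, with hypothetical page and ptep variables:
 *
 *	pte_t pte = mk_pte(page, PAGE_KERNEL);
 *	set_pte(ptep, pte);
 *
 * This computes the physical address from the mem_map index of the
 * page, merges in the protection bits and installs the resulting pte.
 */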

#define pte_page(x) (mem_map+(unsigned long)((pte_val(x) >> PAGE_SHIFT)))

#define pmd_page(pmd) \
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

/* to find an entry in a page-table-directory */
#define pgd_index(address) ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))

#define pgd_page(pgd) \
	((unsigned long) __va(__pgd_val(pgd) & PAGE_MASK))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* Find an entry in the second-level page table.. */
#define pmd_offset(dir,addr) \
	((pmd_t *) pgd_page(dir) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))

/* Find an entry in the third-level page table.. */
#define pte_offset(dir,addr) \
	((pte_t *) pmd_page(*(dir)) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
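
/*
 * A sketch of a full walk with these macros, assuming mm and address
 * are valid and the higher levels are present (pmd_offset is handed
 * the pgd slot, as in the usual 2.4-era walk):
 *
 *	pgd_t *pgd = pgd_offset(mm, address);
 *	pmd_t *pmd = pmd_offset(pgd, address);
 *	pte_t *pte = pte_offset(pmd, address);
 *
 * Each step extracts its level's index bits from the address
 * (PGDIR_SHIFT, PMD_SHIFT, PAGE_SHIFT) and indexes the table the
 * previous level points to.
 */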

/*
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and bit 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bit 53 and bit 54 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 63 is used as the software page present bit. If a page is
 * swapped this obviously has to be zero.
 * This leaves the bits 0-51 and bits 56-62 to store type and offset.
 * We use the 7 bits from 56-62 for the type and the 52 bits from 0-51
 * for the offset.
 * |                     offset                       |0110|type |0
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 */
extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;
	pte_val(pte) = (type << 1) | (offset << 12) | _PAGE_INVALID | _PAGE_RO;
	pte_val(pte) &= 0xfffffffffffff6fe;  /* better to be paranoid */
	return pte;
}

#define SWP_TYPE(entry)		(((entry).val >> 1) & 0x3f)
#define SWP_OFFSET(entry)	((entry).val >> 12)
#define SWP_ENTRY(type,offset)	((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })

#define pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define swp_entry_to_pte(x)	((pte_t) { (x).val })
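
/*
 * A worked round trip through this encoding: SWP_ENTRY(5, 0x1234)
 * builds the pte value (5 << 1) | (0x1234 << 12) | _PAGE_INVALID |
 * _PAGE_RO = 0x123460a; SWP_TYPE then recovers (0x123460a >> 1) & 0x3f
 * = 5 and SWP_OFFSET recovers 0x123460a >> 12 = 0x1234.
 */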

#endif /* !__ASSEMBLY__ */

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define PageSkip(page)		(0)
#define kern_addr_valid(addr)	(1)

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#endif /* _ASM_S390_PGTABLE_H */