#ifndef _ASM_IA64_PGTABLE_H
#define _ASM_IA64_PGTABLE_H

/*
 * This file contains the functions and defines necessary to modify and use
 * the IA-64 page table tree.
 *
 * This hopefully works with any (fixed) IA-64 page-size, as defined
 * in <asm/page.h> (currently 8192).
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <linux/config.h>

#include <asm/mman.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/types.h>

#define IA64_MAX_PHYS_BITS      50      /* max. number of physical address bits (architected) */

/*
 * First, define the various bits in a PTE.  Note that the PTE format
 * matches the VHPT short format, the first doubleword of the VHPT long
 * format, and the first doubleword of the TLB insertion format.
 */
#define _PAGE_P_BIT             0
#define _PAGE_A_BIT             5
#define _PAGE_D_BIT             6

#define _PAGE_P                 (1 << _PAGE_P_BIT)      /* page present bit */
#define _PAGE_MA_WB             (0x0 <<  2)     /* write back memory attribute */
#define _PAGE_MA_UC             (0x4 <<  2)     /* uncacheable memory attribute */
#define _PAGE_MA_UCE            (0x5 <<  2)     /* UC exported attribute */
#define _PAGE_MA_WC             (0x6 <<  2)     /* write coalescing memory attribute */
#define _PAGE_MA_NAT            (0x7 <<  2)     /* not-a-thing attribute */
#define _PAGE_MA_MASK           (0x7 <<  2)
#define _PAGE_PL_0              (0 <<  7)       /* privilege level 0 (kernel) */
#define _PAGE_PL_1              (1 <<  7)       /* privilege level 1 (unused) */
#define _PAGE_PL_2              (2 <<  7)       /* privilege level 2 (unused) */
#define _PAGE_PL_3              (3 <<  7)       /* privilege level 3 (user) */
#define _PAGE_PL_MASK           (3 <<  7)
#define _PAGE_AR_R              (0 <<  9)       /* read only */
#define _PAGE_AR_RX             (1 <<  9)       /* read & execute */
#define _PAGE_AR_RW             (2 <<  9)       /* read & write */
#define _PAGE_AR_RWX            (3 <<  9)       /* read, write & execute */
#define _PAGE_AR_R_RW           (4 <<  9)       /* read / read & write */
#define _PAGE_AR_RX_RWX         (5 <<  9)       /* read & exec / read, write & exec */
#define _PAGE_AR_RWX_RW         (6 <<  9)       /* read, write & exec / read & write */
#define _PAGE_AR_X_RX           (7 <<  9)       /* exec & promote / read & exec */
#define _PAGE_AR_MASK           (7 <<  9)
#define _PAGE_AR_SHIFT          9
#define _PAGE_A                 (1 << _PAGE_A_BIT)      /* page accessed bit */
#define _PAGE_D                 (1 << _PAGE_D_BIT)      /* page dirty bit */
#define _PAGE_PPN_MASK          (((__IA64_UL(1) << IA64_MAX_PHYS_BITS) - 1) & ~0xfffUL)
#define _PAGE_ED                (__IA64_UL(1) << 52)    /* exception deferral */
#define _PAGE_PROTNONE          (__IA64_UL(1) << 63)
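
/*
 * Putting the definitions above together, the short-format PTE looks
 * roughly like this (a summary derived from the defines above):
 *
 *   bit   0:     p   (present)
 *   bit   1:     reserved
 *   bits  2-4:   ma  (memory attribute)
 *   bit   5:     a   (accessed)
 *   bit   6:     d   (dirty)
 *   bits  7-8:   pl  (privilege level)
 *   bits  9-11:  ar  (access rights)
 *   bits 12-49:  ppn (physical page number)
 *   bits 50-51:  reserved
 *   bit  52:     ed  (exception deferral)
 *   bits 53-63:  ignored by the hardware, available to software;
 *                Linux uses bit 63 for _PAGE_PROTNONE
 */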

#define _PFN_MASK               _PAGE_PPN_MASK
#define _PAGE_CHG_MASK          (_PFN_MASK | _PAGE_A | _PAGE_D)

#define _PAGE_SIZE_4K   12
#define _PAGE_SIZE_8K   13
#define _PAGE_SIZE_16K  14
#define _PAGE_SIZE_64K  16
#define _PAGE_SIZE_256K 18
#define _PAGE_SIZE_1M   20
#define _PAGE_SIZE_4M   22
#define _PAGE_SIZE_16M  24
#define _PAGE_SIZE_64M  26
#define _PAGE_SIZE_256M 28

#define __ACCESS_BITS           _PAGE_ED | _PAGE_A | _PAGE_P | _PAGE_MA_WB
#define __DIRTY_BITS_NO_ED      _PAGE_A | _PAGE_P | _PAGE_D | _PAGE_MA_WB
#define __DIRTY_BITS            _PAGE_ED | __DIRTY_BITS_NO_ED

/*
 * Definitions for first level:
 *
 * PGDIR_SHIFT determines what a first-level page table entry can map.
 */
#define PGDIR_SHIFT             (PAGE_SHIFT + 2*(PAGE_SHIFT-3))
#define PGDIR_SIZE              (__IA64_UL(1) << PGDIR_SHIFT)
#define PGDIR_MASK              (~(PGDIR_SIZE-1))
#define PTRS_PER_PGD            (__IA64_UL(1) << (PAGE_SHIFT-3))
#define USER_PTRS_PER_PGD       (5*PTRS_PER_PGD/8)      /* regions 0-4 are user regions */
#define FIRST_USER_PGD_NR       0

/*
 * Definitions for second level:
 *
 * PMD_SHIFT determines the size of the area a second-level page table
 * can map.
 */
#define PMD_SHIFT       (PAGE_SHIFT + (PAGE_SHIFT-3))
#define PMD_SIZE        (__IA64_UL(1) << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE-1))
#define PTRS_PER_PMD    (__IA64_UL(1) << (PAGE_SHIFT-3))

/*
 * Definitions for third level:
 */
#define PTRS_PER_PTE    (__IA64_UL(1) << (PAGE_SHIFT-3))
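
/*
 * For example, with the 8KB page size noted above (PAGE_SHIFT == 13):
 *
 *   PGDIR_SHIFT  = 13 + 2*10 = 33   a pgd entry maps 2^33 bytes (8GB)
 *   PMD_SHIFT    = 13 + 10   = 23   a pmd entry maps 2^23 bytes (8MB)
 *   PTRS_PER_PGD = PTRS_PER_PMD = PTRS_PER_PTE = 2^10 = 1024
 *
 * so a fully populated page table spans 1024 * 8GB = 8TB of virtual
 * space, split across the eight regions (USER_PTRS_PER_PGD = 640
 * entries cover the five user regions).
 */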

/*
 * All the normal masks have the "page accessed" bits on, as any time
 * they are used, the page is accessed. They are cleared only by the
 * page-out routines.
 */
#define PAGE_NONE       __pgprot(_PAGE_PROTNONE | _PAGE_A)
#define PAGE_SHARED     __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
#define PAGE_READONLY   __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
#define PAGE_COPY       __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
#define PAGE_GATE       __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
#define PAGE_KERNEL     __pgprot(__DIRTY_BITS  | _PAGE_PL_0 | _PAGE_AR_RWX)
#define PAGE_KERNELRX   __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
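
/*
 * As a concrete example (working through the definitions above),
 * PAGE_SHARED expands to
 *
 *   _PAGE_ED | _PAGE_A | _PAGE_P | _PAGE_MA_WB | _PAGE_PL_3 | _PAGE_AR_RW
 *   = (1<<52) | (1<<5) | (1<<0) | 0 | (3<<7) | (2<<9)
 *   = 0x00100000000005a1
 *
 * i.e. a present, accessed, write-back, user-level, read-write page
 * (the physical page number gets OR'd in later, e.g. by mk_pte()).
 */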

# ifndef __ASSEMBLY__

#include <asm/bitops.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>

/*
 * Next come the mappings that determine how mmap() protection bits
 * (PROT_EXEC, PROT_READ, PROT_WRITE, PROT_NONE) get implemented.  The
 * _P version gets used for a private mapping, the _S version for a
 * shared mapping (MAP_SHARED).  In a private mapping, we do a
 * copy-on-write if a task attempts to write to the page.
 */
        /* xwr */
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_READONLY   /* write to priv pg -> copy & make writable */
#define __P011  PAGE_READONLY   /* ditto */
#define __P100  __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
#define __P101  __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
#define __P110  PAGE_COPY
#define __P111  PAGE_COPY

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED     /* we don't have (and don't need) write-only */
#define __S011  PAGE_SHARED
#define __S100  __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
#define __S101  __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
#define __S110  __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
#define __S111  __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
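
/*
 * For instance, mmap(..., PROT_READ|PROT_WRITE, MAP_PRIVATE, ...)
 * selects __P011 == PAGE_READONLY: the first store to such a page
 * faults, and the fault handler copies the page and maps the copy
 * writable.  The same protection bits with MAP_SHARED select
 * __S011 == PAGE_SHARED, which is writable from the start.
 */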

#define pgd_ERROR(e)    printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
#define pmd_ERROR(e)    printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pte_ERROR(e)    printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))


/* Quick test to see if ADDR is a (potentially) valid physical address. */
static inline long
ia64_phys_addr_valid (unsigned long addr)
{
        return (addr & (local_cpu_data->unimpl_pa_mask)) == 0;
}

/*
 * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
 * memory.  For the return value to be meaningful, ADDR must be >=
 * PAGE_OFFSET.  This operation can be relatively expensive (e.g.,
 * require a hash-, or multi-level tree-lookup or something of that
 * sort) but it guarantees to return TRUE only if accessing the page
 * at that address does not cause an error.  Note that there may be
 * addresses for which kern_addr_valid() returns FALSE even though an
 * access would not cause an error (e.g., this is typically true for
 * memory mapped I/O regions).
 *
 * XXX Need to implement this for IA-64.
 */
#define kern_addr_valid(addr)   (1)


/*
 * Now come the defines and routines to manage and access the three-level
 * page table.
 */

/*
 * On some architectures, special things need to be done when setting
 * the PTE in a page table.  Nothing special needs to be done on IA-64.
 */
#define set_pte(ptep, pteval)   (*(ptep) = (pteval))

#define RGN_SIZE        (1UL << 61)
#define RGN_KERNEL      7

#define VMALLOC_START           (0xa000000000000000 + 3*PAGE_SIZE)
#define VMALLOC_VMADDR(x)       ((unsigned long)(x))
#define VMALLOC_END_INIT        (0xa000000000000000 + (1UL << (4*PAGE_SHIFT - 9)))
#define VMALLOC_END             vmalloc_end
extern unsigned long vmalloc_end;
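
/*
 * With the 8KB page size (PAGE_SHIFT == 13), VMALLOC_END_INIT is
 * 0xa000000000000000 + (1UL << 43), i.e. the vmalloc arena initially
 * covers almost the first 8TB of region 5 (0xa000...).
 */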

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,pgprot)                                                     \
({                                                                              \
        pte_t __pte;                                                            \
                                                                                \
        pte_val(__pte) = ((page - mem_map) << PAGE_SHIFT) | pgprot_val(pgprot); \
        __pte;                                                                  \
})
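
/*
 * Typical (illustrative) use, given a struct page and a protection:
 *
 *      pte_t pte = mk_pte(page, PAGE_SHARED);
 *      set_pte(ptep, pte);
 *
 * The page-frame number comes from the page's index in mem_map and
 * lands in the PPN field; the pgprot supplies the remaining bits.
 */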

/* This takes a physical page address that is used by the remapping functions */
#define mk_pte_phys(physpage, pgprot) \
({ pte_t __pte; pte_val(__pte) = physpage + pgprot_val(pgprot); __pte; })

#define pte_modify(_pte, newprot) \
        (__pte((pte_val(_pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)))

#define page_pte_prot(page,prot)        mk_pte(page, prot)
#define page_pte(page)                  page_pte_prot(page, __pgprot(0))

#define pte_none(pte)                   (!pte_val(pte))
#define pte_present(pte)                (pte_val(pte) & (_PAGE_P | _PAGE_PROTNONE))
#define pte_clear(pte)                  (pte_val(*(pte)) = 0UL)
/* pte_page() returns the "struct page *" corresponding to the PTE: */
#define pte_page(pte)                   (mem_map + (unsigned long) ((pte_val(pte) & _PFN_MASK) >> PAGE_SHIFT))

#define pmd_none(pmd)                   (!pmd_val(pmd))
#define pmd_bad(pmd)                    (!ia64_phys_addr_valid(pmd_val(pmd)))
#define pmd_present(pmd)                (pmd_val(pmd) != 0UL)
#define pmd_clear(pmdp)                 (pmd_val(*(pmdp)) = 0UL)
#define pmd_page(pmd)                   ((unsigned long) __va(pmd_val(pmd) & _PFN_MASK))

#define pgd_none(pgd)                   (!pgd_val(pgd))
#define pgd_bad(pgd)                    (!ia64_phys_addr_valid(pgd_val(pgd)))
#define pgd_present(pgd)                (pgd_val(pgd) != 0UL)
#define pgd_clear(pgdp)                 (pgd_val(*(pgdp)) = 0UL)
#define pgd_page(pgd)                   ((unsigned long) __va(pgd_val(pgd) & _PFN_MASK))

/*
 * The following have defined behavior only if pte_present() is true.
 */
#define pte_read(pte)           (((pte_val(pte) & _PAGE_AR_MASK) >> _PAGE_AR_SHIFT) < 6)
#define pte_write(pte)  ((unsigned) (((pte_val(pte) & _PAGE_AR_MASK) >> _PAGE_AR_SHIFT) - 2) <= 4)
#define pte_exec(pte)           ((pte_val(pte) & _PAGE_AR_RX) != 0)
#define pte_dirty(pte)          ((pte_val(pte) & _PAGE_D) != 0)
#define pte_young(pte)          ((pte_val(pte) & _PAGE_A) != 0)
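
/*
 * These tests decode the numeric access-rights field (see the
 * _PAGE_AR_* table above): pte_read() is true for AR values 0..5,
 * while pte_write() uses unsigned arithmetic so that (AR - 2) <= 4
 * accepts exactly the AR values 2..6, i.e. the variants that include
 * write permission.
 */
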
/*
 * Note: we convert AR_RWX to AR_RX and AR_RW to AR_R by clearing the 2nd bit in the
 * access rights:
 */
#define pte_wrprotect(pte)      (__pte(pte_val(pte) & ~_PAGE_AR_RW))
#define pte_mkwrite(pte)        (__pte(pte_val(pte) | _PAGE_AR_RW))
#define pte_mkexec(pte)         (__pte(pte_val(pte) | _PAGE_AR_RX))
#define pte_mkold(pte)          (__pte(pte_val(pte) & ~_PAGE_A))
#define pte_mkyoung(pte)        (__pte(pte_val(pte) | _PAGE_A))
#define pte_mkclean(pte)        (__pte(pte_val(pte) & ~_PAGE_D))
#define pte_mkdirty(pte)        (__pte(pte_val(pte) | _PAGE_D))

/*
 * Macro to mark a page protection value as "uncacheable".  Note that
 * "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached(prot)          __pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_UC)

/*
 * Macro to mark a page protection value as "write-combining".
 * Note that "protection" is really a misnomer here as the protection
 * value contains the memory attribute bits, dirty bits, and various
 * other bits as well.  Accesses through a write-combining translation
 * bypass the caches, but allow consecutive writes to be combined into
 * single (but larger) write transactions.
 */
#define pgprot_writecombine(prot)       __pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_WC)

/*
 * Return the region index for virtual address ADDRESS.
 */
static inline unsigned long
rgn_index (unsigned long address)
{
        ia64_va a;

        a.l = address;
        return a.f.reg;
}

/*
 * Return the region offset for virtual address ADDRESS.
 */
static inline unsigned long
rgn_offset (unsigned long address)
{
        ia64_va a;

        a.l = address;
        return a.f.off;
}

static inline unsigned long
pgd_index (unsigned long address)
{
        unsigned long region = address >> 61;
        unsigned long l1index = (address >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1);

        return (region << (PAGE_SHIFT - 6)) | l1index;
}

/* The offset in the first-level directory is given by the 3 region bits
   (61..63) and the seven level-1 bits (33..39).  */
static inline pgd_t*
pgd_offset (struct mm_struct *mm, unsigned long address)
{
        return mm->pgd + pgd_index(address);
}

/* In the kernel's mapped region we have a full 43-bit space available and completely
   ignore the region number (since we know it's in region number 5). */
#define pgd_offset_k(addr) \
        (init_mm.pgd + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)))

/* Find an entry in the second-level page table.. */
#define pmd_offset(dir,addr) \
        ((pmd_t *) pgd_page(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))

/* Find an entry in the third-level page table.. */
#define pte_offset(dir,addr) \
        ((pte_t *) pmd_page(*(dir)) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
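
/*
 * A full lookup therefore walks all three levels (an illustrative
 * sketch using the accessors above; the bit ranges assume the 8KB
 * page size):
 *
 *      pgd_t *pgd = pgd_offset(mm, addr);   -- region bits + bits 33..39
 *      pmd_t *pmd = pmd_offset(pgd, addr);  -- bits 23..32
 *      pte_t *pte = pte_offset(pmd, addr);  -- bits 13..22
 */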

/* atomic versions of some PTE manipulations: */
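/*
 * On SMP these operate on the PTE word in place with atomic bit ops;
 * this works because _PAGE_A_BIT (5) and _PAGE_D_BIT (6) are the bit
 * positions of the accessed and dirty bits within the PTE itself.
 */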

static inline int
ptep_test_and_clear_young (pte_t *ptep)
{
#ifdef CONFIG_SMP
        return test_and_clear_bit(_PAGE_A_BIT, ptep);
#else
        pte_t pte = *ptep;
        if (!pte_young(pte))
                return 0;
        set_pte(ptep, pte_mkold(pte));
        return 1;
#endif
}

static inline int
ptep_test_and_clear_dirty (pte_t *ptep)
{
#ifdef CONFIG_SMP
        return test_and_clear_bit(_PAGE_D_BIT, ptep);
#else
        pte_t pte = *ptep;
        if (!pte_dirty(pte))
                return 0;
        set_pte(ptep, pte_mkclean(pte));
        return 1;
#endif
}

static inline pte_t
ptep_get_and_clear (pte_t *ptep)
{
#ifdef CONFIG_SMP
        return __pte(xchg((long *) ptep, 0));
#else
        pte_t pte = *ptep;
        pte_clear(ptep);
        return pte;
#endif
}

static inline void
ptep_set_wrprotect (pte_t *ptep)
{
#ifdef CONFIG_SMP
        unsigned long new, old;

        do {
                old = pte_val(*ptep);
                new = pte_val(pte_wrprotect(__pte (old)));
        } while (cmpxchg((unsigned long *) ptep, old, new) != old);
#else
        pte_t old_pte = *ptep;
        set_pte(ptep, pte_wrprotect(old_pte));
#endif
}

static inline void
ptep_mkdirty (pte_t *ptep)
{
#ifdef CONFIG_SMP
        set_bit(_PAGE_D_BIT, ptep);
#else
        pte_t old_pte = *ptep;
        set_pte(ptep, pte_mkdirty(old_pte));
#endif
}

static inline int
pte_same (pte_t a, pte_t b)
{
        return pte_val(a) == pte_val(b);
}

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init (void);

#define SWP_TYPE(entry)                 (((entry).val >> 1) & 0xff)
#define SWP_OFFSET(entry)               (((entry).val << 1) >> 10)
#define SWP_ENTRY(type,offset)          ((swp_entry_t) { ((type) << 1) | ((long) (offset) << 9) })
#define pte_to_swp_entry(pte)           ((swp_entry_t) { pte_val(pte) })
#define swp_entry_to_pte(x)             ((pte_t) { (x).val })
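
/*
 * Sketching the resulting swap-pte layout: bit 0 (the present bit)
 * stays clear, bits 1..8 hold the 8-bit swap type, and the offset is
 * stored starting at bit 9.  SWP_OFFSET() shifts left by one first to
 * discard bit 63 (_PAGE_PROTNONE), so the offset proper occupies bits
 * 9..62.
 */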

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define PageSkip(page)          (0)

#define io_remap_page_range remap_page_range    /* XXX is this right? */

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
extern struct page *zero_page_memmap_ptr;
#define ZERO_PAGE(vaddr) (zero_page_memmap_ptr)

/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA

#ifdef CONFIG_HUGETLB_PAGE
#define HUGETLB_PGDIR_SHIFT     (HPAGE_SHIFT + 2*(PAGE_SHIFT-3))
#define HUGETLB_PGDIR_SIZE      (__IA64_UL(1) << HUGETLB_PGDIR_SHIFT)
#define HUGETLB_PGDIR_MASK      (~(HUGETLB_PGDIR_SIZE-1))
#endif

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()    do { } while (0)

/* arch mem_map init routines are needed due to holes in a virtual mem_map */
#define HAVE_ARCH_MEMMAP_INIT

typedef unsigned long memmap_init_callback_t(struct page *start,
        struct page *end, int zone, unsigned long start_paddr, int highmem);

extern unsigned long arch_memmap_init (memmap_init_callback_t *callback,
        struct page *start, struct page *end, int zone,
        unsigned long start_paddr, int highmem);

# endif /* !__ASSEMBLY__ */

/*
 * Identity-mapped regions use a large page size.  We'll call such large pages
 * "granules".  If you can think of a better name that's unambiguous, let me
 * know...
 */
#if defined(CONFIG_IA64_GRANULE_64MB)
# define IA64_GRANULE_SHIFT     _PAGE_SIZE_64M
#elif defined(CONFIG_IA64_GRANULE_16MB)
# define IA64_GRANULE_SHIFT     _PAGE_SIZE_16M
#endif
#define IA64_GRANULE_SIZE       (1 << IA64_GRANULE_SHIFT)
/*
 * log2() of the page size we use to map the kernel image (IA64_TR_KERNEL):
 */
#define KERNEL_TR_PAGE_SHIFT    _PAGE_SIZE_64M
#define KERNEL_TR_PAGE_SIZE     (1 << KERNEL_TR_PAGE_SHIFT)
#define KERNEL_TR_PAGE_NUM      ((KERNEL_START - PAGE_OFFSET) / KERNEL_TR_PAGE_SIZE)

#endif /* _ASM_IA64_PGTABLE_H */
