/*
 * pgtable.h
 *
 * PowerPC memory management structures
 *
 * It is a stripped down version of the Linux PPC file...
 *
 * Copyright (C) 1999 Eric Valette (valette@crf.canon.fr)
 * Canon Centre Recherche France.
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.OARcorp.com/rtems/license.html.
 *
 * $Id: pgtable.h,v 1.2 2001-09-27 12:01:30 chris Exp $
 */

#ifndef _PPC_PGTABLE_H
#define _PPC_PGTABLE_H

/*
 * The PowerPC MMU uses a hash table containing PTEs, together with
 * a set of 16 segment registers (on 32-bit implementations), to define
 * the virtual to physical address mapping.
 *
 * We use the hash table as an extended TLB, i.e. a cache of currently
 * active mappings. We maintain a two-level page table tree, much like
 * that used by the i386, for the sake of the Linux memory management code.
 * Low-level assembler code in head.S (procedure hash_page) is responsible
 * for extracting ptes from the tree and putting them into the hash table
 * when necessary, and updating the accessed and modified bits in the
 * page table tree.
 *
 * The PowerPC MPC8xx uses a TLB with hardware assisted, software tablewalk.
 * We also use the two level tables, but we can put the real bits in them
 * needed for the TLB and tablewalk. These definitions require Mx_CTR.PPM = 0,
 * Mx_CTR.PPCS = 0, and MD_CTR.TWAM = 1. The level 2 descriptor has
 * additional page protection (when Mx_CTR.PPCS = 1) that allows TLB hit
 * based upon user/super access. The TLB does not have accessed or write
 * protect bits. We assume that if the TLB gets loaded with an entry it is
 * accessed, and overload the changed bit for write protect. We use
 * two bits in the software pte that are supposed to be set to zero in
 * the TLB entry (24 and 25) for these indicators. Although the level 1
 * descriptor contains the guarded and writethrough/copyback bits, we can
 * set these at the page level since they get copied from the Mx_TWC
 * register when the TLB entry is loaded. We will use bit 27 for guard, since
 * that is where it exists in the MD_TWC, and bit 26 for writethrough.
 * These will get masked from the level 2 descriptor at TLB load time, and
 * copied to the MD_TWC before it gets loaded.
 */

/* PMD_SHIFT determines the size of the area mapped by the second-level page tables */
#define PMD_SHIFT 22
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT 22
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))

/*
 * entries per page directory level: our page-table tree is two-level, so
 * we don't really have any PMD directory.
 */
#define PTRS_PER_PTE 1024
#define PTRS_PER_PMD 1
#define PTRS_PER_PGD 1024
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)

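/*
 * Illustrative sketch, not part of the original header: assuming the usual
 * 4kB pages (PAGE_SHIFT == 12 and PAGE_SIZE from the accompanying page.h,
 * which this file does not define), the constants above split a 32-bit
 * virtual address into a 10-bit page directory index, a 10-bit page table
 * index and a 12-bit byte offset. The example_* macros are hypothetical
 * and only restate that arithmetic; they are kept disabled.
 */
#if 0
#define example_pgd_index(va)   ((unsigned long)(va) >> PGDIR_SHIFT)
#define example_pte_index(va)   (((unsigned long)(va) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define example_page_offset(va) ((unsigned long)(va) & (PAGE_SIZE - 1))
#endif
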
/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 64MB value just means that there will be a 64MB "hole" after the
 * physical memory until the kernel virtual memory starts. That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems. We do have to worry
 * about clashes between our early calls to ioremap() (which grow downwards
 * from ioremap_base) and the VM area allocations (which grow upwards
 * from VMALLOC_START). For this reason we have ioremap_bot to check when
 * we actually run into our mappings set up in early boot with the VM
 * system. This really does become a problem for machines with good amounts
 * of RAM. -- Cort
 */
#define VMALLOC_OFFSET (0x4000000) /* 64M */
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
#define VMALLOC_END ioremap_bot

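/*
 * Worked example (illustrative, not part of the original header): if
 * high_memory were 0x04000000 (say, the top of a 64MB direct mapping),
 * VMALLOC_START evaluates to (0x04000000 + 0x04000000) & ~0x03ffffff ==
 * 0x08000000, i.e. the vmalloc area begins at the next 64MB boundary and
 * the "hole" below it is a full 64MB.
 */
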
/*
 * Bits in a linux-style PTE. These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */
#define _PAGE_PRESENT 0x001 /* software: pte contains a translation */
#define _PAGE_USER 0x002 /* matches one of the PP bits */
#define _PAGE_RW 0x004 /* software: user write access allowed */
#define _PAGE_GUARDED 0x008
#define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
#define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
#define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
#define _PAGE_DIRTY 0x080 /* C: page changed */
#define _PAGE_ACCESSED 0x100 /* R: page referenced */
#define _PAGE_HWWRITE 0x200 /* software: _PAGE_RW & _PAGE_DIRTY */
#define _PAGE_SHARED 0

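/*
 * Illustrative sketch, not part of the original header: the referenced,
 * changed and write-permission bits of a raw PTE value would typically be
 * tested as below (the example_* names are hypothetical; the full Linux
 * header this file was stripped from provides pte_young()/pte_dirty()/
 * pte_write() for this instead).
 */
#if 0
#define example_pte_referenced(pteval) (((pteval) & _PAGE_ACCESSED) != 0)
#define example_pte_changed(pteval)    (((pteval) & _PAGE_DIRTY) != 0)
#define example_pte_writable(pteval)   (((pteval) & _PAGE_RW) != 0)
#endif
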
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED)
#define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE)

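/*
 * Illustrative note, not part of the original header: _PAGE_CHG_MASK names
 * the bits that survive a protection change (the page frame number plus the
 * referenced/changed bits). In the Linux code this file was stripped from,
 * pte_modify() does essentially the following; the guarded macro is only a
 * hypothetical restatement of that idea.
 */
#if 0
#define example_pte_modify(pteval, newprotbits) \
	(((pteval) & _PAGE_CHG_MASK) | (newprotbits))
#endif
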
#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)

#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER | \
			     _PAGE_SHARED)
#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED)
#define PAGE_KERNEL_CI __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED | \
				_PAGE_NO_CACHE)

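/*
 * Illustrative sketch, not part of the original header: a software PTE is
 * just the physical page address combined with one of the protection words
 * above. PAGE_MASK and __pgprot()/pgprot_t come from the accompanying
 * page.h, not from this file, and the guarded macro below is hypothetical.
 * A cache-inhibited protection such as PAGE_KERNEL_CI is the kind one would
 * choose for memory-mapped device registers.
 */
#if 0
#define example_mk_pte_val(physaddr, protbits) \
	(((unsigned long)(physaddr) & PAGE_MASK) | (protbits))
/* e.g. example_mk_pte_val(uart_phys, _PAGE_BASE | _PAGE_WRENABLE | _PAGE_NO_CACHE | _PAGE_GUARDED) */
#endif
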
/*
 * The PowerPC can only do execute protection on a segment (256MB) basis,
 * not on a page basis. So we consider execute permission the same as read.
 * Also, write permissions imply read permissions.
 * This is the closest we can get.
 */
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
#define __P010 PAGE_COPY
#define __P011 PAGE_COPY
#define __P100 PAGE_READONLY
#define __P101 PAGE_READONLY
#define __P110 PAGE_COPY
#define __P111 PAGE_COPY

#define __S000 PAGE_NONE
#define __S001 PAGE_READONLY
#define __S010 PAGE_SHARED
#define __S011 PAGE_SHARED
#define __S100 PAGE_READONLY
#define __S101 PAGE_READONLY
#define __S110 PAGE_SHARED
#define __S111 PAGE_SHARED

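/*
 * Illustrative note, not part of the original header: in the Linux sources
 * this file was stripped down from, the __Pxxx/__Sxxx entries fill a
 * 16-slot protection_map[] indexed by the mmap() PROT_READ/WRITE/EXEC bits,
 * with the __P rows used for private mappings and the __S rows for shared
 * ones. A minimal sketch of such a table (pgprot_t comes from page.h, and
 * the array itself is hypothetical here):
 */
#if 0
static pgprot_t example_protection_map[16] = {
	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};
#endif
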
#endif /* _PPC_PGTABLE_H */