#ifndef _M68K_PGTABLE_H
#define _M68K_PGTABLE_H

#include <linux/config.h>
#include <asm/setup.h>

#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <linux/threads.h>

/*
 * This file contains the functions and defines necessary to modify and use
 * the m68k page table tree.
 */

#include <asm/virtconvert.h>

/* Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval) \
	do { \
		*(pteptr) = (pteval); \
	} while (0)


/* PMD_SHIFT determines the size of the area a second-level page table can map */
#ifdef CONFIG_SUN3
#define PMD_SHIFT	17
#else
#define PMD_SHIFT	22
#endif
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#ifdef CONFIG_SUN3
#define PGDIR_SHIFT	17
#else
#define PGDIR_SHIFT	25
#endif
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * entries per page directory level: the m68k is configured as three-level,
 * so we do have PMD level physically.
 */
#ifdef CONFIG_SUN3
#define PTRS_PER_PTE	16
#define PTRS_PER_PMD	1
#define PTRS_PER_PGD	2048
#else
#define PTRS_PER_PTE	1024
#define PTRS_PER_PMD	8
#define PTRS_PER_PGD	128
#endif
#define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
#define FIRST_USER_PGD_NR	0

/* Virtual address region for use by kernel_map() */
#ifdef CONFIG_SUN3
#define KMAP_START	0x0DC00000
#define KMAP_END	0x0E000000
#else
#define KMAP_START	0xd0000000
#define KMAP_END	0xf0000000
#endif

#ifndef CONFIG_SUN3
/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be a 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leaves a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET	(8*1024*1024)
#define VMALLOC_START	(((unsigned long) high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
#define VMALLOC_END	KMAP_START
#else
/* On Sun3 the top of the vmalloc area is determined at runtime. */
extern unsigned long vmalloc_end;
#define VMALLOC_START	0x0f800000
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
#define VMALLOC_END	vmalloc_end
#endif /* CONFIG_SUN3 */

/* zero page used for uninitialized stuff */
extern unsigned long empty_zero_page;

/*
 * BAD_PAGETABLE is used when we need a bogus page-table, while
 * BAD_PAGE is used for a bogus page.
 *
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern pte_t __bad_page(void);
extern pte_t *__bad_pagetable(void);

#define BAD_PAGETABLE	__bad_pagetable()
#define BAD_PAGE	__bad_page()
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR	(8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK	(~(sizeof(void*)-1))

/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
/* 64-bit machines, beware!  SRB. */
#define SIZEOF_PTR_LOG2		2

/*
 * Check if the addr/len goes up to the end of a physical
 * memory chunk.  Used for DMA functions.
 */
#ifdef CONFIG_SINGLE_MEMORY_CHUNK
/*
 * It makes no sense to consider whether we cross a memory boundary if
 * we support just one physical chunk of memory.
 */
static inline int mm_end_of_chunk(unsigned long addr, int len)
{
	return 0;
}
#else
int mm_end_of_chunk(unsigned long addr, int len);
#endif

extern void kernel_set_cachemode(void *addr, unsigned long size, int cmode);

/*
 * The m68k doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.  The Sun3 does, but
 * they are updated on demand.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long address, pte_t pte)
{
}

#ifdef CONFIG_SUN3
/* Macros to (de)construct the fake PTEs representing swap pages.
 * Sun3 layout: bits 0-6 hold the swap type, the rest the offset.
 */
#define SWP_TYPE(x)		((x).val & 0x7F)
#define SWP_OFFSET(x)		(((x).val) >> 7)
#define SWP_ENTRY(type,offset)	((swp_entry_t) { ((type) | ((offset) << 7)) })
#define pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define swp_entry_to_pte(x)	((pte_t) { (x).val })

#else

/* Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e)) */
#define SWP_TYPE(x)		(((x).val >> 3) & 0x1ff)
#define SWP_OFFSET(x)		((x).val >> 12)
#define SWP_ENTRY(type, offset)	((swp_entry_t) { ((type) << 3) | ((offset) << 12) })
#define pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define swp_entry_to_pte(x)	((pte_t) { (x).val })

#endif /* CONFIG_SUN3 */

#endif /* !__ASSEMBLY__ */

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define PageSkip(page)		(0)
#define kern_addr_valid(addr)	(1)

#define io_remap_page_range	remap_page_range

/* MMU-specific headers */

#ifdef CONFIG_SUN3
#include <asm/sun3_pgtable.h>
#else
#include <asm/motorola_pgtable.h>
#endif

#ifndef __ASSEMBLY__
#include <asm-generic/pgtable.h>
#endif /* !__ASSEMBLY__ */

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#endif /* _M68K_PGTABLE_H */