/* $Id: pgalloc.h,v 1.1.1.1 2004-04-15 03:00:54 phoenix Exp $ */
#ifndef _SPARC64_PGALLOC_H
#define _SPARC64_PGALLOC_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>

#include <asm/page.h>
#include <asm/spitfire.h>
#include <asm/pgtable.h>

/* Cache and TLB flush operations. */

/* These are the same regardless of whether this is an SMP kernel or not. */
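/*
 * flush_cache_mm() only spills the task's register windows via
 * flushw_user(); the D-cache is write-through (see below), so the
 * windows should be the only per-mm state that needs writing back.
 */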
#define flush_cache_mm(__mm) \
	do { if ((__mm) == current->mm) flushw_user(); } while(0)
#define flush_cache_range(mm, start, end) \
	flush_cache_mm(mm)
#define flush_cache_page(vma, page) \
	flush_cache_mm((vma)->vm_mm)

/* This is unnecessary on the SpitFire since D-CACHE is write-through. */
#define flush_page_to_ram(page)		do { } while (0)

/*
 * On spitfire, the icache doesn't snoop local stores and we don't
 * use block commit stores (which invalidate icache lines) during
 * module load, so we need this.
 */
extern void flush_icache_range(unsigned long start, unsigned long end);

extern void __flush_dcache_page(void *addr, int flush_icache);
extern void __flush_icache_page(unsigned long);
extern void flush_dcache_page_impl(struct page *page);
#ifdef CONFIG_SMP
extern void smp_flush_dcache_page_impl(struct page *page, int cpu);
extern void flush_dcache_page_all(struct mm_struct *mm, struct page *page);
#else
#define smp_flush_dcache_page_impl(page,cpu) flush_dcache_page_impl(page)
#define flush_dcache_page_all(mm,page) flush_dcache_page_impl(page)
#endif

extern void flush_dcache_page(struct page *page);

extern void __flush_dcache_range(unsigned long start, unsigned long end);

extern void __flush_cache_all(void);

extern void __flush_tlb_all(void);
extern void __flush_tlb_mm(unsigned long context, unsigned long r);
extern void __flush_tlb_range(unsigned long context, unsigned long start,
			      unsigned long r, unsigned long end,
			      unsigned long pgsz, unsigned long size);
extern void __flush_tlb_page(unsigned long context, unsigned long page, unsigned long r);
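
/*
 * UP variants.  Each macro first checks CTX_VALID(): an mm that was
 * never assigned a hardware context cannot have stale TLB entries.
 * The SECONDARY_CONTEXT argument presumably tells the low-level
 * demap routines which MMU context register to load the target
 * context into, so the flush does not disturb the current primary
 * context.
 */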

#ifndef CONFIG_SMP

#define flush_cache_all()	__flush_cache_all()
#define flush_tlb_all()		__flush_tlb_all()

#define flush_tlb_mm(__mm) \
do { if (CTX_VALID((__mm)->context)) \
	__flush_tlb_mm(CTX_HWBITS((__mm)->context), SECONDARY_CONTEXT); \
} while(0)

#define flush_tlb_range(__mm, start, end) \
do { if (CTX_VALID((__mm)->context)) { \
	unsigned long __start = (start) & PAGE_MASK; \
	unsigned long __end = PAGE_ALIGN(end); \
	__flush_tlb_range(CTX_HWBITS((__mm)->context), __start, \
			  SECONDARY_CONTEXT, __end, PAGE_SIZE, \
			  (__end - __start)); \
     } \
} while(0)

#define flush_tlb_page(vma, page) \
do { struct mm_struct *__mm = (vma)->vm_mm; \
     if (CTX_VALID(__mm->context)) \
	__flush_tlb_page(CTX_HWBITS(__mm->context), (page) & PAGE_MASK, \
			 SECONDARY_CONTEXT); \
} while(0)

#else /* CONFIG_SMP */

extern void smp_flush_cache_all(void);
extern void smp_flush_tlb_all(void);
extern void smp_flush_tlb_mm(struct mm_struct *mm);
extern void smp_flush_tlb_range(struct mm_struct *mm, unsigned long start,
				unsigned long end);
extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long page);

#define flush_cache_all()	smp_flush_cache_all()
#define flush_tlb_all()		smp_flush_tlb_all()
#define flush_tlb_mm(mm)	smp_flush_tlb_mm(mm)
#define flush_tlb_range(mm, start, end) \
	smp_flush_tlb_range(mm, start, end)
#define flush_tlb_page(vma, page) \
	smp_flush_tlb_page((vma)->vm_mm, page)

#endif /* ! CONFIG_SMP */
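
/*
 * sparc64 makes the page tables visible through a linear "virtual
 * page table" (VPTE) mapping.  Since a PTE is 8 bytes, the VPTE
 * entry for address A lives at vpte_base + (A >> (PAGE_SHIFT - 3)),
 * i.e. A divided by the page size, times sizeof(pte_t).  When the
 * page tables covering [start, end) are torn down, the TLB entries
 * mapping that slice of the VPTE must be flushed as well, which is
 * what flush_tlb_pgtables() does.
 */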
extern __inline__ void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start,
					  unsigned long end)
{
	/* Note the signed type. */
	long s = start, e = end, vpte_base;
	if (s > e)
		/* Nobody should call us with start below VM hole and end above.
		   See if it is really true. */
		BUG();
#if 0
	/* Currently free_pgtables guarantees this. */
	s &= PMD_MASK;
	e = (e + PMD_SIZE - 1) & PMD_MASK;
#endif
	vpte_base = (tlb_type == spitfire ?
		     VPTE_BASE_SPITFIRE :
		     VPTE_BASE_CHEETAH);
	flush_tlb_range(mm,
			vpte_base + (s >> (PAGE_SHIFT - 3)),
			vpte_base + (e >> (PAGE_SHIFT - 3)));
}

/* Page table allocation/freeing. */
#ifdef CONFIG_SMP
/* Sliiiicck: on SMP the quicklists live in each cpu's cpu_data entry. */
#define pgt_quicklists	cpu_data[smp_processor_id()]
#else
extern struct pgtable_cache_struct {
	unsigned long *pgd_cache;
	unsigned long *pte_cache[2];
	unsigned int pgcache_size;
	unsigned int pgdcache_size;
} pgt_quicklists;
#endif
#define pgd_quicklist		(pgt_quicklists.pgd_cache)
#define pmd_quicklist		((unsigned long *)0)
#define pte_quicklist		(pgt_quicklists.pte_cache)
#define pgtable_cache_size	(pgt_quicklists.pgcache_size)
#define pgd_cache_size		(pgt_quicklists.pgdcache_size)
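
/*
 * Freed page tables are kept on simple LIFO quicklists: the first
 * word of each free page points to the next one, and
 * pgtable_cache_size/pgd_cache_size count the cached pages.
 * do_check_pgt_cache(), declared at the bottom of this file, trims
 * the lists when they grow too big.
 */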

#ifndef CONFIG_SMP
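
/*
 * On UP, a pgd occupies only half a page, so two pgds are packed
 * into each cached page.  The page's pprev_hash field is reused as
 * a bitmask: bit 0 set means the lower half is free, bit 1 the
 * upper half; next_hash chains the page into pgd_quicklist while
 * at least one half is free.
 */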

extern __inline__ void free_pgd_fast(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	if (!page->pprev_hash) {
		(unsigned long *)page->next_hash = pgd_quicklist;
		pgd_quicklist = (unsigned long *)page;
	}
	(unsigned long)page->pprev_hash |=
		(((unsigned long)pgd & (PAGE_SIZE / 2)) ? 2 : 1);
	pgd_cache_size++;
}

extern __inline__ pgd_t *get_pgd_fast(void)
{
	struct page *ret;

	if ((ret = (struct page *)pgd_quicklist) != NULL) {
		unsigned long mask = (unsigned long)ret->pprev_hash;
		unsigned long off = 0;

		if (mask & 1)
			mask &= ~1;
		else {
			off = PAGE_SIZE / 2;
			mask &= ~2;
		}
		(unsigned long)ret->pprev_hash = mask;
		if (!mask)
			pgd_quicklist = (unsigned long *)ret->next_hash;
		ret = (struct page *)(__page_address(ret) + off);
		pgd_cache_size--;
	} else {
		struct page *page = alloc_page(GFP_KERNEL);

		if (page) {
			ret = (struct page *)page_address(page);
			clear_page(ret);
			(unsigned long)page->pprev_hash = 2;
			(unsigned long *)page->next_hash = pgd_quicklist;
			pgd_quicklist = (unsigned long *)page;
			pgd_cache_size++;
		}
	}
	return (pgd_t *)ret;
}

#else /* CONFIG_SMP */
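
/*
 * The SMP versions cache whole pages in the per-cpu quicklist,
 * linked through their first word; the half-page packing above is
 * not used here.
 */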

extern __inline__ void free_pgd_fast(pgd_t *pgd)
{
	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
	pgd_quicklist = (unsigned long *) pgd;
	pgtable_cache_size++;
}

extern __inline__ pgd_t *get_pgd_fast(void)
{
	unsigned long *ret;

	if ((ret = pgd_quicklist) != NULL) {
		pgd_quicklist = (unsigned long *)(*ret);
		ret[0] = 0;
		pgtable_cache_size--;
	} else {
		ret = (unsigned long *) __get_free_page(GFP_KERNEL);
		if (ret)
			memset(ret, 0, PAGE_SIZE);
	}
	return (pgd_t *)ret;
}

extern __inline__ void free_pgd_slow(pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}

#endif /* CONFIG_SMP */

#if (L1DCACHE_SIZE > PAGE_SIZE)			/* is there a D$ aliasing problem? */
#define VPTE_COLOR(address)		(((address) >> (PAGE_SHIFT + 10)) & 1UL)
#define DCACHE_COLOR(address)		(((address) >> PAGE_SHIFT) & 1UL)
#else
#define VPTE_COLOR(address)		0
#define DCACHE_COLOR(address)		0
#endif
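
/*
 * When the virtually-indexed D-cache spans two pages, bit PAGE_SHIFT
 * of a virtual address selects which of the two cache "colors" a
 * page maps to (DCACHE_COLOR).  VPTE_COLOR(address) is the color the
 * PTE page for `address' has inside the virtual page table: its VPTE
 * offset is (address >> (PAGE_SHIFT - 3)), whose PAGE_SHIFT'th bit
 * is bit (2 * PAGE_SHIFT - 3) of address, i.e. PAGE_SHIFT + 10 given
 * the 8K pages assumed here (PAGE_SHIFT == 13).
 */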

#define pgd_populate(MM, PGD, PMD)	pgd_set(PGD, PMD)

extern __inline__ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pmd_t *pmd = (pmd_t *)__get_free_page(GFP_KERNEL);
	if (pmd)
		memset(pmd, 0, PAGE_SIZE);
	return pmd;
}
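
/*
 * pmd pages are not themselves mapped through the VPTE, so their
 * D-cache color should not matter; the fast allocator just takes a
 * page from whichever colored pte quicklist is non-empty.
 */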
extern __inline__ pmd_t *pmd_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
	unsigned long *ret;
	int color = 0;

	if (pte_quicklist[color] == NULL)
		color = 1;

	if ((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
		pte_quicklist[color] = (unsigned long *)(*ret);
		ret[0] = 0;
		pgtable_cache_size--;
	}
	return (pmd_t *)ret;
}

extern __inline__ void free_pmd_fast(pmd_t *pmd)
{
	unsigned long color = DCACHE_COLOR((unsigned long)pmd);
	*(unsigned long *)pmd = (unsigned long) pte_quicklist[color];
	pte_quicklist[color] = (unsigned long *) pmd;
	pgtable_cache_size++;
}

extern __inline__ void free_pmd_slow(pmd_t *pmd)
{
	free_page((unsigned long)pmd);
}

#define pmd_populate(MM, PMD, PTE)	pmd_set(PMD, PTE)
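
/*
 * pte pages do show up in the VPTE, so pte_alloc_one_fast() picks
 * the quicklist whose color matches VPTE_COLOR(address); the page
 * then has the same D-cache color as its slot in the virtual page
 * table.  free_pte_fast() returns a page to the list matching its
 * own DCACHE_COLOR().
 */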

extern pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address);

extern __inline__ pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
	unsigned long color = VPTE_COLOR(address);
	unsigned long *ret;

	if ((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
		pte_quicklist[color] = (unsigned long *)(*ret);
		ret[0] = 0;
		pgtable_cache_size--;
	}
	return (pte_t *)ret;
}

extern __inline__ void free_pte_fast(pte_t *pte)
{
	unsigned long color = DCACHE_COLOR((unsigned long)pte);
	*(unsigned long *)pte = (unsigned long) pte_quicklist[color];
	pte_quicklist[color] = (unsigned long *) pte;
	pgtable_cache_size++;
}

extern __inline__ void free_pte_slow(pte_t *pte)
{
	free_page((unsigned long)pte);
}

#define pte_free(pte)		free_pte_fast(pte)
#define pmd_free(pmd)		free_pmd_fast(pmd)
#define pgd_free(pgd)		free_pgd_fast(pgd)
#define pgd_alloc(mm)		get_pgd_fast()

extern int do_check_pgt_cache(int, int);

#endif /* _SPARC64_PGALLOC_H */