OpenCores Subversion repository or1k
URL: https://opencores.org/ocsvn/or1k/or1k/trunk
File: or1k/trunk/linux/linux-2.4/include/asm-ppc/pgtable.h (rev 1774)
#ifdef __KERNEL__
#ifndef _PPC_PGTABLE_H
#define _PPC_PGTABLE_H

#include <linux/config.h>

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/processor.h>              /* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>

extern void _tlbie(unsigned long address);
extern void _tlbia(void);

#ifdef CONFIG_4xx
#ifdef CONFIG_PIN_TLB
/* When pinning entries on the 4xx, we have to use a software function
 * to ensure we don't remove them since there isn't any hardware support
 * for this.
 */
#define __tlbia()       _tlbia()
#else
#define __tlbia()       asm volatile ("tlbia; sync" : : : "memory")
#endif

static inline void local_flush_tlb_all(void)
        { __tlbia(); }
static inline void local_flush_tlb_mm(struct mm_struct *mm)
        { __tlbia(); }
static inline void local_flush_tlb_page(struct vm_area_struct *vma,
                                unsigned long vmaddr)
        { _tlbie(vmaddr); }
static inline void local_flush_tlb_range(struct mm_struct *mm,
                                unsigned long start, unsigned long end)
        { __tlbia(); }
#define update_mmu_cache(vma, addr, pte)        do { } while (0)

#elif defined(CONFIG_8xx)
#define __tlbia()       asm volatile ("tlbia; sync" : : : "memory")

static inline void local_flush_tlb_all(void)
        { __tlbia(); }
static inline void local_flush_tlb_mm(struct mm_struct *mm)
        { __tlbia(); }
static inline void local_flush_tlb_page(struct vm_area_struct *vma,
                                unsigned long vmaddr)
        { _tlbie(vmaddr); }
static inline void local_flush_tlb_range(struct mm_struct *mm,
                                unsigned long start, unsigned long end)
        { __tlbia(); }
#define update_mmu_cache(vma, addr, pte)        do { } while (0)

#else   /* 6xx, 7xx, 7xxx cpus */
struct mm_struct;
struct vm_area_struct;
extern void local_flush_tlb_all(void);
extern void local_flush_tlb_mm(struct mm_struct *mm);
extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void local_flush_tlb_range(struct mm_struct *mm, unsigned long start,
                            unsigned long end);

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to put a corresponding HPTE into the hash table
 * ahead of time, instead of waiting for the inevitable extra
 * hash-table miss exception.
 */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
#endif

#define flush_tlb_all local_flush_tlb_all
#define flush_tlb_mm local_flush_tlb_mm
#define flush_tlb_page local_flush_tlb_page
#define flush_tlb_range local_flush_tlb_range

/*
 * This is called in munmap when we have freed up some page-table
 * pages.  We don't need to do anything here, there's nothing special
 * about our page-table pages.  -- paulus
 */
static inline void flush_tlb_pgtables(struct mm_struct *mm,
                                      unsigned long start, unsigned long end)
{
}

/*
 * No cache flushing is required when address mappings are
 * changed, because the caches on PowerPCs are physically
 * addressed.  -- paulus
 * Also, when SMP we use the coherency (M) bit of the
 * BATs and PTEs.  -- Cort
 */
#define flush_cache_all()               do { } while (0)
#define flush_cache_mm(mm)              do { } while (0)
#define flush_cache_range(mm, a, b)     do { } while (0)
#define flush_cache_page(vma, p)        do { } while (0)
#define flush_page_to_ram(page)         do { } while (0)

extern void flush_icache_user_range(struct vm_area_struct *vma,
                struct page *page, unsigned long addr, int len);
extern void flush_icache_range(unsigned long, unsigned long);
extern void __flush_dcache_icache(void *page_va);
extern void flush_dcache_page(struct page *page);
extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);

extern unsigned long va_to_phys(unsigned long address);
extern pte_t *va_to_pte(unsigned long address);
extern unsigned long ioremap_bot, ioremap_base;
extern unsigned long vmalloc_start;

/* Start and end of the vmalloc area. */
#define VMALLOC_START   vmalloc_start
#define VMALLOC_END     ioremap_bot
#define VMALLOC_VMADDR(x) ((unsigned long)(x))

#endif /* __ASSEMBLY__ */
 
/*
 * The PowerPC MMU uses a hash table containing PTEs, together with
 * a set of 16 segment registers (on 32-bit implementations), to define
 * the virtual to physical address mapping.
 *
 * We use the hash table as an extended TLB, i.e. a cache of currently
 * active mappings.  We maintain a two-level page table tree, much
 * like that used by the i386, for the sake of the Linux memory
 * management code.  Low-level assembler code in hashtable.S
 * (procedure hash_page) is responsible for extracting ptes from the
 * tree and putting them into the hash table when necessary, and
 * updating the accessed and modified bits in the page table tree.
 */

/*
 * The PowerPC MPC8xx uses a TLB with hardware assisted, software tablewalk.
 * We also use the two level tables, but we can put the real bits in them
 * needed for the TLB and tablewalk.  These definitions require Mx_CTR.PPM = 0,
 * Mx_CTR.PPCS = 0, and MD_CTR.TWAM = 1.  The level 2 descriptor has
 * additional page protection (when Mx_CTR.PPCS = 1) that allows TLB hit
 * based upon user/super access.  The TLB does not have accessed nor write
 * protect.  We assume that if the TLB gets loaded with an entry it is
 * accessed, and overload the changed bit for write protect.  We use
 * two bits in the software pte that are supposed to be set to zero in
 * the TLB entry (24 and 25) for these indicators.  Although the level 1
 * descriptor contains the guarded and writethrough/copyback bits, we can
 * set these at the page level since they get copied from the Mx_TWC
 * register when the TLB entry is loaded.  We will use bit 27 for guard, since
 * that is where it exists in the MD_TWC, and bit 26 for writethrough.
 * These will get masked from the level 2 descriptor at TLB load time, and
 * copied to the MD_TWC before it gets loaded.
 * Large page sizes added.  We currently support two sizes, 4K and 8M.
 * This also allows a TLB handler optimization because we can directly
 * load the PMD into MD_TWC.  The 8M pages are only used for kernel
 * mapping of well known areas.  The PMD (PGD) entries contain control
 * flags in addition to the address, so care must be taken that the
 * software no longer assumes these are only pointers.
 */

/*
 * At present, all PowerPC 400-class processors share a similar TLB
 * architecture. The instruction and data sides share a unified,
 * 64-entry, fully-associative TLB which is maintained totally under
 * software control. In addition, the instruction side has a
 * hardware-managed, 4-entry, fully-associative TLB which serves as a
 * first level to the shared TLB. These two TLBs are known as the UTLB
 * and ITLB, respectively (see "mmu.h" for definitions).
 */

/*
 * The normal case is that PTEs are 32-bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two level page table layout where the pgdir is 8KB and the MS 13 bits
 * are an index to the second level table.  The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PMD_SHIFT determines the size of the area mapped by the PTE pages */
#define PMD_SHIFT       (PAGE_SHIFT + PTE_SHIFT)
#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT     PMD_SHIFT
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))

/*
 * entries per page directory level: our page-table tree is two-level, so
 * we don't really have any PMD directory.
 */
#define PTRS_PER_PTE    (1 << PTE_SHIFT)
#define PTRS_PER_PMD    1
#define PTRS_PER_PGD    (1 << (32 - PGDIR_SHIFT))
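
/*
 * Worked example (assuming the usual 32-bit configuration, PAGE_SHIFT = 12
 * for 4KB pages and PTE_SHIFT = 10 for one page of 1024 32-bit PTEs):
 *
 *   PMD_SHIFT    = 12 + 10 = 22, so one PTE page maps PMD_SIZE = 4MB;
 *   PGDIR_SHIFT  = 22 as well, since the pmd level is folded away;
 *   PTRS_PER_PGD = 1 << (32 - 22) = 1024 pgd entries, which at 4MB each
 *                  cover the full 4GB virtual address space.
 *
 * A minimal sketch of the resulting address decomposition (this helper is
 * illustrative only, not part of the kernel API):
 */
#ifndef __ASSEMBLY__
static inline void split_va_sketch(unsigned long va, unsigned long *pgd_idx,
                                   unsigned long *pte_idx)
{
        *pgd_idx = va >> PGDIR_SHIFT;                       /* top bits    */
        *pte_idx = (va >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); /* middle bits */
        /* the remaining low PAGE_SHIFT bits are the offset in the page */
}
#endif /* __ASSEMBLY__ */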
 
#define USER_PTRS_PER_PGD       (TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_PGD_NR       0

#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)

#define pte_ERROR(e) \
        printk("%s:%d: bad pte "PTE_FMT".\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
        printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
        printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#if defined(CONFIG_40x)

/* There are several potential gotchas here.  The 40x hardware TLBLO
   field looks like this:

   RPN.....................  0  0 EX WR ZSEL.......  W  I  M  G

   Where possible we make the Linux PTE bits match up with this:

   - bits 20 and 21 must be cleared, because we use 4k pages (4xx can
     support down to 1k pages), this is done in the TLBMiss exception
     handler.
   - We use only zones 0 (for kernel pages) and 1 (for user pages)
     of the 16 available.  Bits 24-26 of the TLB are cleared in the TLB
     miss handler.  Bit 27 is PAGE_USER, thus selecting the correct
     zone.
   - PRESENT *must* be in the bottom two bits because swap cache
     entries use the top 30 bits.  Because 4xx doesn't support SMP
     anyway, M is irrelevant so we borrow it for PAGE_PRESENT.  Bit 30
     is cleared in the TLB miss handler before the TLB entry is loaded.
   - All other bits of the PTE are loaded into TLBLO without
     modification, leaving us only the bits 20, 21, 24, 25, 26, 30 for
     software PTE bits.  We actually use bits 21, 24, 25, and
     30 respectively for the software bits: ACCESSED, DIRTY, RW, and
     PRESENT.
*/

/* Definitions for 4xx embedded chips. */
#define _PAGE_GUARDED   0x001   /* G: page is guarded from prefetch */
#define _PAGE_PRESENT   0x002   /* software: PTE contains a translation */
#define _PAGE_NO_CACHE  0x004   /* I: caching is inhibited */
#define _PAGE_WRITETHRU 0x008   /* W: caching is write-through */
#define _PAGE_USER      0x010   /* matches one of the zone permission bits */
#define _PAGE_RW        0x040   /* software: Writes permitted */
#define _PAGE_DIRTY     0x080   /* software: dirty page */
#define _PAGE_HWWRITE   0x100   /* hardware: Dirty & RW, set in exception */
#define _PAGE_HWEXEC    0x200   /* hardware: EX permission */
#define _PAGE_ACCESSED  0x400   /* software: R: page referenced */
#define _PMD_PRESENT    PAGE_MASK
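
/*
 * Illustrative compile-time check (not in the original source): the swap
 * encoding near the end of this file shifts PTE values right by two, so,
 * as the comment above says, _PAGE_PRESENT must stay within the bottom
 * two bits.  The array size goes negative, and compilation fails, if the
 * invariant is ever broken.
 */
#ifndef __ASSEMBLY__
typedef char __pte_present_fits_low_2_bits[(_PAGE_PRESENT & ~0x3) ? -1 : 1];
#endif /* __ASSEMBLY__ */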
 
#elif defined(CONFIG_44x)
/*
 * Definitions for PPC44x
 *
 * Because of the 3 word TLB entries to support 36-bit addressing,
 * the attributes are difficult to map in such a fashion that they
 * are easily loaded during exception processing.  I decided to
 * organize the entry so the ERPN is the only portion in the
 * upper word of the PTE and the attribute bits below are packed
 * in as sensibly as they can be in the area below a 4KB page size
 * oriented RPN.  This at least makes it easy to load the RPN and
 * ERPN fields in the TLB. -Matt
 *
 * Note that these bits preclude future use of a page size
 * less than 4KB.
 */
#define _PAGE_PRESENT   0x00000001              /* S: PTE valid */
#define _PAGE_RW        0x00000002              /* S: Write permission */
#define _PAGE_DIRTY     0x00000004              /* S: Page dirty */
#define _PAGE_ACCESSED  0x00000008              /* S: Page referenced */
#define _PAGE_HWWRITE   0x00000010              /* H: Dirty & RW */
#define _PAGE_HWEXEC    0x00000020              /* H: Execute permission */
#define _PAGE_USER      0x00000040              /* S: User page */
#define _PAGE_ENDIAN    0x00000080              /* H: E bit */
#define _PAGE_GUARDED   0x00000100              /* H: G bit */
#define _PAGE_COHERENT  0x00000200              /* H: M bit */
#define _PAGE_FILE      0x00000400              /* S: nonlinear file mapping */
#define _PAGE_NO_CACHE  0x00000400              /* H: I bit */
#define _PAGE_WRITETHRU 0x00000800              /* H: W bit */

/* TODO: Add large page lowmem mapping support */
#define _PMD_PRESENT    PAGE_MASK
#define _PMD_PRESENT_MASK (PAGE_MASK)
#define _PMD_BAD        (~PAGE_MASK)

/* ERPN in a PTE never gets cleared, ignore it */
#define _PTE_NONE_MASK 0xffffffff00000000ULL

#elif defined(CONFIG_8xx)
/* Definitions for 8xx embedded chips. */
#define _PAGE_PRESENT   0x0001  /* Page is valid */
#define _PAGE_NO_CACHE  0x0002  /* I: cache inhibit */
#define _PAGE_SHARED    0x0004  /* No ASID (context) compare */

/* These five software bits must be masked out when the entry is loaded
 * into the TLB.
 */
#define _PAGE_EXEC      0x0008  /* software: i-cache coherency required */
#define _PAGE_GUARDED   0x0010  /* software: guarded access */
#define _PAGE_DIRTY     0x0020  /* software: page changed */
#define _PAGE_RW        0x0040  /* software: user write access allowed */
#define _PAGE_ACCESSED  0x0080  /* software: page referenced */
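
/*
 * Taken together, these five software bits form the contiguous mask
 * 0x0008 | 0x0010 | 0x0020 | 0x0040 | 0x0080 = 0x00f8, which is the value
 * the TLB miss handler has to clear from the PTE before loading the
 * hardware entry.
 */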
 
/* Setting any bits in the nibble with the following two controls will
 * require a TLB exception handler change.  It is assumed unused bits
 * are always zero.
 */
#define _PAGE_HWWRITE   0x0100  /* h/w write enable: never set in Linux PTE */
#define _PAGE_USER      0x0800  /* One of the PP bits, the other is USER&~RW */

#define _PMD_PRESENT    PAGE_MASK
#define _PMD_PAGE_MASK  0x000c
#define _PMD_PAGE_8M    0x000c

/*
 * The 8xx TLB miss handler allegedly sets _PAGE_ACCESSED in the PTE
 * for an address even if _PAGE_PRESENT is not set, as a performance
 * optimization.  This is a bug if you ever want to use swap unless
 * _PAGE_ACCESSED is 2, which it isn't, or unless you have 8xx-specific
 * definitions for __swp_entry etc. below, which would be gross.
 *  -- paulus
 */
#define _PTE_NONE_MASK  _PAGE_ACCESSED

#else /* CONFIG_6xx */
/* Definitions for 60x, 740/750, etc. */
#define _PAGE_PRESENT   0x001   /* software: pte contains a translation */
#define _PAGE_HASHPTE   0x002   /* hash_page has made an HPTE for this pte */
#define _PAGE_USER      0x004   /* usermode access allowed */
#define _PAGE_GUARDED   0x008   /* G: prohibit speculative access */
#define _PAGE_COHERENT  0x010   /* M: enforce memory coherence (SMP systems) */
#define _PAGE_NO_CACHE  0x020   /* I: cache inhibit */
#define _PAGE_WRITETHRU 0x040   /* W: cache write-through */
#define _PAGE_DIRTY     0x080   /* C: page changed */
#define _PAGE_ACCESSED  0x100   /* R: page referenced */
#define _PAGE_EXEC      0x200   /* software: i-cache coherency required */
#define _PAGE_RW        0x400   /* software: user write access allowed */
#define _PMD_PRESENT    PAGE_MASK

#define _PTE_NONE_MASK  _PAGE_HASHPTE

#endif

/*
 * Some bits are only used on some cpu families...
 */
#ifndef _PAGE_HASHPTE
#define _PAGE_HASHPTE   0
#endif
#ifndef _PTE_NONE_MASK
#define _PTE_NONE_MASK  0
#endif
#ifndef _PAGE_SHARED
#define _PAGE_SHARED    0
#endif
#ifndef _PAGE_HWWRITE
#define _PAGE_HWWRITE   0
#endif
#ifndef _PAGE_HWEXEC
#define _PAGE_HWEXEC    0
#endif
#ifndef _PAGE_EXEC
#define _PAGE_EXEC      0
#endif

#define _PAGE_CHG_MASK  (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

/*
 * Note: the _PAGE_COHERENT bit automatically gets set in the hardware
 * PTE if CONFIG_SMP is defined (hash_page does this); there is no need
 * to have it in the Linux PTE, and in fact the bit could be reused for
 * another purpose.  -- paulus.
 */
#define _PAGE_BASE      _PAGE_PRESENT | _PAGE_ACCESSED
#define _PAGE_WRENABLE  _PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE

/*
 * 44x wants _PAGE_GUARDED on all kernel pages for various reasons.
 * Allegedly that doesn't hurt performance.  -- paulus
 */
#ifdef CONFIG_44x
#define _PAGE_KERNEL    _PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED | _PAGE_HWEXEC | _PAGE_GUARDED
#else
#define _PAGE_KERNEL    _PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED | _PAGE_HWEXEC
#endif

#define _PAGE_IO        _PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED

#define PAGE_NONE       __pgprot(_PAGE_BASE)
#define PAGE_READONLY   __pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_SHARED     __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
#define PAGE_SHARED_X   __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY       __pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X     __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)

#define PAGE_KERNEL     __pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_RO  __pgprot(_PAGE_BASE | _PAGE_SHARED)
#define PAGE_KERNEL_CI  __pgprot(_PAGE_IO)

/*
 * The PowerPC can only do execute protection on a segment (256MB) basis,
 * not on a page basis.  So we consider execute permission the same as read.
 * Also, write permissions imply read permissions.
 * This is the closest we can get..
 */
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY_X
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY_X
#define __P100  PAGE_READONLY
#define __P101  PAGE_READONLY_X
#define __P110  PAGE_COPY
#define __P111  PAGE_COPY_X

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY_X
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED_X
#define __S100  PAGE_READONLY
#define __S101  PAGE_READONLY_X
#define __S110  PAGE_SHARED
#define __S111  PAGE_SHARED_X
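
/*
 * The sixteen entries above feed the generic mm code, which builds
 * protection_map[] out of them (__Pxxx for private mappings, __Sxxx for
 * shared ones) and indexes it with the low vm_flags bits of a vma:
 *
 *      pgprot = protection_map[vm_flags & 0x0f];
 *
 * For example, a MAP_SHARED mapping with PROT_READ|PROT_WRITE carries
 * VM_SHARED|VM_WRITE|VM_READ = 0xb, selecting __S011, i.e. PAGE_SHARED_X
 * here (execute folds into read on PPC, as the comment above explains).
 */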
 
#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[1024];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#endif /* __ASSEMBLY__ */

#define pte_none(pte)           ((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
#define pte_present(pte)        (pte_val(pte) & _PAGE_PRESENT)
#define pte_clear(ptep)         do { set_pte((ptep), __pte(0)); } while (0)

#define pmd_none(pmd)           (!pmd_val(pmd))
#define pmd_bad(pmd)            ((pmd_val(pmd) & _PMD_PRESENT) == 0)
#define pmd_present(pmd)        ((pmd_val(pmd) & _PMD_PRESENT) != 0)
#define pmd_clear(pmdp)         do { pmd_val(*(pmdp)) = 0; } while (0)

#define pte_page(x)             (mem_map+(unsigned long)((pte_val(x)-PPC_MEMSTART) >> PAGE_SHIFT))

#ifndef __ASSEMBLY__
/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
static inline int pgd_none(pgd_t pgd)           { return 0; }
static inline int pgd_bad(pgd_t pgd)            { return 0; }
static inline int pgd_present(pgd_t pgd)        { return 1; }
#define pgd_clear(xp)                           do { } while (0)
#define pgd_page(pgd) \
        ((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)           { return pte_val(pte) & _PAGE_USER; }
static inline int pte_write(pte_t pte)          { return pte_val(pte) & _PAGE_RW; }
static inline int pte_exec(pte_t pte)           { return pte_val(pte) & _PAGE_EXEC; }
static inline int pte_dirty(pte_t pte)          { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)          { return pte_val(pte) & _PAGE_ACCESSED; }

static inline void pte_uncache(pte_t pte)       { pte_val(pte) |= _PAGE_NO_CACHE; }
static inline void pte_cache(pte_t pte)         { pte_val(pte) &= ~_PAGE_NO_CACHE; }

static inline pte_t pte_rdprotect(pte_t pte) {
        pte_val(pte) &= ~_PAGE_USER; return pte; }
static inline pte_t pte_wrprotect(pte_t pte) {
        pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_exprotect(pte_t pte) {
        pte_val(pte) &= ~_PAGE_EXEC; return pte; }
static inline pte_t pte_mkclean(pte_t pte) {
        pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkold(pte_t pte) {
        pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }

static inline pte_t pte_mkread(pte_t pte) {
        pte_val(pte) |= _PAGE_USER; return pte; }
static inline pte_t pte_mkexec(pte_t pte) {
        pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) {
        pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) {
        pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) {
        pte_val(pte) |= _PAGE_ACCESSED; return pte; }

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

static inline pte_t mk_pte_phys(phys_addr_t physpage, pgprot_t pgprot)
{
        pte_t pte;
        pte_val(pte) = physpage | pgprot_val(pgprot);
        return pte;
}

#define mk_pte(page,pgprot) \
({                                                                      \
        pte_t pte;                                                      \
        pte_val(pte) = (((page - mem_map) << PAGE_SHIFT) + PPC_MEMSTART) | pgprot_val(pgprot); \
        pte;                                                            \
})

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
        return pte;
}
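
/*
 * Brief usage sketch (the helper below is illustrative, not part of the
 * original header): mk_pte_phys() just ORs protection bits into a
 * page-aligned physical address, e.g. to build an uncached, guarded
 * entry for device memory, while pte_modify() keeps only _PAGE_CHG_MASK
 * (page number plus accessed/dirty) and substitutes new protection bits.
 */
static inline pte_t mk_io_pte_sketch(phys_addr_t pa)
{
        /* pa is assumed to be page-aligned */
        return mk_pte_phys(pa, __pgprot(_PAGE_IO));
}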
 
/*
 * Atomic PTE updates.
 *
 * pte_update clears and sets bits atomically, and returns
 * the old pte value.
 * The ((unsigned long)(p+1) - 4) hack is to get to the least-significant
 * 32 bits of the PTE regardless of whether PTEs are 32 or 64 bits.
 */
static inline unsigned long pte_update(pte_t *p, unsigned long clr,
                                       unsigned long set)
{
        unsigned long old, tmp;

        __asm__ __volatile__("\
1:      lwarx   %0,0,%3\n\
        andc    %1,%0,%4\n\
        or      %1,%1,%5\n"
        PPC405_ERR77(0,%3)
"       stwcx.  %1,0,%3\n\
        bne-    1b"
        : "=&r" (old), "=&r" (tmp), "=m" (*p)
        : "r" ((unsigned long)(p+1) - 4), "r" (clr), "r" (set), "m" (*p)
        : "cc" );
        return old;
}
 
551
/*
552
 * set_pte stores a linux PTE into the linux page table.
553
 * On machines which use an MMU hash table we avoid changing the
554
 * _PAGE_HASHPTE bit.
555
 */
556
static inline void set_pte(pte_t *ptep, pte_t pte)
557
{
558
#if _PAGE_HASHPTE != 0
559
        pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte) & ~_PAGE_HASHPTE);
560
#else
561
        *ptep = pte;
562
#endif
563
}
564
 
565
static inline int ptep_test_and_clear_young(pte_t *ptep)
566
{
567
        return (pte_update(ptep, _PAGE_ACCESSED, 0) & _PAGE_ACCESSED) != 0;
568
}
569
 
570
static inline int ptep_test_and_clear_dirty(pte_t *ptep)
571
{
572
        return (pte_update(ptep, (_PAGE_DIRTY | _PAGE_HWWRITE), 0) & _PAGE_DIRTY) != 0;
573
}
574
 
575
static inline pte_t ptep_get_and_clear(pte_t *ptep)
576
{
577
        return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
578
}
579
 
580
static inline void ptep_set_wrprotect(pte_t *ptep)
581
{
582
        pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), 0);
583
}
584
 
585
static inline void ptep_mkdirty(pte_t *ptep)
586
{
587
        pte_update(ptep, 0, _PAGE_DIRTY);
588
}
589
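
/*
 * Illustrative fragment (not from the original source) of how an unmap
 * path would use the helpers above: atomically fetch-and-clear the PTE,
 * then inspect the old value.
 */
static inline int unmap_one_sketch(pte_t *ptep)
{
        pte_t old = ptep_get_and_clear(ptep);

        if (!pte_present(old))
                return 0;               /* nothing was mapped here */
        /* nonzero tells the caller the page must be written back */
        return pte_dirty(old);
}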
 
#define pte_same(A,B)   (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)

#define pmd_page(pmd)   (pmd_val(pmd) & PAGE_MASK)

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_index(address)       ((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)  ((mm)->pgd + pgd_index(address))

/* Find an entry in the second-level page table.. */
static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
        return (pmd_t *) dir;
}

/* Find an entry in the third-level page table.. */
static inline pte_t * pte_offset(pmd_t * dir, unsigned long address)
{
        return (pte_t *) pmd_page(*dir) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}
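
/*
 * Putting the levels together: a minimal sketch (hypothetical helper; the
 * real equivalents live in the ppc mm code, cf. va_to_pte() above) of
 * walking from an mm to the PTE for an address:
 */
static inline pte_t *walk_pte_sketch(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd = pgd_offset(mm, addr);  /* index by addr >> PGDIR_SHIFT */
        pmd_t *pmd;

        if (pgd_none(*pgd))                 /* always false: pgd is folded */
                return NULL;
        pmd = pmd_offset(pgd, addr);        /* folded level: just a cast */
        if (pmd_none(*pmd) || pmd_bad(*pmd))
                return NULL;                /* no PTE page here */
        return pte_offset(pmd, addr);       /* entry within the PTE page */
}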
 
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

extern void paging_init(void);

/*
 * When flushing the tlb entry for a page, we also need to flush the hash
 * table entry.  flush_hash_page is assembler (for speed) in hashtable.S.
 */
extern int flush_hash_page(unsigned context, unsigned long va, pte_t *ptep);

/* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va, pte_t *ptep);

/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit, or the _PAGE_HASHPTE bit
 * (if used).  -- paulus
 */
#define SWP_TYPE(entry)                 ((entry).val & 0x3f)
#define SWP_OFFSET(entry)               ((entry).val >> 6)
#define SWP_ENTRY(type, offset)         ((swp_entry_t) { (type) | ((offset) << 6) })
#define pte_to_swp_entry(pte)           ((swp_entry_t) { pte_val(pte) >> 2 })
#define swp_entry_to_pte(x)             ((pte_t) { (x).val << 2 })
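
/*
 * Worked example of the encoding: SWP_ENTRY(3, 0x1234) packs to
 * 3 | (0x1234 << 6) = 0x48d03, and swp_entry_to_pte() shifts that left
 * by two, leaving PTE bits 0 and 1 clear.  Those are exactly the bits
 * where _PAGE_PRESENT and (on 6xx) _PAGE_HASHPTE live, which is why the
 * comment above insists they stay out of the swap encoding.
 */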
 
/* CONFIG_APUS */
/* For virtual address to physical address conversion */
extern void cache_clear(__u32 addr, int length);
extern void cache_push(__u32 addr, int length);
extern int mm_end_of_chunk (unsigned long addr, int len);
extern unsigned long iopa(unsigned long addr);
extern unsigned long mm_ptov(unsigned long addr) __attribute__ ((const));

/* Values for nocacheflag and cmode */
/* These are not used by the APUS kernel_map, but prevent
   compilation errors. */
#define IOMAP_FULL_CACHING      0
#define IOMAP_NOCACHE_SER       1
#define IOMAP_NOCACHE_NONSER    2
#define IOMAP_NO_COPYBACK       3

/*
 * Map some physical address range into the kernel address space.
 */
extern unsigned long kernel_map(unsigned long paddr, unsigned long size,
                                int nocacheflag, unsigned long *memavailp );

/*
 * Set cache mode of (kernel space) address range.
 */
extern void kernel_set_cachemode (unsigned long address, unsigned long size,
                                 unsigned int cmode);

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr)   (1)

#define io_remap_page_range remap_page_range

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()    do { } while (0)

#endif /* __ASSEMBLY__ */
#endif /* _PPC_PGTABLE_H */
#endif /* __KERNEL__ */
