#ifndef _PPC64_PGTABLE_H
#define _PPC64_PGTABLE_H

/*
 * This file contains the functions and defines necessary to modify and use
 * the ppc64 hashed page table.
 */

#ifndef __ASSEMBLY__
#include <asm/processor.h>              /* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>
#endif /* __ASSEMBLY__ */

/* PMD_SHIFT determines what a second-level page table entry can map */
#define PMD_SHIFT       (PAGE_SHIFT + PAGE_SHIFT - 3)
#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT     (PAGE_SHIFT + (PAGE_SHIFT - 3) + (PAGE_SHIFT - 2))
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))
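
/*
 * Worked example (a sketch; PAGE_SHIFT comes from asm/page.h and 4 KB
 * pages, i.e. PAGE_SHIFT == 12, are assumed here):
 *   PMD_SHIFT   = 12 + 12 - 3  = 21  ->  PMD_SIZE   = 2 MB
 *   PGDIR_SHIFT = 12 + 9 + 10  = 31  ->  PGDIR_SIZE = 2 GB
 */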

/*
 * Entries per page directory level.  The PTE level must use a 64b record
 * for each page table entry.  The PMD and PGD level use a 32b record for
 * each entry by assuming that each entry is page aligned.
 */
#define PTE_INDEX_SIZE  9
#define PMD_INDEX_SIZE  10
#define PGD_INDEX_SIZE  10

#define PTRS_PER_PTE    (1 << PTE_INDEX_SIZE)
#define PTRS_PER_PMD    (1 << PMD_INDEX_SIZE)
#define PTRS_PER_PGD    (1 << PGD_INDEX_SIZE)
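
/*
 * Sanity check (assuming 4 KB pages, as above): 512 PTEs x 8 bytes and
 * 1024 PMD/PGD entries x 4 bytes each fill exactly one 4 KB page,
 * matching the 64b/32b record sizes described in the comment above.
 */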

#if 0
/* DRENG / PPPBBB This is a compiler bug!!! */
#define USER_PTRS_PER_PGD       (TASK_SIZE / PGDIR_SIZE)
#else
#define USER_PTRS_PER_PGD       (1024)
#endif
#define FIRST_USER_PGD_NR       0

#define EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
                    PGD_INDEX_SIZE + PAGE_SHIFT)
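
/*
 * With 4 KB pages (an assumption, see above), EADDR_SIZE
 * = 9 + 10 + 10 + 12 = 41, i.e. the tables resolve a 41-bit effective
 * address; VALID_EA_BITS, used below but defined elsewhere, presumably
 * describes that same 41-bit span.
 */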

/*
 * Define the address range of the vmalloc VM area.
 */
#define VMALLOC_START (0xD000000000000000)
#define VMALLOC_VMADDR(x) ((unsigned long)(x))

#ifndef CONFIG_SHARED_MEMORY_ADDRESSING
#define VMALLOC_END   (VMALLOC_START + VALID_EA_BITS)
#else
#define VMALLOC_END   (VMALLOC_START + (VALID_EA_BITS >> 1))
#define SMALLOC_START (VMALLOC_START + (VALID_EA_BITS >> 1) + 1)
#define SMALLOC_END   (VMALLOC_START + VALID_EA_BITS)
#define SMALLOC_EA_SHIFT 40
#define SMALLOC_ESID_SHIFT 12
#endif

/*
 * Define the address range of the imalloc VM area.
 * (used for ioremap)
 */
#define IMALLOC_START (ioremap_bot)
#define IMALLOC_VMADDR(x) ((unsigned long)(x))
#define IMALLOC_BASE  (0xE000000000000000)
#define IMALLOC_END   (IMALLOC_BASE + VALID_EA_BITS)

/*
 * Define the address range mapped virt <-> physical
 */
#define KRANGE_START KERNELBASE
#define KRANGE_END   (KRANGE_START + VALID_EA_BITS)

/*
 * Define the user address range
 */
#define USER_START (0UL)
#define USER_END   (USER_START + VALID_EA_BITS)


/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */
#define _PAGE_PRESENT   0x001UL /* software: pte contains a translation */
#define _PAGE_USER      0x002UL /* matches one of the PP bits */
#define _PAGE_RW        0x004UL /* software: user write access allowed */
#define _PAGE_GUARDED   0x008UL
#define _PAGE_COHERENT  0x010UL /* M: enforce memory coherence (SMP systems) */
#define _PAGE_NO_CACHE  0x020UL /* I: cache inhibit */
#define _PAGE_WRITETHRU 0x040UL /* W: cache write-through */
#define _PAGE_DIRTY     0x080UL /* C: page changed */
#define _PAGE_ACCESSED  0x100UL /* R: page referenced */
#define _PAGE_HPTENOIX  0x200UL /* software: pte HPTE slot unknown */
#define _PAGE_HASHPTE   0x400UL /* software: pte has an associated HPTE */
#define _PAGE_EXEC      0x800UL /* software: i-cache coherence required */
#define _PAGE_SECONDARY 0x8000UL /* software: HPTE is in secondary group */
#define _PAGE_GROUP_IX  0x7000UL /* software: HPTE index within group */
/* Bits 0x7000 identify the index within an HPT Group */
#define _PAGE_HPTEFLAGS (_PAGE_HASHPTE | _PAGE_HPTENOIX | _PAGE_SECONDARY | _PAGE_GROUP_IX)
/* PAGE_MASK gives the right answer below, but only by accident */
/* It should be preserving the high 48 bits and then specifically */
/* preserving _PAGE_SECONDARY | _PAGE_GROUP_IX */
#define _PAGE_CHG_MASK  (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HPTEFLAGS)
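
/*
 * Worked value, from the definitions above: _PAGE_HPTEFLAGS
 * = 0x400 | 0x200 | 0x8000 | 0x7000 = 0xf600UL, i.e. every software
 * HPTE-bookkeeping bit that set_pte and friends below take care not
 * to disturb.
 */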

#define _PAGE_BASE      (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT)

#define _PAGE_WRENABLE  (_PAGE_RW | _PAGE_DIRTY)

/* __pgprot defined in asm-ppc64/page.h */
#define PAGE_NONE       __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)

#define PAGE_SHARED     __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER)
#define PAGE_SHARED_X   __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER | _PAGE_EXEC)
#define PAGE_COPY       __pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X     __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_READONLY   __pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_KERNEL     __pgprot(_PAGE_BASE | _PAGE_WRENABLE)
#define PAGE_KERNEL_CI  __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
                               _PAGE_WRENABLE | _PAGE_NO_CACHE | _PAGE_GUARDED)

/*
 * The PowerPC can only do execute protection on a segment (256MB) basis,
 * not on a page basis.  So we consider execute permission the same as read.
 * Also, write permissions imply read permissions.
 * This is the closest we can get..
 */
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY_X
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY_X
#define __P100  PAGE_READONLY
#define __P101  PAGE_READONLY_X
#define __P110  PAGE_COPY
#define __P111  PAGE_COPY_X

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY_X
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED_X
#define __S100  PAGE_READONLY
#define __S101  PAGE_READONLY_X
#define __S110  PAGE_SHARED
#define __S111  PAGE_SHARED_X
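
/*
 * Interpretation note (hedged): the three digits form the binary index
 * (VM_EXEC, VM_WRITE, VM_READ), so a private PROT_READ-only mapping
 * uses __P001 = PAGE_READONLY_X; exec is granted whenever read is, per
 * the segment-granularity limitation described above.
 */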

#ifndef __ASSEMBLY__

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page))
#endif /* __ASSEMBLY__ */

/* shift to put page number into pte */
#define PTE_SHIFT (16)

#ifndef __ASSEMBLY__

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * mk_pte_phys takes a physical address as input
 *
 * mk_pte takes a (struct page *) as input
 */

#define mk_pte_phys(physpage,pgprot)                                      \
({                                                                        \
        pte_t pte;                                                        \
        pte_val(pte) = (((physpage)<<(PTE_SHIFT-PAGE_SHIFT)) | pgprot_val(pgprot)); \
        pte;                                                              \
})

#define mk_pte(page,pgprot)                                               \
({                                                                        \
        pte_t pte;                                                        \
        pte_val(pte) = ((unsigned long)((page) - mem_map) << PTE_SHIFT) |   \
                        pgprot_val(pgprot);                               \
        pte;                                                              \
})
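
#if 0
/*
 * Usage sketch (illustrative only, not compiled): build a writable
 * kernel PTE for a page-aligned physical address.  The macro's
 * << (PTE_SHIFT - PAGE_SHIFT) shift moves the physical page number up
 * to bit 16, leaving the low 16 bits for the _PAGE_* protection flags.
 */
pte_t example = mk_pte_phys(0x2000UL, PAGE_KERNEL);
#endif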

#define pte_modify(_pte, newprot) \
  (__pte((pte_val(_pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)))

#define pte_none(pte)           ((pte_val(pte) & ~_PAGE_HPTEFLAGS) == 0)
#define pte_present(pte)        (pte_val(pte) & _PAGE_PRESENT)

/* pte_clear moved to later in this file */

#define pte_pagenr(x)           ((unsigned long)((pte_val(x) >> PTE_SHIFT)))
#define pte_page(x)             (mem_map+pte_pagenr(x))

#define pmd_set(pmdp, ptep)     (pmd_val(*(pmdp)) = (__ba_to_bpn(ptep)))
#define pmd_none(pmd)           (!pmd_val(pmd))
#define pmd_bad(pmd)            ((pmd_val(pmd)) == 0)
#define pmd_present(pmd)        ((pmd_val(pmd)) != 0)
#define pmd_clear(pmdp)         (pmd_val(*(pmdp)) = 0)
#define pmd_page(pmd)           (__bpn_to_ba(pmd_val(pmd)))
#define pgd_set(pgdp, pmdp)     (pgd_val(*(pgdp)) = (__ba_to_bpn(pmdp)))
#define pgd_none(pgd)           (!pgd_val(pgd))
#define pgd_bad(pgd)            ((pgd_val(pgd)) == 0)
#define pgd_present(pgd)        (pgd_val(pgd) != 0UL)
#define pgd_clear(pgdp)         (pgd_val(*(pgdp)) = 0UL)
#define pgd_page(pgd)           (__bpn_to_ba(pgd_val(pgd)))

/*
 * Find an entry in a page-table-directory.  We combine the address region
 * (the high order N bits) and the pgd portion of the address.
 */
#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & (PTRS_PER_PGD -1))

#define pgd_offset(mm, address)  ((mm)->pgd + pgd_index(address))

/* Find an entry in the second-level page table.. */
#define pmd_offset(dir,addr) \
  ((pmd_t *) pgd_page(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))

/* Find an entry in the third-level page table.. */
#define pte_offset(dir,addr) \
  ((pte_t *) pmd_page(*(dir)) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
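
/*
 * Decomposition sketch (assuming 4 KB pages): a 41-bit effective
 * address splits as bits 40-31 -> pgd index, 30-21 -> pmd index,
 * 20-12 -> pte index, 11-0 -> byte offset within the page, matching
 * PGDIR_SHIFT = 31, PMD_SHIFT = 21 and PAGE_SHIFT = 12 above.
 */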

/* to find an entry in a kernel page-table-directory */
/* This now only contains the vmalloc pages */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* to find an entry in the ioremap page-table-directory */
#define pgd_offset_i(address) (ioremap_pgd + pgd_index(address))

#define pages_to_mb(x)          ((x) >> (20-PAGE_SHIFT))

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)  { return pte_val(pte) & _PAGE_USER;}
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW;}
static inline int pte_exec(pte_t pte)  { return pte_val(pte) & _PAGE_EXEC;}
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY;}
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;}

static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
static inline void pte_cache(pte_t pte)   { pte_val(pte) &= ~_PAGE_NO_CACHE; }

static inline pte_t pte_rdprotect(pte_t pte) {
        pte_val(pte) &= ~_PAGE_USER; return pte; }
static inline pte_t pte_exprotect(pte_t pte) {
        pte_val(pte) &= ~_PAGE_EXEC; return pte; }
static inline pte_t pte_wrprotect(pte_t pte) {
        pte_val(pte) &= ~(_PAGE_RW); return pte; }
static inline pte_t pte_mkclean(pte_t pte) {
        pte_val(pte) &= ~(_PAGE_DIRTY); return pte; }
static inline pte_t pte_mkold(pte_t pte) {
        pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }

static inline pte_t pte_mkread(pte_t pte) {
        pte_val(pte) |= _PAGE_USER; return pte; }
static inline pte_t pte_mkexec(pte_t pte) {
        pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) {
        pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) {
        pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) {
        pte_val(pte) |= _PAGE_ACCESSED; return pte; }

/* Atomic PTE updates */

static inline unsigned long pte_update( pte_t *p, unsigned long clr,
                                        unsigned long set )
{
        unsigned long old, tmp;

        __asm__ __volatile__("\n\
1:      ldarx   %0,0,%3 \n\
        andc    %1,%0,%4 \n\
        or      %1,%1,%5 \n\
        stdcx.  %1,0,%3 \n\
        bne-    1b"
        : "=&r" (old), "=&r" (tmp), "=m" (*p)
        : "r" (p), "r" (clr), "r" (set), "m" (*p)
        : "cc" );
        return old;
}
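
/*
 * The ldarx/stdcx. pair above forms a load-reserve/store-conditional
 * retry loop.  Semantically (a non-atomic C sketch, for illustration
 * only):
 *
 *      old = *p;
 *      *p  = (old & ~clr) | set;
 *      return old;
 *
 * The bne- retries whenever another CPU wrote the doubleword between
 * the reservation and the conditional store.
 */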

static inline int ptep_test_and_clear_young(pte_t *ptep)
{
        return (pte_update(ptep, _PAGE_ACCESSED, 0) & _PAGE_ACCESSED) != 0;
}

static inline int ptep_test_and_clear_dirty(pte_t *ptep)
{
        return (pte_update(ptep, _PAGE_DIRTY, 0) & _PAGE_DIRTY) != 0;
}

static inline pte_t ptep_get_and_clear(pte_t *ptep)
{
        return __pte(pte_update(ptep, ~_PAGE_HPTEFLAGS, 0));
}

static inline void ptep_set_wrprotect(pte_t *ptep)
{
        pte_update(ptep, _PAGE_RW, 0);
}

static inline void ptep_mkdirty(pte_t *ptep)
{
        pte_update(ptep, 0, _PAGE_DIRTY);
}

#define pte_same(A,B)   (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)

/*
 * set_pte stores a linux PTE into the linux page table.
 * On machines which use an MMU hash table we avoid changing the
 * _PAGE_HASHPTE bit.
 */
static inline void set_pte(pte_t *ptep, pte_t pte)
{
        pte_update(ptep, ~_PAGE_HPTEFLAGS, pte_val(pte) & ~_PAGE_HPTEFLAGS);
}

static inline void pte_clear(pte_t * ptep)
{
        pte_update(ptep, ~_PAGE_HPTEFLAGS, 0);
}

struct mm_struct;
struct vm_area_struct;
extern void local_flush_tlb_all(void);
extern void local_flush_tlb_mm(struct mm_struct *mm);
extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void local_flush_tlb_range(struct mm_struct *mm, unsigned long start,
                            unsigned long end);

#define flush_tlb_all local_flush_tlb_all
#define flush_tlb_mm local_flush_tlb_mm
#define flush_tlb_page local_flush_tlb_page
#define flush_tlb_range local_flush_tlb_range

static inline void flush_tlb_pgtables(struct mm_struct *mm,
                                      unsigned long start, unsigned long end)
{
        /* PPC has hw page tables. */
}

/*
 * No cache flushing is required when address mappings are
 * changed, because the caches on PowerPCs are physically
 * addressed.
 */
#define flush_cache_all()               do { } while (0)
#define flush_cache_mm(mm)              do { } while (0)
#define flush_cache_range(mm, a, b)     do { } while (0)
#define flush_cache_page(vma, p)        do { } while (0)
#define flush_page_to_ram(page)         do { } while (0)

extern void flush_icache_user_range(struct vm_area_struct *vma,
                        struct page *page, unsigned long addr, int len);
extern void flush_icache_range(unsigned long, unsigned long);
extern void __flush_dcache_icache(void *page_va);
extern void flush_dcache_page(struct page *page);
extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);

extern unsigned long va_to_phys(unsigned long address);
extern pte_t *va_to_pte(unsigned long address);
extern unsigned long ioremap_bot, ioremap_base;

#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)

#define pte_ERROR(e) \
        printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
        printk("%s:%d: bad pmd %08x.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
        printk("%s:%d: bad pgd %08x.\n", __FILE__, __LINE__, pgd_val(e))

extern pgd_t swapper_pg_dir[1024];
extern pgd_t ioremap_dir[1024];

extern void paging_init(void);

/*
 * Page tables may have changed.  We don't need to do anything here
 * as entries are faulted into the hash table by the low-level
 * data/instruction access exception handlers.
 */
/*
 * We won't be able to use update_mmu_cache to update the
 * hardware page table because we need to update the pte
 * as well, but we don't get the address of the pte, only
 * its value.
 */
#define update_mmu_cache(vma, addr, pte)        do { } while (0)

extern void flush_hash_segments(unsigned low_vsid, unsigned high_vsid);
extern void flush_hash_page(unsigned long context, unsigned long ea, pte_t *ptep);
extern void build_valid_hpte(unsigned long vsid, unsigned long ea,
                             unsigned long pa, pte_t * ptep,
                             unsigned hpteflags, unsigned bolted );

/* Encode and de-code a swap entry */
#define SWP_TYPE(entry)                 (((entry).val >> 1) & 0x3f)
#define SWP_OFFSET(entry)               ((entry).val >> 8)
#define SWP_ENTRY(type, offset)         ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
#define pte_to_swp_entry(pte)           ((swp_entry_t) { pte_val(pte) >> PTE_SHIFT })
#define swp_entry_to_pte(x)             ((pte_t) { (x).val << PTE_SHIFT })
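
/*
 * Worked example: SWP_ENTRY(3, 0x1234) gives
 * val = (3 << 1) | (0x1234 << 8) = 0x123406; SWP_TYPE recovers
 * (val >> 1) & 0x3f = 3 and SWP_OFFSET recovers val >> 8 = 0x1234.
 * Bit 0 (_PAGE_PRESENT) stays clear, so the pte reads as not present.
 */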

/*
 * kern_addr_valid is intended to indicate whether an address is a valid
 * kernel address.  Most 32-bit archs define it as always true (like this)
 * but most 64-bit archs actually perform a test.  What should we do here?
 * The only use is in fs/ncpfs/dir.c
 */
#define kern_addr_valid(addr)   (1)

#ifdef CONFIG_PPC_ISERIES
#define io_remap_page_range remap_page_range
#else
extern int io_remap_page_range(unsigned long from, unsigned long to, unsigned long size, pgprot_t prot);
#endif

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()    do { } while (0)

extern void updateBoltedHptePP(unsigned long newpp, unsigned long ea);
extern void hpte_init_pSeries(void);
extern void hpte_init_iSeries(void);

extern void make_pte(HPTE * htab, unsigned long va, unsigned long pa,
                int mode, unsigned long hash_mask, int large);

#endif /* __ASSEMBLY__ */
#endif /* _PPC64_PGTABLE_H */