or1k/trunk/rc203soc/sw/uClinux/include/asm-alpha/pgtable.h (OpenCores or1k Subversion repository, https://opencores.org/ocsvn/or1k/or1k/trunk)
#ifndef _ALPHA_PGTABLE_H
#define _ALPHA_PGTABLE_H

/*
 * This file contains the functions and defines necessary to modify and use
 * the alpha page table tree.
 *
 * This hopefully works with any standard alpha page-size, as defined
 * in <asm/page.h> (currently 8192).
 */

#include <asm/system.h>
#include <asm/mmu_context.h>

/* Caches aren't brain-dead on the alpha. */
#define flush_cache_all()                       do { } while (0)
#define flush_cache_mm(mm)                      do { } while (0)
#define flush_cache_range(mm, start, end)       do { } while (0)
#define flush_cache_page(vma, vmaddr)           do { } while (0)
#define flush_page_to_ram(page)                 do { } while (0)

/*
 * Force a context reload. This is needed when we
 * change the page table pointer or when we update
 * the ASN of the current process.
 */
static inline void reload_context(struct task_struct *task)
{
        __asm__ __volatile__(
                "bis %0,%0,$16\n\t"
                "call_pal %1"
                : /* no outputs */
                : "r" (&task->tss), "i" (PAL_swpctx)
                : "$0", "$1", "$16", "$22", "$23", "$24", "$25");
}

/*
 * Use a few helper functions to hide the ugly broken ASN
 * numbers on early alphas (ev4 and ev45)
 */
#ifdef BROKEN_ASN

#define flush_tlb_current(x) tbiap()
#define flush_tlb_other(x) do { } while (0)

#else

extern void get_new_asn_and_reload(struct task_struct *, struct mm_struct *);

#define flush_tlb_current(mm) get_new_asn_and_reload(current, mm)
#define flush_tlb_other(mm) do { (mm)->context = 0; } while (0)

#endif
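/*
 * Note (added for clarity, not in the original source): in the !BROKEN_ASN
 * case flush_tlb_other() never touches the TLB directly; it only clears the
 * mm's ASN context, so any stale translations are abandoned and a fresh ASN
 * is picked up the next time that mm is switched in.  flush_tlb_current()
 * has to act immediately because the current ASN is live in the TLB.
 */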

/*
 * Flush just one page in the current TLB set.
 * We need to be very careful about the icache here, there
 * is no way to invalidate a specific icache page..
 */
static inline void flush_tlb_current_page(struct mm_struct * mm,
        struct vm_area_struct *vma,
        unsigned long addr)
{
#ifdef BROKEN_ASN
        tbi(2 + ((vma->vm_flags & VM_EXEC) != 0), addr);
#else
        if (vma->vm_flags & VM_EXEC)
                flush_tlb_current(mm);
        else
                tbi(2, addr);
#endif
}

/*
 * Flush current user mapping.
 */
static inline void flush_tlb(void)
{
        flush_tlb_current(current->mm);
}

/*
 * Flush everything (kernel mapping may also have
 * changed due to vmalloc/vfree)
 */
static inline void flush_tlb_all(void)
{
        tbia();
}

/*
 * Flush a specified user mapping
 */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
        if (mm != current->mm)
                flush_tlb_other(mm);
        else
                flush_tlb_current(mm);
}

/*
 * Page-granular tlb flush.
 *
 * do a tbisd (type = 2) normally, and a tbis (type = 3)
 * if it is an executable mapping.  We want to avoid the
 * itlb flush, because that potentially also does an
 * icache flush.
 */
static inline void flush_tlb_page(struct vm_area_struct *vma,
        unsigned long addr)
{
        struct mm_struct * mm = vma->vm_mm;

        if (mm != current->mm)
                flush_tlb_other(mm);
        else
                flush_tlb_current_page(mm, vma, addr);
}

/*
 * Flush a specified range of user mapping: on the
 * alpha we flush the whole user tlb.
 */
static inline void flush_tlb_range(struct mm_struct *mm,
        unsigned long start, unsigned long end)
{
        flush_tlb_mm(mm);
}

/* Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT       (PAGE_SHIFT + (PAGE_SHIFT-3))
#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT     (PAGE_SHIFT + 2*(PAGE_SHIFT-3))
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))

/*
 * entries per page directory level: the alpha is three-level, with
 * all levels having a one-page page table.
 *
 * The PGD is special: the last entry is reserved for self-mapping.
 */
#define PTRS_PER_PTE    (1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PMD    (1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PGD    ((1UL << (PAGE_SHIFT-3))-1)

/* the no. of pointers that fit on a page: this will go away */
#define PTRS_PER_PAGE   (1UL << (PAGE_SHIFT-3))
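/*
 * Worked example (added for clarity, not in the original source), assuming
 * the usual PAGE_SHIFT of 13 (8 KB pages, 8-byte PTEs):
 *
 *   PTRS_PER_PTE = PTRS_PER_PMD = 2^10 = 1024 entries per table page
 *   PMD_SHIFT    = 13 + 10 = 23, so PMD_SIZE   = 8 MB
 *                  (one PTE page maps 1024 * 8 KB = 8 MB)
 *   PGDIR_SHIFT  = 13 + 20 = 33, so PGDIR_SIZE = 8 GB
 *                  (one PMD page maps 1024 * 8 MB = 8 GB)
 *   PTRS_PER_PGD = 1023 usable entries (the last one self-maps the PGD),
 *                  for roughly 8 TB of addressable space per page directory.
 */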

#define VMALLOC_START           0xFFFFFE0000000000
#define VMALLOC_VMADDR(x)       ((unsigned long)(x))

/*
 * OSF/1 PAL-code-imposed page table bits
 */
#define _PAGE_VALID     0x0001
#define _PAGE_FOR       0x0002  /* used for page protection (fault on read) */
#define _PAGE_FOW       0x0004  /* used for page protection (fault on write) */
#define _PAGE_FOE       0x0008  /* used for page protection (fault on exec) */
#define _PAGE_ASM       0x0010
#define _PAGE_KRE       0x0100  /* xxx - see below on the "accessed" bit */
#define _PAGE_URE       0x0200  /* xxx */
#define _PAGE_KWE       0x1000  /* used to do the dirty bit in software */
#define _PAGE_UWE       0x2000  /* used to do the dirty bit in software */

/* .. and these are ours ... */
#define _PAGE_DIRTY     0x20000
#define _PAGE_ACCESSED  0x40000

/*
 * NOTE! The "accessed" bit isn't necessarily exact: it can be kept exactly
 * by software (use the KRE/URE/KWE/UWE bits appropriately), but I'll fake it.
 * Under Linux/AXP, the "accessed" bit just means "read", and I'll just use
 * the KRE/URE bits to watch for it. That way we don't need to overload the
 * KWE/UWE bits with both handling dirty and accessed.
 *
 * Note that the kernel uses the accessed bit just to check whether to page
 * out a page or not, so it doesn't have to be exact anyway.
 */

#define __DIRTY_BITS    (_PAGE_DIRTY | _PAGE_KWE | _PAGE_UWE)
#define __ACCESS_BITS   (_PAGE_ACCESSED | _PAGE_KRE | _PAGE_URE)
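/*
 * Added for clarity (not in the original source): clearing __ACCESS_BITS
 * (see pte_mkold() below) removes the hardware read-enable bits KRE/URE as
 * well as the software _PAGE_ACCESSED bit, so the next access faults and
 * the fault handler can mark the page young again.  __DIRTY_BITS plays the
 * same trick for writes via the KWE/UWE bits.
 */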

#define _PFN_MASK       0xFFFFFFFF00000000

#define _PAGE_TABLE     (_PAGE_VALID | __DIRTY_BITS | __ACCESS_BITS)
#define _PAGE_CHG_MASK  (_PFN_MASK | __DIRTY_BITS | __ACCESS_BITS)

/*
 * All the normal masks have the "page accessed" bits on, as any time they are used,
 * the page is accessed. They are cleared only by the page-out routines
 */
#define PAGE_NONE       __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOR | _PAGE_FOW | _PAGE_FOE)
#define PAGE_SHARED     __pgprot(_PAGE_VALID | __ACCESS_BITS)
#define PAGE_COPY       __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
#define PAGE_READONLY   __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
#define PAGE_KERNEL     __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)

#define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))

#define _PAGE_P(x) _PAGE_NORMAL((x) | (((x) & _PAGE_FOW)?0:_PAGE_FOW))
#define _PAGE_S(x) _PAGE_NORMAL(x)

/*
 * The hardware can handle write-only mappings, but as the alpha
 * architecture does byte-wide writes with a read-modify-write
 * sequence, it's not practical to have write-without-read privs.
 * Thus the "-w- -> rw-" and "-wx -> rwx" mapping here (and in
 * arch/alpha/mm/fault.c)
 */
        /* xwr */
#define __P000  _PAGE_P(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
#define __P001  _PAGE_P(_PAGE_FOE | _PAGE_FOW)
#define __P010  _PAGE_P(_PAGE_FOE)
#define __P011  _PAGE_P(_PAGE_FOE)
#define __P100  _PAGE_P(_PAGE_FOW | _PAGE_FOR)
#define __P101  _PAGE_P(_PAGE_FOW)
#define __P110  _PAGE_P(0)
#define __P111  _PAGE_P(0)

#define __S000  _PAGE_S(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
#define __S001  _PAGE_S(_PAGE_FOE | _PAGE_FOW)
#define __S010  _PAGE_S(_PAGE_FOE)
#define __S011  _PAGE_S(_PAGE_FOE)
#define __S100  _PAGE_S(_PAGE_FOW | _PAGE_FOR)
#define __S101  _PAGE_S(_PAGE_FOW)
#define __S110  _PAGE_S(0)
#define __S111  _PAGE_S(0)
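/*
 * Worked example (added for clarity, not in the original source): a private
 * read/write mapping uses __P011 = _PAGE_P(_PAGE_FOE).  _PAGE_FOW is not in
 * the argument, so _PAGE_P() adds it and the entry expands to
 *
 *   __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE | _PAGE_FOW)
 *
 * i.e. the page starts out fault-on-write, and the first store traps so the
 * fault handler can do copy-on-write.  The shared variant __S011 omits the
 * extra _PAGE_FOW, so stores go straight through.
 */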

/*
 * BAD_PAGETABLE is used when we need a bogus page-table, while
 * BAD_PAGE is used for a bogus page.
 *
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern pte_t __bad_page(void);
extern pmd_t * __bad_pagetable(void);

extern unsigned long __zero_page(void);

#define BAD_PAGETABLE   __bad_pagetable()
#define BAD_PAGE        __bad_page()
#define ZERO_PAGE       0xfffffc000030A000

/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR                    (8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK                        (~(sizeof(void*)-1))

/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
#define SIZEOF_PTR_LOG2                 3

/* to find an entry in a page-table */
#define PAGE_PTR(address)               \
  ((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)

extern unsigned long high_memory;

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
extern inline pte_t mk_pte(unsigned long page, pgprot_t pgprot)
{ pte_t pte; pte_val(pte) = ((page-PAGE_OFFSET) << (32-PAGE_SHIFT)) | pgprot_val(pgprot); return pte; }

extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }

extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
{ pmd_val(*pmdp) = _PAGE_TABLE | ((((unsigned long) ptep) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }

extern inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{ pgd_val(*pgdp) = _PAGE_TABLE | ((((unsigned long) pmdp) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }

extern inline unsigned long pte_page(pte_t pte)
{ return PAGE_OFFSET + ((pte_val(pte) & _PFN_MASK) >> (32-PAGE_SHIFT)); }

extern inline unsigned long pmd_page(pmd_t pmd)
{ return PAGE_OFFSET + ((pmd_val(pmd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }

extern inline unsigned long pgd_page(pgd_t pgd)
{ return PAGE_OFFSET + ((pgd_val(pgd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }
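/*
 * Worked example (added for clarity, not in the original source), again with
 * PAGE_SHIFT == 13: the PFN lives in bits 32..63 (_PFN_MASK), so mk_pte()
 * shifts the physical address left by 32 - 13 = 19 bits, which is the same
 * as placing (physical address >> 13) at bit 32.  For the page at kernel
 * virtual address PAGE_OFFSET + 0x4000 (physical 0x4000, PFN 2):
 *
 *   pte_val = (0x4000 << 19) | pgprot_val(prot)
 *           = 0x0000000200000000 | pgprot_val(prot)
 *
 * pte_page() reverses this by masking with _PFN_MASK, shifting back down
 * and adding PAGE_OFFSET.
 */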

extern inline int pte_none(pte_t pte)           { return !pte_val(pte); }
extern inline int pte_present(pte_t pte)        { return pte_val(pte) & _PAGE_VALID; }
extern inline void pte_clear(pte_t *ptep)       { pte_val(*ptep) = 0; }

extern inline int pmd_none(pmd_t pmd)           { return !pmd_val(pmd); }
extern inline int pmd_bad(pmd_t pmd)            { return (pmd_val(pmd) & ~_PFN_MASK) != _PAGE_TABLE || pmd_page(pmd) > high_memory; }
extern inline int pmd_present(pmd_t pmd)        { return pmd_val(pmd) & _PAGE_VALID; }
extern inline void pmd_clear(pmd_t * pmdp)      { pmd_val(*pmdp) = 0; }

extern inline int pgd_none(pgd_t pgd)           { return !pgd_val(pgd); }
extern inline int pgd_bad(pgd_t pgd)            { return (pgd_val(pgd) & ~_PFN_MASK) != _PAGE_TABLE || pgd_page(pgd) > high_memory; }
extern inline int pgd_present(pgd_t pgd)        { return pgd_val(pgd) & _PAGE_VALID; }
extern inline void pgd_clear(pgd_t * pgdp)      { pgd_val(*pgdp) = 0; }

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
extern inline int pte_read(pte_t pte)           { return !(pte_val(pte) & _PAGE_FOR); }
extern inline int pte_write(pte_t pte)          { return !(pte_val(pte) & _PAGE_FOW); }
extern inline int pte_exec(pte_t pte)           { return !(pte_val(pte) & _PAGE_FOE); }
extern inline int pte_dirty(pte_t pte)          { return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte)          { return pte_val(pte) & _PAGE_ACCESSED; }

extern inline pte_t pte_wrprotect(pte_t pte)    { pte_val(pte) |= _PAGE_FOW; return pte; }
extern inline pte_t pte_rdprotect(pte_t pte)    { pte_val(pte) |= _PAGE_FOR; return pte; }
extern inline pte_t pte_exprotect(pte_t pte)    { pte_val(pte) |= _PAGE_FOE; return pte; }
extern inline pte_t pte_mkclean(pte_t pte)      { pte_val(pte) &= ~(__DIRTY_BITS); return pte; }
extern inline pte_t pte_mkold(pte_t pte)        { pte_val(pte) &= ~(__ACCESS_BITS); return pte; }
extern inline pte_t pte_mkwrite(pte_t pte)      { pte_val(pte) &= ~_PAGE_FOW; return pte; }
extern inline pte_t pte_mkread(pte_t pte)       { pte_val(pte) &= ~_PAGE_FOR; return pte; }
extern inline pte_t pte_mkexec(pte_t pte)       { pte_val(pte) &= ~_PAGE_FOE; return pte; }
extern inline pte_t pte_mkdirty(pte_t pte)      { pte_val(pte) |= __DIRTY_BITS; return pte; }
extern inline pte_t pte_mkyoung(pte_t pte)      { pte_val(pte) |= __ACCESS_BITS; return pte; }

/*
 * To set the page-dir. Note the self-mapping in the last entry
 *
 * Also note that if we update the current process ptbr, we need to
 * update the PAL-cached ptbr value as well.. There doesn't seem to
 * be any "wrptbr" PAL-insn, but we can do a dummy swpctx to ourself
 * instead.
 */
extern inline void SET_PAGE_DIR(struct task_struct * tsk, pgd_t * pgdir)
{
        pgd_val(pgdir[PTRS_PER_PGD]) = pte_val(mk_pte((unsigned long) pgdir, PAGE_KERNEL));
        tsk->tss.ptbr = ((unsigned long) pgdir - PAGE_OFFSET) >> PAGE_SHIFT;
        if (tsk == current)
                reload_context(tsk);
}

#define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))

/* to find an entry in a page-table-directory. */
extern inline pgd_t * pgd_offset(struct mm_struct * mm, unsigned long address)
{
        return mm->pgd + ((address >> PGDIR_SHIFT) & (PTRS_PER_PAGE - 1));
}

/* Find an entry in the second-level page table.. */
extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
        return (pmd_t *) pgd_page(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1));
}

/* Find an entry in the third-level page table.. */
extern inline pte_t * pte_offset(pmd_t * dir, unsigned long address)
{
        return (pte_t *) pmd_page(*dir) + ((address >> PAGE_SHIFT) & (PTRS_PER_PAGE - 1));
}
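/*
 * Illustrative sketch (added for clarity, not part of the original header):
 * how the three lookup helpers above compose into a full table walk.  The
 * function name is hypothetical; real callers such as the fault handler
 * open-code this walk with their own locking and error handling.
 */
extern inline pte_t * example_va_to_pte(struct mm_struct * mm, unsigned long address)
{
        pgd_t * pgd = pgd_offset(mm, address);  /* level 1: page directory */
        pmd_t * pmd;

        if (pgd_none(*pgd) || pgd_bad(*pgd))
                return NULL;
        pmd = pmd_offset(pgd, address);         /* level 2: page middle directory */
        if (pmd_none(*pmd) || pmd_bad(*pmd))
                return NULL;
        return pte_offset(pmd, address);        /* level 3: page table entry */
}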

/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */
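/*
 * Added for clarity (not in the original source): the allocation helpers
 * below check pmd_none()/pgd_none() a second time after get_free_page(),
 * because the allocation may sleep and another context may have installed
 * the missing table in the meantime; in that case the freshly allocated
 * page is simply freed again.
 */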
extern inline void pte_free_kernel(pte_t * pte)
{
        free_page((unsigned long) pte);
}

extern inline pte_t * pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
        address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
        if (pmd_none(*pmd)) {
                pte_t *page = (pte_t *) get_free_page(GFP_KERNEL);
                if (pmd_none(*pmd)) {
                        if (page) {
                                pmd_set(pmd, page);
                                return page + address;
                        }
                        pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
                        return NULL;
                }
                free_page((unsigned long) page);
        }
        if (pmd_bad(*pmd)) {
                printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
                pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
                return NULL;
        }
        return (pte_t *) pmd_page(*pmd) + address;
}

extern inline void pmd_free_kernel(pmd_t * pmd)
{
        free_page((unsigned long) pmd);
}

extern inline pmd_t * pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
{
        address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
        if (pgd_none(*pgd)) {
                pmd_t *page = (pmd_t *) get_free_page(GFP_KERNEL);
                if (pgd_none(*pgd)) {
                        if (page) {
                                pgd_set(pgd, page);
                                return page + address;
                        }
                        pgd_set(pgd, BAD_PAGETABLE);
                        return NULL;
                }
                free_page((unsigned long) page);
        }
        if (pgd_bad(*pgd)) {
                printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
                pgd_set(pgd, BAD_PAGETABLE);
                return NULL;
        }
        return (pmd_t *) pgd_page(*pgd) + address;
}

extern inline void pte_free(pte_t * pte)
{
        free_page((unsigned long) pte);
}

extern inline pte_t * pte_alloc(pmd_t *pmd, unsigned long address)
{
        address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
        if (pmd_none(*pmd)) {
                pte_t *page = (pte_t *) get_free_page(GFP_KERNEL);
                if (pmd_none(*pmd)) {
                        if (page) {
                                pmd_set(pmd, page);
                                return page + address;
                        }
                        pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
                        return NULL;
                }
                free_page((unsigned long) page);
        }
        if (pmd_bad(*pmd)) {
                printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
                pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
                return NULL;
        }
        return (pte_t *) pmd_page(*pmd) + address;
}

extern inline void pmd_free(pmd_t * pmd)
{
        free_page((unsigned long) pmd);
}

extern inline pmd_t * pmd_alloc(pgd_t *pgd, unsigned long address)
{
        address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
        if (pgd_none(*pgd)) {
                pmd_t *page = (pmd_t *) get_free_page(GFP_KERNEL);
                if (pgd_none(*pgd)) {
                        if (page) {
                                pgd_set(pgd, page);
                                return page + address;
                        }
                        pgd_set(pgd, BAD_PAGETABLE);
                        return NULL;
                }
                free_page((unsigned long) page);
        }
        if (pgd_bad(*pgd)) {
                printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
                pgd_set(pgd, BAD_PAGETABLE);
                return NULL;
        }
        return (pmd_t *) pgd_page(*pgd) + address;
}

extern inline void pgd_free(pgd_t * pgd)
{
        free_page((unsigned long) pgd);
}

extern inline pgd_t * pgd_alloc(void)
{
        return (pgd_t *) get_free_page(GFP_KERNEL);
}

extern pgd_t swapper_pg_dir[1024];

/*
 * The alpha doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
extern inline void update_mmu_cache(struct vm_area_struct * vma,
        unsigned long address, pte_t pte)
{
}

/*
 * Non-present pages: high 24 bits are offset, next 8 bits type,
 * low 32 bits zero..
 */
extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = (type << 32) | (offset << 40); return pte; }

#define SWP_TYPE(entry) (((entry) >> 32) & 0xff)
#define SWP_OFFSET(entry) ((entry) >> 40)
#define SWP_ENTRY(type,offset) pte_val(mk_swap_pte((type),(offset)))
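/*
 * Worked example (added for clarity, not in the original source):
 * SWP_ENTRY(2, 0x123) yields (2UL << 32) | (0x123UL << 40) = 0x0001230200000000,
 * from which SWP_TYPE() recovers 2 and SWP_OFFSET() recovers 0x123.  Bit 0
 * (_PAGE_VALID) is clear, so pte_present() is false for such an entry, which
 * is what distinguishes a swapped-out page from a live mapping.
 */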

#endif /* _ALPHA_PGTABLE_H */
