OpenCores Subversion repository: or1k
URL: https://opencores.org/ocsvn/or1k/or1k/trunk
File: or1k/trunk/linux/linux-2.4/include/asm-x86_64/pgtable.h (rev 1765)
#ifndef _X86_64_PGTABLE_H
#define _X86_64_PGTABLE_H

/*
 * This file contains the functions and defines necessary to modify and use
 * the x86-64 page table tree.
 *
 * x86-64 has a 4 level table setup. Generic linux MM only supports
 * three levels. The fourth level is currently a single static page that
 * is shared by everybody and just contains a pointer to the current
 * three level page setup at the beginning and some kernel mappings at
 * the end. For more details see Documentation/x86_64/mm.txt
 */
#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <asm/bitops.h>
#include <asm/pda.h>
#include <linux/threads.h>
#include <linux/config.h>

extern pgd_t level3_kernel_pgt[512];
extern pgd_t level3_physmem_pgt[512];
extern pgd_t level3_ident_pgt[512];
extern pmd_t level2_kernel_pgt[512];
extern pml4_t init_level4_pgt[];
extern pgd_t boot_vmalloc_pgt[];

extern void paging_init(void);

#define swapper_pg_dir NULL

/* Caches aren't brain-dead on the intel. */
#define flush_cache_all()                       do { } while (0)
#define flush_cache_mm(mm)                      do { } while (0)
#define flush_cache_range(mm, start, end)       do { } while (0)
#define flush_cache_page(vma, vmaddr)           do { } while (0)
#define flush_page_to_ram(page)                 do { } while (0)
#define flush_dcache_page(page)                 do { } while (0)
#define flush_icache_range(start, end)          do { } while (0)
#define flush_icache_page(vma,pg)               do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)

#define __flush_tlb()                                                   \
        do {                                                            \
                unsigned long tmpreg;                                   \
                                                                        \
                __asm__ __volatile__(                                   \
                        "movq %%cr3, %0;  # flush TLB \n"               \
                        "movq %0, %%cr3;              \n"               \
                        : "=r" (tmpreg)                                 \
                        :: "memory");                                   \
        } while (0)
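
/*
 * Reloading %cr3 as above flushes only non-global TLB entries; pages
 * mapped with _PAGE_GLOBAL survive it, which is why the global variant
 * below has to toggle CR4.PGE as well.
 */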

/*
 * Global pages have to be flushed a bit differently. Not a real
 * performance problem because this does not happen often.
 */
#define __flush_tlb_global()                                            \
        do {                                                            \
                unsigned long tmpreg;                                   \
                                                                        \
                __asm__ __volatile__(                                   \
                        "movq %1, %%cr4;  # turn off PGE     \n"        \
                        "movq %%cr3, %0;  # flush TLB        \n"        \
                        "movq %0, %%cr3;                     \n"        \
                        "movq %2, %%cr4;  # turn PGE back on \n"        \
                        : "=&r" (tmpreg)                                \
                        : "r" (mmu_cr4_features & ~(u64)X86_CR4_PGE),   \
                          "r" (mmu_cr4_features)                        \
                        : "memory");                                    \
        } while (0)

#define __flush_tlb_all() __flush_tlb_global()

#define __flush_tlb_one(addr) __asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))
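
/*
 * invlpg invalidates only the TLB entry for the given linear address;
 * unlike a cr3 reload it also removes global entries.
 */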

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#endif /* !__ASSEMBLY__ */

#define PML4_SHIFT      39
#define PTRS_PER_PML4   512

/*
 * PGDIR_SHIFT determines what a 3rd level page table entry can map
 */
#define PGDIR_SHIFT     30
#define PTRS_PER_PGD    512

/*
 * PMD_SHIFT determines the size of the area a middle-level
 * page table can map
 */
#define PMD_SHIFT       21
#define PTRS_PER_PMD    512

/*
 * entries per page directory level
 */
#define PTRS_PER_PTE    512
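
/*
 * For reference, with 4KB pages (PAGE_SHIFT == 12) a virtual address
 * therefore decomposes as:
 *   bits 39-47  PML4 index  (PTRS_PER_PML4 == 512)
 *   bits 30-38  PGD index   (PTRS_PER_PGD  == 512)
 *   bits 21-29  PMD index   (PTRS_PER_PMD  == 512)
 *   bits 12-20  PTE index   (PTRS_PER_PTE  == 512)
 *   bits  0-11  offset within the page
 */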

#define pte_ERROR(e) \
        printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), pte_val(e))
#define pmd_ERROR(e) \
        printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
#define pgd_ERROR(e) \
        printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))

#define pml4_none(x)    (!pml4_val(x))
#define pgd_none(x)     (!pgd_val(x))


extern inline int pgd_present(pgd_t pgd)        { return !pgd_none(pgd); }

static inline void set_pte(pte_t *dst, pte_t val)
{
        pte_val(*dst) = pte_val(val);
}

static inline void set_pmd(pmd_t *dst, pmd_t val)
{
        pmd_val(*dst) = pmd_val(val);
}

static inline void set_pgd(pgd_t *dst, pgd_t val)
{
        pgd_val(*dst) = pgd_val(val);
}

static inline void set_pml4(pml4_t *dst, pml4_t val)
{
        pml4_val(*dst) = pml4_val(val);
}

extern inline void __pgd_clear (pgd_t * pgd)
{
        set_pgd(pgd, __pgd(0));
}

extern inline void pgd_clear (pgd_t * pgd)
{
        __pgd_clear(pgd);
        __flush_tlb();
}

#define pgd_page(pgd) \
((unsigned long) __va(pgd_val(pgd) & PHYSICAL_PAGE_MASK))
#define __mk_pgd(address,prot) ((pgd_t) { (address) | pgprot_val(prot) })

/* Find an entry in the second-level page table.. */
#define pmd_offset(dir, address) ((pmd_t *) pgd_page(*(dir)) + \
                        __pmd_offset(address))
#define __mk_pmd(address,prot) ((pmd_t) { ((address) | pgprot_val(prot)) & __supported_pte_mask})

#define ptep_get_and_clear(xp)  __pte(xchg(&(xp)->pte, 0))
#define pte_same(a, b)          ((a).pte == (b).pte)
#define __mk_pte(page_nr,pgprot) \
        __pte(((page_nr) << PAGE_SHIFT) | (pgprot_val(pgprot) & __supported_pte_mask))
#define PML4_SIZE       (1UL << PML4_SHIFT)
#define PML4_MASK       (~(PML4_SIZE-1))
#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE-1))
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))

#define USER_PTRS_PER_PGD       (TASK_SIZE/PGDIR_SIZE)
#define FIRST_USER_PGD_NR       0

#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)

#define BOOT_USER_L4_PTRS 1
#define BOOT_KERNEL_L4_PTRS 511 /* But we will do it in the 4th level */



#ifndef __ASSEMBLY__
/* IO mappings are the 509th slot in the PML4. We map them high up to make sure
   they never appear in the node hash table in DISCONTIG configs. */
#define IOMAP_START      0xfffffe8000000000

/* vmalloc space occupies the 510th slot in the PML4. You can have up to 512GB of
   vmalloc/ioremap space. */

#define VMALLOC_START    0xffffff0000000000
#define VMALLOC_END      0xffffff7fffffffff
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
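
/*
 * Sanity check on the slot numbers quoted above:
 * pml4_index(IOMAP_START) == 509, pml4_index(VMALLOC_START) == 510,
 * and VMALLOC_END - VMALLOC_START + 1 == 1UL << 39 == 512GB.
 */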

#define MODULES_VADDR    0xffffffffa0000000
#define MODULES_END      0xffffffffafffffff
#define MODULES_LEN   (MODULES_END - MODULES_VADDR)

#define _PAGE_BIT_PRESENT       0
#define _PAGE_BIT_RW            1
#define _PAGE_BIT_USER          2
#define _PAGE_BIT_PWT           3       /* Write Through */
#define _PAGE_BIT_PCD           4       /* Cache disable */
#define _PAGE_BIT_ACCESSED      5
#define _PAGE_BIT_DIRTY         6
#define _PAGE_BIT_PSE           7       /* 2MB page */
#define _PAGE_BIT_GLOBAL        8       /* Global TLB entry PPro+ */
#define _PAGE_BIT_NX           63       /* No execute: only valid after cpuid check */

#define _PAGE_PRESENT   0x001
#define _PAGE_RW        0x002
#define _PAGE_USER      0x004
#define _PAGE_PWT       0x008
#define _PAGE_PCD       0x010
#define _PAGE_ACCESSED  0x020
#define _PAGE_DIRTY     0x040
#define _PAGE_PSE       0x080   /* 2MB page */
#define _PAGE_GLOBAL    0x100   /* Global TLB entry */
#define _PAGE_PGE       _PAGE_GLOBAL
#define _PAGE_NX        (1UL<<_PAGE_BIT_NX)

#define _PAGE_PROTNONE  0x080   /* If not present */
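
/*
 * _PAGE_PROTNONE shares the 0x080 bit with _PAGE_PSE: it is only
 * looked at when the present bit is clear, in which case the hardware
 * ignores the remaining bits anyway.  It marks PROT_NONE mappings so
 * that pte_present() below still reports them as present.
 */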

#define _PAGE_TABLE     (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE   (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)

#define KERNPG_TABLE    __pgprot(_KERNPG_TABLE)

#define _PAGE_CHG_MASK  (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#define PAGE_NONE       __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED     __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_SHARED_NOEXEC      __pgprot(_PAGE_NX | _PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC        __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_COPY PAGE_COPY_NOEXEC
#define PAGE_COPY_EXEC  \
        __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC \
        __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_EXECONLY PAGE_READONLY_EXEC

#define PAGE_LARGE (_PAGE_PSE|_PAGE_PRESENT)

#define __PAGE_KERNEL \
        (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
#define __PAGE_KERNEL_NOCACHE   (__PAGE_KERNEL | _PAGE_PCD)
#define __PAGE_KERNEL_RO        (__PAGE_KERNEL & ~_PAGE_RW)
#define __PAGE_KERNEL_VSYSCALL  (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define __PAGE_KERNEL_LARGE     (__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_NOCACHE     (__PAGE_KERNEL_LARGE | _PAGE_PCD)
#define __PAGE_KERNEL_EXECUTABLE (__PAGE_KERNEL & ~_PAGE_NX)
#define __PAGE_USER_NOCACHE_RO  \
        (_PAGE_PRESENT | _PAGE_USER | _PAGE_DIRTY | _PAGE_ACCESSED)

extern unsigned long __supported_pte_mask;
#define __PTE_SUPP(x) __pgprot((x) & __supported_pte_mask)

/* _NX is masked away in mk_pmd/pte */

#define PAGE_KERNEL __PTE_SUPP(__PAGE_KERNEL|_PAGE_GLOBAL)
#define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO|_PAGE_GLOBAL)
#define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE|_PAGE_GLOBAL)
#define PAGE_KERNEL_VSYSCALL __pgprot(__PAGE_KERNEL_VSYSCALL|_PAGE_GLOBAL)
#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE|_PAGE_GLOBAL)
#define PAGE_KERNEL_LARGE_NOCACHE __pgprot(__PAGE_KERNEL_LARGE_NOCACHE|_PAGE_GLOBAL)
#define PAGE_USER_NOCACHE_RO __pgprot(__PAGE_USER_NOCACHE_RO|_PAGE_GLOBAL)
#define PAGE_KERNEL_EXECUTABLE __pgprot(__PAGE_KERNEL_EXECUTABLE|_PAGE_GLOBAL)

/*         xwr */
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_EXECONLY
#define __P101  PAGE_READONLY_EXEC
#define __P110  PAGE_COPY_EXEC
#define __P111  PAGE_COPY_EXEC

/*         xwr */
#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED_NOEXEC
#define __S011  PAGE_SHARED_NOEXEC
#define __S100  PAGE_EXECONLY
#define __S101  PAGE_READONLY_EXEC
#define __S110  PAGE_SHARED
#define __S111  PAGE_SHARED
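
/*
 * The __P/__S tables above are indexed by the low three protection
 * bits of a mapping ("xwr": exec, write, read); __Pxxx is used for
 * private (copy-on-write) mappings and __Sxxx for shared ones.  E.g. a
 * PROT_READ|PROT_WRITE MAP_SHARED mapping selects __S011, i.e.
 * PAGE_SHARED_NOEXEC.
 */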

static inline unsigned long pgd_bad(pgd_t pgd)
{
        unsigned long val = pgd_val(pgd);
        val &= ~PAGE_MASK;
        val &= ~(_PAGE_USER | _PAGE_DIRTY);
        return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED);
}
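
/*
 * That is, a pgd entry is bad if any bits remain set once the
 * page-frame address and the legitimate flag bits (USER, DIRTY,
 * PRESENT, RW, ACCESSED) are masked off.
 */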

/*
 * Handling allocation failures during page table setup.
 */
extern void __handle_bad_pmd(pmd_t * pmd);
extern void __handle_bad_pmd_kernel(pmd_t * pmd);

#define pte_none(x)     (!pte_val(x))
#define pte_present(x)  (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_clear(xp)   do { set_pte(xp, __pte(0)); } while (0)

#define pmd_none(x)     (!pmd_val(x))
#define pmd_present(x)  (pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp)   do { set_pmd(xp, __pmd(0)); } while (0)
#define pmd_bad(x)      \
        ((pmd_val(x) & (~PAGE_MASK & (~_PAGE_USER))) != _KERNPG_TABLE)

#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))

#ifndef CONFIG_DISCONTIGMEM
#define pte_page(x) (pfn_to_page((pte_val(x) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT))
#endif
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
extern inline int pte_read(pte_t pte)           { return pte_val(pte) & _PAGE_USER; }
extern inline int pte_exec(pte_t pte)           { return pte_val(pte) & _PAGE_USER; }
extern inline int pte_dirty(pte_t pte)          { return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte)          { return pte_val(pte) & _PAGE_ACCESSED; }
extern inline int pte_write(pte_t pte)          { return pte_val(pte) & _PAGE_RW; }

extern inline pte_t pte_rdprotect(pte_t pte)    { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
extern inline pte_t pte_exprotect(pte_t pte)    { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
extern inline pte_t pte_mkclean(pte_t pte)      { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
extern inline pte_t pte_mkold(pte_t pte)        { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }
extern inline pte_t pte_wrprotect(pte_t pte)    { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_RW)); return pte; }
extern inline pte_t pte_mkread(pte_t pte)       { set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; }
extern inline pte_t pte_mkexec(pte_t pte)       { set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; }
extern inline pte_t pte_mkdirty(pte_t pte)      { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
extern inline pte_t pte_mkyoung(pte_t pte)      { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
extern inline pte_t pte_mkwrite(pte_t pte)      { set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW)); return pte; }
static inline int ptep_test_and_clear_dirty(pte_t *ptep)        { return test_and_clear_bit(_PAGE_BIT_DIRTY, ptep); }
static inline int ptep_test_and_clear_young(pte_t *ptep)        { return test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep); }
static inline void ptep_set_wrprotect(pte_t *ptep)              { clear_bit(_PAGE_BIT_RW, ptep); }
static inline void ptep_mkdirty(pte_t *ptep)                    { set_bit(_PAGE_BIT_DIRTY, ptep); }

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define mk_pte(page,pgprot)                                             \
({                                                                      \
        pte_t __pte;                                                    \
        unsigned long __val = page_to_phys(page);                       \
        __val |= pgprot_val(pgprot);                                    \
        __val &= __supported_pte_mask;                                  \
        set_pte(&__pte, __pte(__val));                                  \
        __pte;                                                          \
})
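
/*
 * Note that mk_pte() above and mk_pte_phys() below mask the result
 * with __supported_pte_mask, so _PAGE_NX is silently dropped on CPUs
 * that lack NX support (see the comment near PAGE_KERNEL).
 */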

/* This takes a physical page address that is used by the remapping functions */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
        pte_t __pte;
        set_pte(&__pte, __pte(physpage + (pgprot_val(pgprot) & __supported_pte_mask)));
        return __pte;
}

extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        set_pte(&pte,
                __pte(((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)) &
                      __supported_pte_mask));
        return pte;
}

#define page_pte(page) page_pte_prot(page, __pgprot(0))
#define __pmd_page(pmd) (__va(pmd_val(pmd) & PHYSICAL_PAGE_MASK))

/* to find an entry in a page-table-directory. */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))

#define __pgd_offset_k(pgd, address) ((pgd) + pgd_index(address))

#define current_pgd_offset_k(address) \
        __pgd_offset_k((pgd_t *)read_pda(level4_pgt), address)

/* This accesses the reference page table of the boot cpu.
   Other CPUs get synced lazily via the page fault handler. */
#define pgd_offset_k(address) \
        __pgd_offset_k( \
        (pgd_t *)__va(pml4_val(init_level4_pgt[pml4_index(address)]) & PHYSICAL_PAGE_MASK), address)

#define __pmd_offset(address) \
                (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/* Find an entry in the third-level page table.. */
#define __pte_offset(address) \
                (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset(dir, address) ((pte_t *) __pmd_page(*(dir)) + \
                        __pte_offset(address))

/* never use these in the common code */
#define pml4_page(level4) ((unsigned long) __va(pml4_val(level4) & PHYSICAL_PAGE_MASK))
#define pml4_index(address) (((address) >> PML4_SHIFT) & (PTRS_PER_PML4-1))
#define pml4_offset_k(address) ((pml4_t *)read_pda(level4_pgt) + pml4_index(address))
#define level3_offset_k(dir, address) ((pgd_t *) pml4_page(*(dir)) + pgd_index(address))
#define mk_kernel_pml4(address,prot) ((pml4_t){(address) | pgprot_val(prot)})
#define pml4_present(pml4) (pml4_val(pml4) & _PAGE_PRESENT)

/*
 * x86 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma,address,pte) do { } while (0)

/* Encode and de-code a swap entry */
#define SWP_TYPE(x)                     (((x).val >> 1) & 0x3f)
#define SWP_OFFSET(x)                   ((x).val >> 8)
#define SWP_ENTRY(type, offset)         ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
#define pte_to_swp_entry(pte)           ((swp_entry_t) { pte_val(pte) })
#define swp_entry_to_pte(x)             ((pte_t) { (x).val })
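
/*
 * Layout of a swap entry stored in a non-present pte: bit 0 stays
 * clear (the pte is not present), bits 1-6 hold the swap type and
 * bits 8 and up hold the offset into the swap area.  For example,
 * SWP_ENTRY(2, 5) encodes to (2 << 1) | (5 << 8) == 0x504.
 */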

struct page;

/*
 * Change attributes of a kernel page.
 */
extern int change_page_attr(struct page *page, int numpages, pgprot_t prot);

extern void clear_kernel_mapping(unsigned long addr, unsigned long size);

#endif /* !__ASSEMBLY__ */

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define PageSkip(page)          (0)
#define kern_addr_valid(kaddr)  ((kaddr)>>PAGE_SHIFT < max_mapnr)

#define io_remap_page_range remap_page_range

#define HAVE_ARCH_UNMAPPED_AREA

#define pgtable_cache_init()   do { } while (0)


#endif /* _X86_64_PGTABLE_H */
