/* * Last edited: Nov  7 23:44 1995 (cort) */
#ifndef _PPC_PGTABLE_H
#define _PPC_PGTABLE_H

#include <asm/page.h>
#include <asm/mmu.h>

/*
 * Memory management on the PowerPC is a software emulation of the i386
 * MMU folded onto the PowerPC hardware MMU.  The emulated version looks
 * and behaves like the two-level i386 MMU.  Entries from these tables
 * are merged into the PowerPC hashed MMU tables, on demand, treating the
 * hashed tables like a special cache.
 *
 * Since the PowerPC does not have separate kernel and user address spaces,
 * the user virtual address space must be a [proper] subset of the kernel
 * space.  Thus, all tasks will have a specific virtual mapping for the
 * user virtual space and a common mapping for the kernel space.  The
 * simplest way to split this was literally in half.  Also, life is so
 * much simpler for the kernel if the machine hardware resources are
 * always mapped in.  Thus, some additional space is given up to the
 * kernel space to accommodate this.
 *
 * CAUTION! Some of the trade-offs make sense for the PreP platform on
 * which this code was originally developed.  When it migrates to other
 * PowerPC environments, some of the assumptions may fail and the whole
 * setup may need to be reevaluated.
 *
 * On the PowerPC, page translations are kept in a hashed table.  There
 * is exactly one of these tables [although the architecture supports
 * an arbitrary number].  Page table entries move in/out of this hashed
 * structure on demand, with the kernel filling in entries as they are
 * needed.  Just where a page table entry hits in the hashed table is a
 * function of the hashing which is in turn based on the upper 4 bits
 * of the logical address.  These 4 bits address a "virtual segment id"
 * which is unique per task/page combination for user addresses and
 * fixed for the kernel addresses.  Thus, the kernel space can be simply
 * shared [indeed at low overhead] among all tasks.
 *
 * The basic virtual address space is thus:
 *
 * 0x0XXXXXXX --+
 * 0x1XXXXXXX   |
 * 0x2XXXXXXX   |  User address space.
 * 0x3XXXXXXX   |
 * 0x4XXXXXXX   |
 * 0x5XXXXXXX   |
 * 0x6XXXXXXX   |
 * 0x7XXXXXXX --+
 * 0x8XXXXXXX      PCI/ISA I/O space
 * 0x9XXXXXXX --+
 * 0xAXXXXXXX   |  Kernel virtual memory
 * 0xBXXXXXXX --+
 * 0xCXXXXXXX      PCI/ISA Memory space
 * 0xDXXXXXXX
 * 0xEXXXXXXX
 * 0xFXXXXXXX      Board I/O space
 *
 * CAUTION!  One of the real problems here is keeping the software
 * managed tables coherent with the hardware hashed tables.  When
 * the software decides to update the table, it's normally easy to
 * update the hardware table.  But when the hardware tables need to be
 * changed, e.g. as the result of a page fault, it's more difficult
 * to reflect those changes back into the software entries.  Currently,
 * this process is quite crude, with updates causing the entire set
 * of tables to become invalidated.  Some performance could certainly
 * be regained by improving this.
 *
 * The Linux memory management assumes a three-level page table setup. On
 * the i386, we use that, but "fold" the mid level into the top-level page
 * table, so that we physically have the same two-level page table as the
 * i386 mmu expects.
 *
 * This file contains the functions and defines necessary to modify and use
 * the i386 page table tree.
 */

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT       22
#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT     22
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))

/*
 * entries per page directory level: the i386 is two-level, so
 * we don't really have any PMD directory physically.
 */
#define PTRS_PER_PTE    1024
#define PTRS_PER_PMD    1
#define PTRS_PER_PGD    1024
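
/*
 * A minimal sketch of the address arithmetic implied by the constants
 * above (hypothetical helper, kept under #if 0 so it is never built):
 * the top bits index the page directory, the next bits index the page
 * table, and the remainder is the byte offset within the page.
 */
#if 0
extern inline void split_address_example(unsigned long address)
{
        unsigned long dir_index = address >> PGDIR_SHIFT;                       /* which pgd entry */
        unsigned long pte_index = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); /* which pte within it */
        unsigned long offset    = address & ~PAGE_MASK;                         /* byte within the page */

        printk("va %08lx -> dir %lu, pte %lu, offset %lu\n",
               address, dir_index, pte_index, offset);
}
#endif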

/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET  (8*1024*1024)
#define VMALLOC_START ((high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
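
/*
 * Worked example (assuming 16MB of RAM, i.e. high_memory == 0x01000000):
 *
 *   VMALLOC_START = (0x01000000 + 0x00800000) & ~0x007FFFFF = 0x01800000
 *
 * that is, high_memory + VMALLOC_OFFSET rounded down to a VMALLOC_OFFSET
 * boundary.
 */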

#define _PAGE_PRESENT   0x001
#define _PAGE_RW        0x002
#define _PAGE_USER      0x004
#define _PAGE_PCD       0x010
#define _PAGE_ACCESSED  0x020
#define _PAGE_DIRTY     0x040
#define _PAGE_COW       0x200   /* implemented in software (one of the AVL bits) */
#define _PAGE_NO_CACHE  0x400

#define _PAGE_TABLE     (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK  (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#define PAGE_NONE       __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
#define PAGE_SHARED     __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY       __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_COW)
#define PAGE_READONLY   __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_KERNEL     __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_KERNEL_NO_CACHE    __pgprot(_PAGE_NO_CACHE | _PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)

/*
 * The i386 can't do page protection for execute, and treats it the same as read.
 * Also, write permissions imply read permissions. This is the closest we can get..
 */
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY
#define __P101  PAGE_READONLY
#define __P110  PAGE_COPY
#define __P111  PAGE_COPY

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED
#define __S100  PAGE_READONLY
#define __S101  PAGE_READONLY
#define __S110  PAGE_SHARED
#define __S111  PAGE_SHARED
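
/*
 * A minimal sketch of how the tables above are meant to be read (the
 * actual lookup lives in the generic mm code, not here): the three
 * digits are the exec/write/read bits of a mapping, __Pxxx is used for
 * private mappings and __Sxxx for shared ones.  Hypothetical example,
 * kept under #if 0.
 */
#if 0
extern inline void prot_table_example(void)
{
        pgprot_t private_rw = __P011;   /* read+write, private -> PAGE_COPY (copy-on-write) */
        pgprot_t shared_rw  = __S011;   /* read+write, shared  -> PAGE_SHARED (really writable) */
}
#endif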

/*
 * TLB invalidation:
 *
 *  - invalidate() invalidates the current mm struct TLBs
 *  - invalidate_all() invalidates all processes' TLBs
 *  - invalidate_mm(mm) invalidates the specified mm context TLBs
 *  - invalidate_page(mm, vmaddr) invalidates one page
 *  - invalidate_range(mm, start, end) invalidates a range of pages
 *
 * FIXME: This could be done much better!
 */

#define invalidate_all() do { printk("invalidate_all()\n"); invalidate(); } while (0)
#if 0
#define invalidate_mm(mm_struct) \
do { if ((mm_struct) == current->mm) invalidate(); else printk("Can't invalidate_mm(%x)\n", mm_struct);} while (0)
#define invalidate_page(mm_struct,addr) \
do { if ((mm_struct) == current->mm) invalidate(); else printk("Can't invalidate_page(%x,%x)\n", mm_struct, addr);} while (0)
#define invalidate_range(mm_struct,start,end) \
do { if ((mm_struct) == current->mm) invalidate(); else printk("Can't invalidate_range(%x,%x,%x)\n", mm_struct, start, end);} while (0)
#endif

/*
 * Define this if things work differently on an i386 and an i486:
 * it will (on an i486) warn about kernel memory accesses that are
 * done without a 'verify_area(VERIFY_WRITE,..)'
 */
#undef CONFIG_TEST_VERIFY_AREA

/* page table for 0-4MB for everybody */
extern unsigned long pg0[1024];

/*
 * BAD_PAGETABLE is used when we need a bogus page-table, while
 * BAD_PAGE is used for a bogus page.
 *
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern pte_t __bad_page(void);
extern pte_t * __bad_pagetable(void);

extern unsigned long __zero_page(void);

#define BAD_PAGETABLE __bad_pagetable()
#define BAD_PAGE __bad_page()
#define ZERO_PAGE __zero_page()

/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR                    (8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK                        (~(sizeof(void*)-1))

/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
/* 64-bit machines, beware!  SRB. */
#define SIZEOF_PTR_LOG2                 2

/* to find an entry in a page-table */
#define PAGE_PTR(address) \
((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)

/* to set the page-dir */
/* tsk is a task_struct and pgdir is a pte_t */
#define SET_PAGE_DIR(tsk,pgdir) \
do { \
        (tsk)->tss.pg_tables = (unsigned long *)(pgdir); \
        if ((tsk) == current) \
        { \
/*_printk("Change page tables = %x\n", pgdir);*/ \
        } \
} while (0)

extern unsigned long high_memory;

extern inline int pte_none(pte_t pte)           { return !pte_val(pte); }
extern inline int pte_present(pte_t pte)        { return pte_val(pte) & _PAGE_PRESENT; }
#if 0
extern inline int pte_inuse(pte_t *ptep)        { return mem_map[MAP_NR(ptep)].reserved; }
/*extern inline int pte_inuse(pte_t *ptep)      { return mem_map[MAP_NR(ptep)] != 1; }*/
#endif
extern inline void pte_clear(pte_t *ptep)       { pte_val(*ptep) = 0; }
#if 0
extern inline void pte_reuse(pte_t * ptep)
{
        if (!mem_map[MAP_NR(ptep)].reserved)
                mem_map[MAP_NR(ptep)].count++;
}
#endif
/*
   extern inline void pte_reuse(pte_t * ptep)
{
        if (!(mem_map[MAP_NR(ptep)] & MAP_PAGE_RESERVED))
                mem_map[MAP_NR(ptep)]++;
}
*/
extern inline int pmd_none(pmd_t pmd)           { return !pmd_val(pmd); }
extern inline int pmd_bad(pmd_t pmd)            { return (pmd_val(pmd) & ~PAGE_MASK) != _PAGE_TABLE; }
extern inline int pmd_present(pmd_t pmd)        { return pmd_val(pmd) & _PAGE_PRESENT; }
extern inline int pmd_inuse(pmd_t *pmdp)        { return 0; }
extern inline void pmd_clear(pmd_t * pmdp)      { pmd_val(*pmdp) = 0; }
extern inline void pmd_reuse(pmd_t * pmdp)      { }

/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
extern inline int pgd_none(pgd_t pgd)           { return 0; }
extern inline int pgd_bad(pgd_t pgd)            { return 0; }
extern inline int pgd_present(pgd_t pgd)        { return 1; }
#if 0
/*extern inline int pgd_inuse(pgd_t * pgdp)     { return mem_map[MAP_NR(pgdp)] != 1; }*/
extern inline int pgd_inuse(pgd_t *pgdp)        { return mem_map[MAP_NR(pgdp)].reserved;  }
#endif
extern inline void pgd_clear(pgd_t * pgdp)      { }

/*
extern inline void pgd_reuse(pgd_t * pgdp)
{
        if (!mem_map[MAP_NR(pgdp)].reserved)
                mem_map[MAP_NR(pgdp)].count++;
}
*/

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
extern inline int pte_read(pte_t pte)           { return pte_val(pte) & _PAGE_USER; }
extern inline int pte_write(pte_t pte)          { return pte_val(pte) & _PAGE_RW; }
extern inline int pte_exec(pte_t pte)           { return pte_val(pte) & _PAGE_USER; }
extern inline int pte_dirty(pte_t pte)          { return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte)          { return pte_val(pte) & _PAGE_ACCESSED; }
extern inline int pte_cow(pte_t pte)            { return pte_val(pte) & _PAGE_COW; }

extern inline pte_t pte_wrprotect(pte_t pte)    { pte_val(pte) &= ~_PAGE_RW; return pte; }
extern inline pte_t pte_rdprotect(pte_t pte)    { pte_val(pte) &= ~_PAGE_USER; return pte; }
extern inline pte_t pte_exprotect(pte_t pte)    { pte_val(pte) &= ~_PAGE_USER; return pte; }
extern inline pte_t pte_mkclean(pte_t pte)      { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
extern inline pte_t pte_mkold(pte_t pte)        { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
extern inline pte_t pte_uncow(pte_t pte)        { pte_val(pte) &= ~_PAGE_COW; return pte; }
extern inline pte_t pte_mkwrite(pte_t pte)      { pte_val(pte) |= _PAGE_RW; return pte; }
extern inline pte_t pte_mkread(pte_t pte)       { pte_val(pte) |= _PAGE_USER; return pte; }
extern inline pte_t pte_mkexec(pte_t pte)       { pte_val(pte) |= _PAGE_USER; return pte; }
extern inline pte_t pte_mkdirty(pte_t pte)      { pte_val(pte) |= _PAGE_DIRTY; return pte; }
extern inline pte_t pte_mkyoung(pte_t pte)      { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
extern inline pte_t pte_mkcow(pte_t pte)        { pte_val(pte) |= _PAGE_COW; return pte; }
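
/*
 * A minimal sketch of how these accessors compose (hypothetical helper,
 * illustration only): on a fault the pte would be marked young for any
 * access and dirty only for a permitted write.
 */
#if 0
extern inline pte_t touch_pte_example(pte_t pte, int write_access)
{
        pte = pte_mkyoung(pte);                 /* record the access */
        if (write_access && pte_write(pte))
                pte = pte_mkdirty(pte);         /* record the modification */
        return pte;
}
#endif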

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
extern inline pte_t mk_pte(unsigned long page, pgprot_t pgprot)
{ pte_t pte; pte_val(pte) = page | pgprot_val(pgprot); return pte; }

extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }

/*extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
{ pmd_val(*pmdp) = _PAGE_TABLE | ((((unsigned long) ptep) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }
*/
extern inline unsigned long pte_page(pte_t pte)
{ return pte_val(pte) & PAGE_MASK; }

extern inline unsigned long pmd_page(pmd_t pmd)
{ return pmd_val(pmd) & PAGE_MASK; }
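
/*
 * A minimal sketch of the round trip implied by the helpers above
 * (hypothetical, illustration only): the page-aligned address survives
 * mk_pte()/pte_page(), while the low bits carry the protection.
 */
#if 0
extern inline void mk_pte_example(unsigned long page)
{
        pte_t pte = mk_pte(page & PAGE_MASK, PAGE_KERNEL);

        /* pte_page(pte) == (page & PAGE_MASK); pte_write(pte) and
           pte_dirty(pte) are non-zero because PAGE_KERNEL sets RW and DIRTY. */
}
#endif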


/* to find an entry in a page-table-directory */
extern inline pgd_t * pgd_offset(struct mm_struct * mm, unsigned long address)
{
        return mm->pgd + (address >> PGDIR_SHIFT);
}

/* Find an entry in the second-level page table.. */
extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
        return (pmd_t *) dir;
}

/* Find an entry in the third-level page table.. */
extern inline pte_t * pte_offset(pmd_t * dir, unsigned long address)
{
        return (pte_t *) pmd_page(*dir) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}
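
/*
 * A minimal sketch of a full lookup through the folded tables
 * (hypothetical helper, no locking, assumes the tables are populated):
 * pgd_offset() picks the directory slot, pmd_offset() is a no-op because
 * the pmd is folded into the pgd, and pte_offset() indexes the page
 * table that the directory entry points to.
 */
#if 0
extern inline pte_t * find_pte_example(struct mm_struct * mm, unsigned long address)
{
        pgd_t * pgd = pgd_offset(mm, address);
        pmd_t * pmd = pmd_offset(pgd, address);

        if (pmd_none(*pmd) || pmd_bad(*pmd))
                return NULL;
        return pte_offset(pmd, address);
}
#endif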


/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any, and marks the page tables reserved.
 */
extern inline void pte_free_kernel(pte_t * pte)
{
        free_page((unsigned long) pte);
}
/*extern inline void pte_free_kernel(pte_t * pte)
{
        mem_map[MAP_NR(pte)] = 1;
        free_page((unsigned long) pte);
}
*/

/*
extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
{
        address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
        if (pmd_none(*pmd)) {
                pte_t * page = (pte_t *) get_free_page(GFP_KERNEL);
                if (pmd_none(*pmd)) {
                        if (page) {
                                pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) page;
                                mem_map[MAP_NR(page)] = MAP_PAGE_RESERVED;
                                return page + address;
                        }
                        pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) BAD_PAGETABLE;
                        return NULL;
                }
                free_page((unsigned long) page);
        }
        if (pmd_bad(*pmd)) {
                printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
                pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) BAD_PAGETABLE;
                return NULL;
        }
        return (pte_t *) pmd_page(*pmd) + address;
}*/
/*
extern inline pte_t * pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
printk("pte_alloc_kernel pmd = %08X, address = %08X\n", pmd, address);
        address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
printk("address now = %08X\n", address);
        if (pmd_none(*pmd)) {
                pte_t *page;
printk("pmd_none(*pmd) true\n");
                page = (pte_t *) get_free_page(GFP_KERNEL);
printk("page = %08X after get_free_page(%08X)\n",page,GFP_KERNEL);
                if (pmd_none(*pmd)) {
printk("pmd_none(*pmd=%08X) still\n",*pmd);
                        if (page) {
printk("page true = %08X\n",page);
                                pmd_set(pmd, page);
printk("pmd_set(%08X,%08X)\n",pmd,page);
                                mem_map[MAP_NR(page)].reserved = 1;
printk("did mem_map\n",pmd,page);
                                return page + address;
                        }
printk("did pmd_set(%08X, %08X\n",pmd,BAD_PAGETABLE);
                        pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
                        return NULL;
                }
printk("did free_page(%08X)\n",page);
                free_page((unsigned long) page);
        }
        if (pmd_bad(*pmd)) {
                printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
                pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
                return NULL;
        }
printk("returning pmd_page(%08X) + %08X\n",pmd_page(*pmd) , address);

        return (pte_t *) pmd_page(*pmd) + address;
}
*/
extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
{
        address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
        if (pmd_none(*pmd)) {
                pte_t * page = (pte_t *) get_free_page(GFP_KERNEL);
                if (pmd_none(*pmd)) {
                        if (page) {
/*                                pmd_set(pmd,page);*/
                                pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) page;
                                return page + address;
                        }
/*                      pmd_set(pmd, BAD_PAGETABLE);*/
                        pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) BAD_PAGETABLE;
                        return NULL;
                }
                free_page((unsigned long) page);
        }
        if (pmd_bad(*pmd)) {
                printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
/*              pmd_set(pmd, (pte_t *) BAD_PAGETABLE);          */
                pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) BAD_PAGETABLE;
                return NULL;
        }
        return (pte_t *) pmd_page(*pmd) + address;
}

/*
 * allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
 */
extern inline void pmd_free_kernel(pmd_t * pmd)
{
}

extern inline pmd_t * pmd_alloc_kernel(pgd_t * pgd, unsigned long address)
{
        return (pmd_t *) pgd;
}

extern inline void pte_free(pte_t * pte)
{
        free_page((unsigned long) pte);
}

extern inline pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
{
        address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
        if (pmd_none(*pmd)) {
                pte_t * page = (pte_t *) get_free_page(GFP_KERNEL);
                if (pmd_none(*pmd)) {
                        if (page) {
                                pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) page;
                                return page + address;
                        }
                        pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) BAD_PAGETABLE;
                        return NULL;
                }
                free_page((unsigned long) page);
        }
        if (pmd_bad(*pmd)) {
                printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
                pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) BAD_PAGETABLE;
                return NULL;
        }
        return (pte_t *) pmd_page(*pmd) + address;
}

/*
 * allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
 */
extern inline void pmd_free(pmd_t * pmd)
{
}

extern inline pmd_t * pmd_alloc(pgd_t * pgd, unsigned long address)
{
        return (pmd_t *) pgd;
}

extern inline void pgd_free(pgd_t * pgd)
{
        free_page((unsigned long) pgd);
}

extern inline pgd_t * pgd_alloc(void)
{
        return (pgd_t *) get_free_page(GFP_KERNEL);
}

504
extern pgd_t swapper_pg_dir[1024*8];
505
/*extern pgd_t *swapper_pg_dir;*/
506
 
507
/*
508
 * Software maintained MMU tables may have changed -- update the
509
 * hardware [aka cache]
510
 */
511
extern inline void update_mmu_cache(struct vm_area_struct * vma,
512
        unsigned long address, pte_t _pte)
513
{
514
#if 0
515
        printk("Update MMU cache - VMA: %x, Addr: %x, PTE: %x\n", vma, address, *(long *)&_pte);
516
        _printk("Update MMU cache - VMA: %x, Addr: %x, PTE: %x\n", vma, address, *(long *)&_pte);
517
/*      MMU_hash_page(&(vma->vm_task)->tss, address & PAGE_MASK, (pte *)&_pte);*/
518
#endif  
519
        MMU_hash_page(&(current)->tss, address & PAGE_MASK, (pte *)&_pte);
520
 
521
}


#ifdef _SCHED_INIT_
#define INIT_MMAP { &init_task, 0, 0x40000000, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC }

#endif

#define SWP_TYPE(entry) (((entry) >> 1) & 0x7f)
#define SWP_OFFSET(entry) ((entry) >> 8)
#define SWP_ENTRY(type,offset) (((type) << 1) | ((offset) << 8))
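
/*
 * Worked example of the swap-entry packing above: type 3 at offset 0x1234
 * packs to (3 << 1) | (0x1234 << 8) = 0x123406; SWP_TYPE(0x123406) == 3,
 * SWP_OFFSET(0x123406) == 0x1234, and bit 0 (_PAGE_PRESENT) stays clear,
 * so such an entry is never mistaken for a present pte.
 */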

#endif /* _PPC_PGTABLE_H */