OpenCores
URL https://opencores.org/ocsvn/or1k_old/or1k_old/trunk

Subversion Repositories or1k_old

/or1k_old/trunk/rc203soc/sw/uClinux/include/asm-m68knommu/pgtable.h (rev 1782)

#ifndef _M68K_PGTABLE_H
#define _M68K_PGTABLE_H

extern unsigned long mm_vtop(unsigned long addr) __attribute__ ((const));
extern unsigned long mm_ptov(unsigned long addr) __attribute__ ((const));

#define VTOP(addr)  (mm_vtop((unsigned long)(addr)))
#define PTOV(addr)  (mm_ptov((unsigned long)(addr)))

#ifndef NO_MM

#include <asm/setup.h>

#ifndef __ASSEMBLY__
#include <linux/config.h>

/*
 * This file contains the functions and defines necessary to modify and use
 * the m68k page table tree.
 */

/*
 * flush all atc entries (user-space entries only for the 680[46]0).
 */
static inline void __flush_tlb(void)
{
        if (CPU_IS_040_OR_060)
                __asm__ __volatile__(".word 0xf510\n"::); /* pflushan */
        else
                __asm__ __volatile__("pflusha\n"::);
}

static inline void __flush_tlb_one(unsigned long addr)
{
        if (CPU_IS_040_OR_060) {
                register unsigned long a0 __asm__ ("a0") = addr;
                __asm__ __volatile__(".word 0xf508" /* pflush (%a0) */
                                     : : "a" (a0));
        } else
                __asm__ __volatile__("pflush #0,#0,(%0)" : : "a" (addr));
}

#define flush_tlb() __flush_tlb()

/*
 * flush all atc entries (both kernel and user-space entries).
 */
static inline void flush_tlb_all(void)
{
        if (CPU_IS_040_OR_060)
                __asm__ __volatile__(".word 0xf518\n"::); /* pflusha */
        else
                __asm__ __volatile__("pflusha\n"::);
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
        if (mm == current->mm)
                __flush_tlb();
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
        unsigned long addr)
{
        if (vma->vm_mm == current->mm)
                __flush_tlb_one(addr);
}

static inline void flush_tlb_range(struct mm_struct *mm,
        unsigned long start, unsigned long end)
{
        if (mm == current->mm)
                __flush_tlb();
}

/* Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval) do{     \
        ((*(pteptr)) = (pteval));       \
        if (CPU_IS_060)                 \
                __asm__ __volatile__(".word 0xf518\n"::); /* pflusha */ \
        } while(0)
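
/*
 * Rough usage sketch (illustrative only; "ptep" and "page" are
 * hypothetical variables, not part of this header): callers go through
 * this hook instead of assigning to a pte directly, so that the '060
 * ATC flush above is never skipped, e.g.
 *
 *      set_pte(ptep, mk_pte(page, PAGE_KERNEL));
 */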

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT       22
#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT     25
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))

/*
 * entries per page directory level: the m68k is configured as three-level,
 * so we do have PMD level physically.
 */
#define PTRS_PER_PTE    1024
#define PTRS_PER_PMD    8
#define PTRS_PER_PGD    128
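
/*
 * Worked example of how these constants fit together (assuming the
 * usual 4 kB pages, i.e. PAGE_SHIFT == 12): a 32-bit virtual address
 * splits into
 *
 *      bits 31-25  pgd index   (7 bits  -> PTRS_PER_PGD = 128)
 *      bits 24-22  pmd index   (3 bits  -> PTRS_PER_PMD = 8)
 *      bits 21-12  pte index   (10 bits -> PTRS_PER_PTE = 1024)
 *      bits 11-0   offset within the page
 *
 * so each pte table maps PMD_SIZE = 4 MB and each pgd entry covers
 * PGDIR_SIZE = 32 MB.  The pgd_offset()/pmd_offset()/pte_offset()
 * helpers further down compute exactly these indices:
 *
 *      pgd_index = addr >> PGDIR_SHIFT;
 *      pmd_index = (addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
 *      pte_index = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
 */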

/* the no. of pointers that fit on a page: this will go away */
#define PTRS_PER_PAGE   (PAGE_SIZE/sizeof(void*))

typedef pgd_t pgd_table[PTRS_PER_PGD];
typedef pmd_t pmd_table[PTRS_PER_PMD];
typedef pte_t pte_table[PTRS_PER_PTE];

#define PGD_TABLES_PER_PAGE (PAGE_SIZE/sizeof(pgd_table))
#define PMD_TABLES_PER_PAGE (PAGE_SIZE/sizeof(pmd_table))
#define PTE_TABLES_PER_PAGE (PAGE_SIZE/sizeof(pte_table))

typedef pgd_table pgd_tablepage[PGD_TABLES_PER_PAGE];
typedef pmd_table pmd_tablepage[PMD_TABLES_PER_PAGE];
typedef pte_table pte_tablepage[PTE_TABLES_PER_PAGE];

/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET  (8*1024*1024)
#define VMALLOC_START ((high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
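
/*
 * Example of the rounding above (the amount of RAM is invented): with
 * high_memory at 0x01800000 (24 MB), VMALLOC_START becomes
 * (0x01800000 + 0x00800000) & ~0x007fffff = 0x02000000, i.e. the
 * vmalloc area starts at the next 8 MB boundary, leaving the intended
 * hole between physical memory and the kernel virtual mappings.
 */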

#endif /* __ASSEMBLY__ */

/*
 * Definitions for MMU descriptors
 */
#define _PAGE_PRESENT   0x001
#define _PAGE_SHORT     0x002
#define _PAGE_RONLY     0x004
#define _PAGE_ACCESSED  0x008
#define _PAGE_DIRTY     0x010
#define _PAGE_GLOBAL040 0x400   /* 68040 global bit, used for kva descs */
#define _PAGE_COW       0x800   /* implemented in software */
#define _PAGE_NOCACHE030 0x040  /* 68030 no-cache mode */
#define _PAGE_NOCACHE   0x060   /* 68040 cache mode, non-serialized */
#define _PAGE_NOCACHE_S 0x040   /* 68040 no-cache mode, serialized */
#define _PAGE_CACHE040  0x020   /* 68040 cache mode, cachable, copyback */
#define _PAGE_CACHE040W 0x000   /* 68040 cache mode, cachable, write-through */

#define _DESCTYPE_MASK  0x003

#define _CACHEMASK040   (~0x060)
#define _TABLE_MASK     (0xfffffe00)

#define _PAGE_TABLE     (_PAGE_SHORT)
#define _PAGE_CHG_MASK  (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_NOCACHE)
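
/*
 * Worked example (the value is invented): a pte of 0x01234019 decodes
 * with the masks above as
 *
 *      0x01234000  page frame address  (pte_val(pte) & PAGE_MASK)
 *      0x010       _PAGE_DIRTY
 *      0x008       _PAGE_ACCESSED
 *      0x001       _PAGE_PRESENT
 *
 * i.e. a present page that has been both read and written.  The low
 * two bits are the descriptor type (_DESCTYPE_MASK): page descriptors
 * carry _PAGE_PRESENT, while pointer-table entries carry _PAGE_SHORT
 * (hence _PAGE_TABLE) instead.
 */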

#ifndef __ASSEMBLY__

/* This is the cache mode to be used for pages containing page descriptors for
 * processors >= '040. It is used in pte_mknocache(), and the variable is
 * defined and initialized in head.S */
extern int m68k_pgtable_cachemode;

#if defined(CONFIG_M68040_OR_M68060_ONLY)
#define mm_cachebits _PAGE_CACHE040
#elif defined(CONFIG_M68020_OR_M68030_ONLY)
#define mm_cachebits 0
#else
extern unsigned long mm_cachebits;
#endif

#define PAGE_NONE       __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED | mm_cachebits)
#define PAGE_SHARED     __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | mm_cachebits)
#define PAGE_COPY       __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED | mm_cachebits)
#define PAGE_READONLY   __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED | mm_cachebits)
#define PAGE_KERNEL     __pgprot(_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | mm_cachebits)

/* Alternate definitions that are compile time constants, for
   initializing protection_map.  The cachebits are fixed later.  */
#define PAGE_NONE_C     __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED)
#define PAGE_SHARED_C   __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
#define PAGE_COPY_C     __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED)
#define PAGE_READONLY_C __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED)

/*
 * The m68k can't do page protection for execute, and considers that the
 * same as a read.  Also, write permissions imply read permissions.  This
 * is the closest we can get..
 */
#define __P000  PAGE_NONE_C
#define __P001  PAGE_READONLY_C
#define __P010  PAGE_COPY_C
#define __P011  PAGE_COPY_C
#define __P100  PAGE_READONLY_C
#define __P101  PAGE_READONLY_C
#define __P110  PAGE_COPY_C
#define __P111  PAGE_COPY_C

#define __S000  PAGE_NONE_C
#define __S001  PAGE_READONLY_C
#define __S010  PAGE_SHARED_C
#define __S011  PAGE_SHARED_C
#define __S100  PAGE_READONLY_C
#define __S101  PAGE_READONLY_C
#define __S110  PAGE_SHARED_C
#define __S111  PAGE_SHARED_C
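
/*
 * Reading the tables above: the private (__Pxxx) entries map any
 * writable request to PAGE_COPY_C, so the page is installed read-only
 * and a write faults into copy-on-write, while the shared (__Sxxx)
 * entries grant real write access through PAGE_SHARED_C.  For example
 * (illustrative): a read+write MAP_PRIVATE mmap() resolves to
 * PAGE_COPY_C, the same request on a MAP_SHARED mapping to
 * PAGE_SHARED_C.
 */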

/* zero page used for uninitialized stuff */
extern unsigned long empty_zero_page;

/*
 * BAD_PAGETABLE is used when we need a bogus page-table, while
 * BAD_PAGE is used for a bogus page.
 *
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern pte_t __bad_page(void);
extern pte_t * __bad_pagetable(void);

#define BAD_PAGETABLE __bad_pagetable()
#define BAD_PAGE __bad_page()
#define ZERO_PAGE empty_zero_page

/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR                    (8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK                        (~(sizeof(void*)-1))

/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
/* 64-bit machines, beware!  SRB. */
#define SIZEOF_PTR_LOG2                 2

/* to find an entry in a page-table */
#define PAGE_PTR(address) \
((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)

extern unsigned long high_memory;

/* For virtual address to physical address conversion */
extern unsigned long mm_vtop(unsigned long addr) __attribute__ ((const));
extern unsigned long mm_ptov(unsigned long addr) __attribute__ ((const));
#define VTOP(addr)  (mm_vtop((unsigned long)(addr)))
#define PTOV(addr)  (mm_ptov((unsigned long)(addr)))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
extern inline pte_t mk_pte(unsigned long page, pgprot_t pgprot)
{ pte_t pte; pte_val(pte) = VTOP(page) | pgprot_val(pgprot); return pte; }

extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }
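
/*
 * Minimal usage sketch ("page" and "ptep" are hypothetical): mk_pte()
 * converts a kernel virtual page address to its physical frame with
 * VTOP() and ORs in the protection bits, after which the entry can be
 * installed through the set_pte() hook above:
 *
 *      pte_t pte = mk_pte(page, PAGE_KERNEL);
 *      set_pte(ptep, pte_wrprotect(pte));
 *
 * pte_modify() keeps the frame address plus the accessed/dirty/cache
 * bits (_PAGE_CHG_MASK) and replaces only the protection bits.
 */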

extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
{
        int i;

        ptep = (pte_t *) VTOP(ptep);
        for (i = 0; i < 16; i++, ptep += PTRS_PER_PTE/16)
                pmdp->pmd[i] = _PAGE_TABLE | _PAGE_ACCESSED | (unsigned long)ptep;
}

/* early termination version of the above */
extern inline void pmd_set_et(pmd_t * pmdp, pte_t * ptep)
{
        int i;

        ptep = (pte_t *) VTOP(ptep);
        for (i = 0; i < 16; i++, ptep += PTRS_PER_PTE/16)
                pmdp->pmd[i] = _PAGE_PRESENT | _PAGE_ACCESSED | (unsigned long)ptep;
}

extern inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{ pgd_val(*pgdp) = _PAGE_TABLE | _PAGE_ACCESSED | VTOP(pmdp); }

extern inline unsigned long pte_page(pte_t pte)
{ return PTOV(pte_val(pte) & PAGE_MASK); }

extern inline unsigned long pmd_page2(pmd_t *pmd)
{ return PTOV(pmd_val(*pmd) & _TABLE_MASK); }
#define pmd_page(pmd) pmd_page2(&(pmd))

extern inline unsigned long pgd_page(pgd_t pgd)
{ return PTOV(pgd_val(pgd) & _TABLE_MASK); }

extern inline int pte_none(pte_t pte)           { return !pte_val(pte); }
extern inline int pte_present(pte_t pte)        { return pte_val(pte) & _PAGE_PRESENT; }
extern inline void pte_clear(pte_t *ptep)       { pte_val(*ptep) = 0; }

extern inline int pmd_none2(pmd_t *pmd)         { return !pmd_val(*pmd); }
#define pmd_none(pmd) pmd_none2(&(pmd))
extern inline int pmd_bad2(pmd_t *pmd)          { return (pmd_val(*pmd) & _DESCTYPE_MASK) != _PAGE_TABLE || pmd_page(*pmd) > high_memory; }
#define pmd_bad(pmd) pmd_bad2(&(pmd))
extern inline int pmd_present2(pmd_t *pmd)      { return pmd_val(*pmd) & _PAGE_TABLE; }
#define pmd_present(pmd) pmd_present2(&(pmd))
extern inline void pmd_clear(pmd_t * pmdp)
{
        short i;

        for (i = 15; i >= 0; i--)
                pmdp->pmd[i] = 0;
}

extern inline int pgd_none(pgd_t pgd)           { return !pgd_val(pgd); }
extern inline int pgd_bad(pgd_t pgd)            { return (pgd_val(pgd) & _DESCTYPE_MASK) != _PAGE_TABLE || pgd_page(pgd) > high_memory; }
extern inline int pgd_present(pgd_t pgd)        { return pgd_val(pgd) & _PAGE_TABLE; }

extern inline void pgd_clear(pgd_t * pgdp)      { pgd_val(*pgdp) = 0; }

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
extern inline int pte_read(pte_t pte)           { return 1; }
extern inline int pte_write(pte_t pte)          { return !(pte_val(pte) & _PAGE_RONLY); }
extern inline int pte_exec(pte_t pte)           { return 1; }
extern inline int pte_dirty(pte_t pte)          { return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte)          { return pte_val(pte) & _PAGE_ACCESSED; }

extern inline pte_t pte_wrprotect(pte_t pte)    { pte_val(pte) |= _PAGE_RONLY; return pte; }
extern inline pte_t pte_rdprotect(pte_t pte)    { return pte; }
extern inline pte_t pte_exprotect(pte_t pte)    { return pte; }
extern inline pte_t pte_mkclean(pte_t pte)      { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
extern inline pte_t pte_mkold(pte_t pte)        { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
extern inline pte_t pte_mkwrite(pte_t pte)      { pte_val(pte) &= ~_PAGE_RONLY; return pte; }
extern inline pte_t pte_mkread(pte_t pte)       { return pte; }
extern inline pte_t pte_mkexec(pte_t pte)       { return pte; }
extern inline pte_t pte_mkdirty(pte_t pte)      { pte_val(pte) |= _PAGE_DIRTY; return pte; }
extern inline pte_t pte_mkyoung(pte_t pte)      { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
extern inline pte_t pte_mknocache(pte_t pte)
{
        pte_val(pte) = (pte_val(pte) & _CACHEMASK040) | m68k_pgtable_cachemode;
        return pte;
}
extern inline pte_t pte_mkcache(pte_t pte)      { pte_val(pte) = (pte_val(pte) & _CACHEMASK040) | _PAGE_CACHE040; return pte; }

/* to set the page-dir */
extern inline void SET_PAGE_DIR(struct task_struct * tsk, pgd_t * pgdir)
{
        tsk->tss.crp[0] = 0x80000000 | _PAGE_TABLE;
        tsk->tss.crp[1] = VTOP(pgdir);
        if (tsk == current) {
                if (CPU_IS_040_OR_060)
                        __asm__ __volatile__ ("movel %0@,%/d0\n\t"
                                              ".long 0x4e7b0806\n\t"
                                              /* movec d0,urp */
                                              : : "a" (&tsk->tss.crp[1])
                                              : "d0");
                else
                        __asm__ __volatile__ ("movec  %/cacr,%/d0\n\t"
                                              "oriw #0x0808,%/d0\n\t"
                                              "movec %/d0,%/cacr\n\t"
                                              "pmove %0@,%/crp\n\t"
                                              : : "a" (&tsk->tss.crp[0])
                                              : "d0");
        }
}

#define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))

/* to find an entry in a page-table-directory */
extern inline pgd_t * pgd_offset(struct mm_struct * mm, unsigned long address)
{
        return mm->pgd + (address >> PGDIR_SHIFT);
}

extern pgd_t swapper_pg_dir[128];
extern pgd_t kernel_pg_dir[128];

extern inline pgd_t * pgd_offset_k(unsigned long address)
{
        return kernel_pg_dir + (address >> PGDIR_SHIFT);
}

/* Find an entry in the second-level page table.. */
extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
        return (pmd_t *) pgd_page(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PMD-1));
}

/* Find an entry in the third-level page table.. */
extern inline pte_t * pte_offset(pmd_t * pmdp, unsigned long address)
{
        return (pte_t *) pmd_page(*pmdp) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}
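
/*
 * Putting the three lookup helpers together, a full walk of an address
 * space might look like the sketch below (the function name is made up,
 * and real callers must first check pgd_none()/pmd_none()/pte_present()
 * before dereferencing each level):
 */
#if 0
static unsigned long vaddr_to_paddr(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd = pgd_offset(mm, addr);
        pmd_t *pmd = pmd_offset(pgd, addr);
        pte_t *pte = pte_offset(pmd, addr);

        /* physical frame address plus the offset within the page */
        return (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);
}
#endif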

/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */

extern inline void nocache_page (unsigned long vaddr)
{
        if (CPU_IS_040_OR_060) {
                pgd_t *dir;
                pmd_t *pmdp;
                pte_t *ptep;

                if(CPU_IS_060)
                        __asm__ __volatile__ ("movel %0,%/a0\n\t"
                                              ".word 0xf470"
                                              : : "g" (VTOP(vaddr))
                                              : "a0");

                dir = pgd_offset_k(vaddr);
                pmdp = pmd_offset(dir,vaddr);
                ptep = pte_offset(pmdp,vaddr);
                *ptep = pte_mknocache(*ptep);
        }
}

static inline void cache_page (unsigned long vaddr)
{
        if (CPU_IS_040_OR_060) {
                pgd_t *dir;
                pmd_t *pmdp;
                pte_t *ptep;

                dir = pgd_offset_k(vaddr);
                pmdp = pmd_offset(dir,vaddr);
                ptep = pte_offset(pmdp,vaddr);
                *ptep = pte_mkcache(*ptep);
        }
}

extern const char PgtabStr_bad_pmd[];
extern const char PgtabStr_bad_pgd[];
extern const char PgtabStr_bad_pmdk[];
extern const char PgtabStr_bad_pgdk[];

extern inline void pte_free(pte_t * pte)
{
        cache_page((unsigned long)pte);
        free_page((unsigned long) pte);
}

extern inline pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
{
        address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
        if (pmd_none(*pmd)) {
                pte_t * page = (pte_t *)get_free_page(GFP_KERNEL);
                if (pmd_none(*pmd)) {
                        if (page) {
                                nocache_page((unsigned long)page);
                                pmd_set(pmd,page);
                                return page + address;
                        }
                        pmd_set(pmd, BAD_PAGETABLE);
                        return NULL;
                }
                free_page((unsigned long)page);
        }
        if (pmd_bad(*pmd)) {
                printk(PgtabStr_bad_pmd, pmd_val(*pmd));
                pmd_set(pmd, BAD_PAGETABLE);
                return NULL;
        }
        return (pte_t *) pmd_page(*pmd) + address;
}
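
/*
 * Note on the pattern above (repeated in the other *_alloc helpers
 * below): get_free_page(GFP_KERNEL) may sleep, so by the time it
 * returns another context can already have filled in this pmd slot.
 * The second pmd_none() check catches that case and the freshly
 * allocated page is simply released again.
 */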

extern pmd_t *get_pointer_table (void);
extern void free_pointer_table (pmd_t *);
extern pmd_t *get_kpointer_table (void);
extern void free_kpointer_table (pmd_t *);

extern inline void pmd_free(pmd_t * pmd)
{
        free_pointer_table (pmd);
}

extern inline pmd_t * pmd_alloc(pgd_t * pgd, unsigned long address)
{
        address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
        if (pgd_none(*pgd)) {
                pmd_t *page = get_pointer_table();
                if (pgd_none(*pgd)) {
                        if (page) {
                                pgd_set(pgd, page);
                                return page + address;
                        }
                        pgd_set(pgd, (pmd_t *)BAD_PAGETABLE);
                        return NULL;
                }
                free_pointer_table(page);
        }
        if (pgd_bad(*pgd)) {
                printk(PgtabStr_bad_pgd, pgd_val(*pgd));
                pgd_set(pgd, (pmd_t *)BAD_PAGETABLE);
                return NULL;
        }
        return (pmd_t *) pgd_page(*pgd) + address;
}

extern inline void pte_free_kernel(pte_t * pte)
{
        cache_page((unsigned long)pte);
        free_page((unsigned long) pte);
}

extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
{
        address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
        if (pmd_none(*pmd)) {
                pte_t * page = (pte_t *) get_free_page(GFP_KERNEL);
                if (pmd_none(*pmd)) {
                        if (page) {
                                nocache_page((unsigned long)page);
                                pmd_set(pmd, page);
                                return page + address;
                        }
                        pmd_set(pmd, BAD_PAGETABLE);
                        return NULL;
                }
                free_page((unsigned long) page);
        }
        if (pmd_bad(*pmd)) {
                printk(PgtabStr_bad_pmdk, pmd_val(*pmd));
                pmd_set(pmd, BAD_PAGETABLE);
                return NULL;
        }
        return (pte_t *) pmd_page(*pmd) + address;
}

extern inline void pmd_free_kernel(pmd_t * pmd)
{
        free_kpointer_table(pmd);
}

extern inline pmd_t * pmd_alloc_kernel(pgd_t * pgd, unsigned long address)
{
        address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
        if (pgd_none(*pgd)) {
                pmd_t *page = get_kpointer_table();
                if (pgd_none(*pgd)) {
                        if (page) {
                                pgd_set(pgd, page);
                                return page + address;
                        }
                        pgd_set(pgd, (pmd_t *)BAD_PAGETABLE);
                        return NULL;
                }
                free_kpointer_table(page);
        }
        if (pgd_bad(*pgd)) {
                printk(PgtabStr_bad_pgdk, pgd_val(*pgd));
                pgd_set(pgd, (pmd_t *)BAD_PAGETABLE);
                return NULL;
        }
        return (pmd_t *) pgd_page(*pgd) + address;
}

extern inline void pgd_free(pgd_t * pgd)
{
        free_pointer_table ((pmd_t *) pgd);
}

extern inline pgd_t * pgd_alloc(void)
{
        return (pgd_t *)get_pointer_table ();
}

#define flush_icache() \
do { \
        if (CPU_IS_040_OR_060) \
                asm __volatile__ ("nop; .word 0xf498 /* cinva %%ic */"); \
        else \
                asm __volatile__ ("movec %/cacr,%/d0;" \
                     "oriw %0,%/d0;" \
                     "movec %/d0,%/cacr" \
                     : /* no outputs */ \
                     : "i" (FLUSH_I) \
                     : "d0"); \
} while (0)

/*
 * invalidate the cache for the specified memory range.
 * It starts at the physical address specified for
 * the given number of bytes.
 */
extern void cache_clear (unsigned long paddr, int len);
/*
 * push any dirty cache in the specified memory range.
 * It starts at the physical address specified for
 * the given number of bytes.
 */
extern void cache_push (unsigned long paddr, int len);

/*
 * push and invalidate pages in the specified user virtual
 * memory range.
 */
extern void cache_push_v (unsigned long vaddr, int len);

/* cache code */
#define FLUSH_I_AND_D   (0x00000808)
#define FLUSH_I         (0x00000008)

/* This is needed whenever the virtual mapping of the current
   process changes.  */
#define __flush_cache_all()                                             \
    do {                                                                \
        if (CPU_IS_040_OR_060)                                          \
               __asm__ __volatile__ ("nop; .word 0xf478\n" ::);         \
        else                                                            \
               __asm__ __volatile__ ("movec %%cacr,%%d0\n\t"            \
                                     "orw %0,%%d0\n\t"                  \
                                     "movec %%d0,%%cacr"                \
                                     : : "di" (FLUSH_I_AND_D) : "d0");  \
    } while (0)

#define __flush_cache_030()                                             \
    do {                                                                \
        if (CPU_IS_020_OR_030)                                          \
               __asm__ __volatile__ ("movec %%cacr,%%d0\n\t"            \
                                     "orw %0,%%d0\n\t"                  \
                                     "movec %%d0,%%cacr"                \
                                     : : "di" (FLUSH_I_AND_D) : "d0");  \
    } while (0)

#define flush_cache_all() __flush_cache_all()

extern inline void flush_cache_mm(struct mm_struct *mm)
{
#if FLUSH_VIRTUAL_CACHE_040
        if (mm == current->mm) __flush_cache_all();
#else
        if (mm == current->mm) __flush_cache_030();
#endif
}

extern inline void flush_cache_range(struct mm_struct *mm,
                                     unsigned long start,
                                     unsigned long end)
{
        if (mm == current->mm){
#if FLUSH_VIRTUAL_CACHE_040
            if (CPU_IS_040_OR_060)
                cache_push_v(start, end-start);
            else
#endif
                __flush_cache_030();
        }
}

extern inline void flush_cache_page(struct vm_area_struct *vma,
                                    unsigned long vmaddr)
{
        if (vma->vm_mm == current->mm){
#if FLUSH_VIRTUAL_CACHE_040
            if (CPU_IS_040_OR_060)
                cache_push_v(vmaddr, PAGE_SIZE);
            else
#endif
                __flush_cache_030();
        }
}

/* Push the page at kernel virtual address and clear the icache */
extern inline void flush_page_to_ram (unsigned long address)
{
    if (CPU_IS_040_OR_060) {
        register unsigned long tmp __asm ("a0") = VTOP(address);
        __asm__ __volatile__ ("nop\n\t"
                              ".word 0xf470 /* cpushp %%dc,(%0) */\n\t"
                              ".word 0xf490 /* cinvp %%ic,(%0) */"
                              : : "a" (tmp));
    }
    else
        __asm volatile ("movec %%cacr,%%d0\n\t"
                        "orw %0,%%d0\n\t"
                        "movec %%d0,%%cacr"
                        : : "di" (FLUSH_I) : "d0");
}

/* Push n pages at kernel virtual address and clear the icache */
extern inline void flush_pages_to_ram (unsigned long address, int n)
{
    if (CPU_IS_040_OR_060) {
        while (n--) {
            register unsigned long tmp __asm ("a0") = VTOP(address);
            __asm__ __volatile__ ("nop\n\t"
                                  ".word 0xf470 /* cpushp %%dc,(%0) */\n\t"
                                  ".word 0xf490 /* cinvp %%ic,(%0) */"
                                  : : "a" (tmp));
            address += PAGE_SIZE;
        }
    }
    else
        __asm volatile ("movec %%cacr,%%d0\n\t"
                        "orw %0,%%d0\n\t"
                        "movec %%d0,%%cacr"
                        : : "di" (FLUSH_I) : "d0");
}

/*
 * Check if the addr/len goes up to the end of a physical
 * memory chunk.  Used for DMA functions.
 */
int mm_end_of_chunk (unsigned long addr, int len);

/*
 * Map some physical address range into the kernel address space. The
 * code is copied and adapted from map_chunk().
 */
extern unsigned long kernel_map(unsigned long paddr, unsigned long size,
                                int nocacheflag, unsigned long *memavailp );
/*
 * Change the cache mode of some kernel address range.
 */
extern void kernel_set_cachemode( unsigned long address, unsigned long size,
                                  unsigned cmode );

/* Values for nocacheflag and cmode */
#define KERNELMAP_FULL_CACHING          0
#define KERNELMAP_NOCACHE_SER           1
#define KERNELMAP_NOCACHE_NONSER        2
#define KERNELMAP_NO_COPYBACK           3
722
#define KERNELMAP_NO_COPYBACK           3
723
 
724
/*
725
 * The m68k doesn't have any external MMU info: the kernel page
726
 * tables contain all the necessary information.
727
 */
728
extern inline void update_mmu_cache(struct vm_area_struct * vma,
729
        unsigned long address, pte_t pte)
730
{
731
}
732
 
733
/*
734
 * I don't know what is going on here, but since these were changed,
735
 * swapping hasn't been working on the 68040.
736
 */
737
 
738
#define SWP_TYPE(entry)  (((entry) >> 2) & 0x7f)
739
#if 0
740
#define SWP_OFFSET(entry) ((entry) >> 9)
741
#define SWP_ENTRY(type,offset) (((type) << 2) | ((offset) << 9))
742
#else
743
#define SWP_OFFSET(entry) ((entry) >> PAGE_SHIFT)
744
#define SWP_ENTRY(type,offset) (((type) << 2) | ((offset) << PAGE_SHIFT))
745
#endif
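
/*
 * With the active #else branch above and PAGE_SHIFT == 12, a swap
 * entry packs as (type << 2) | (offset << 12); for example
 * SWP_ENTRY(3, 0x45) == 0x4500c, from which SWP_TYPE() recovers 3 and
 * SWP_OFFSET() recovers 0x45.  The low two bits (the descriptor type)
 * stay zero, so a swapped-out entry never looks present to the MMU or
 * to pte_present().
 */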

#endif /* __ASSEMBLY__ */

#else /* NO_MM */

extern inline void flush_cache_mm(struct mm_struct *mm)
{
}

extern inline void flush_cache_range(struct mm_struct *mm,
                                     unsigned long start,
                                     unsigned long end)
{
}

/* Push the page at kernel virtual address and clear the icache */
extern inline void flush_page_to_ram (unsigned long address)
{
}

/* Push n pages at kernel virtual address and clear the icache */
extern inline void flush_pages_to_ram (unsigned long address, int n)
{
}

#endif /* NO_MM */

#endif /* _M68K_PGTABLE_H */
