/*
 * include/asm-m68k/pgtable.h
 * From the uClinux tree in the OpenCores or1k_old repository
 * (https://opencores.org/ocsvn/or1k_old/or1k_old/trunk), rev 1633.
 */
#ifndef _M68K_PGTABLE_H
#define _M68K_PGTABLE_H

#ifndef __ASSEMBLY__

/*
 * This file contains the functions and defines necessary to modify and use
 * the m68k page table tree.
 */

#define __flush_tlb() \
do {    \
        if (m68k_is040or060) \
                __asm__ __volatile__(".word 0xf510\n"::); /* pflushan */ \
        else \
                __asm__ __volatile__("pflusha\n"::); \
} while (0)
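
/*
 * (The raw ".word" encodings here and below stand for '040/'060
 * instructions the assemblers of the day presumably did not accept;
 * the intended mnemonic is given alongside each, e.g. 0xf510 is
 * pflushan.)
 */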

static inline void __flush_tlb_one(unsigned long addr)
{
        if (m68k_is040or060) {
                register unsigned long a0 __asm__ ("a0") = addr;
                __asm__ __volatile__(".word 0xf508" /* pflush (%a0) */
                                     : : "a" (a0));
        } else
                __asm__ __volatile__("pflush #0,#0,(%0)" : : "a" (addr));
}

#define flush_tlb() __flush_tlb()
#define flush_tlb_all() flush_tlb()

static inline void flush_tlb_mm(struct mm_struct *mm)
{
        if (mm == current->mm)
                __flush_tlb();
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
        unsigned long addr)
{
        if (vma->vm_mm == current->mm)
                __flush_tlb_one(addr);
}

static inline void flush_tlb_range(struct mm_struct *mm,
        unsigned long start, unsigned long end)
{
        if (mm == current->mm)
                __flush_tlb();
}

/* Certain architectures need to do special things when ptes
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT       22
#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT     25
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))

/*
 * entries per page directory level: the m68k is configured as three-level,
 * so we do have a physical PMD level.
 */
#define PTRS_PER_PTE    1024
#define PTRS_PER_PMD    8
#define PTRS_PER_PGD    128

/* the number of pointers that fit on a page: this will go away */
#define PTRS_PER_PAGE   (PAGE_SIZE/sizeof(void*))
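
/*
 * With 4kB pages (PAGE_SHIFT == 12), these constants split a 32-bit
 * virtual address as:
 *
 *      31         25 24    22 21           12 11            0
 *     +-------------+--------+---------------+---------------+
 *     |  pgd index  | pmd idx|   pte index   |  page offset  |
 *     +-------------+--------+---------------+---------------+
 *
 * 7 + 3 + 10 + 12 = 32 bits; 2^7 = 128 = PTRS_PER_PGD, 2^3 = 8 =
 * PTRS_PER_PMD and 2^10 = 1024 = PTRS_PER_PTE.
 */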

typedef pgd_t pgd_table[PTRS_PER_PGD];
typedef pmd_t pmd_table[PTRS_PER_PMD];
typedef pte_t pte_table[PTRS_PER_PTE];

#define PGD_TABLES_PER_PAGE (PAGE_SIZE/sizeof(pgd_table))
#define PMD_TABLES_PER_PAGE (PAGE_SIZE/sizeof(pmd_table))
#define PTE_TABLES_PER_PAGE (PAGE_SIZE/sizeof(pte_table))

typedef pgd_table pgd_tablepage[PGD_TABLES_PER_PAGE];
typedef pmd_table pmd_tablepage[PMD_TABLES_PER_PAGE];
typedef pte_table pte_tablepage[PTE_TABLES_PER_PAGE];

/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET  (8*1024*1024)
#define VMALLOC_START ((high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
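
/*
 * Worked example: with high_memory at 16MB (0x01000000), VMALLOC_START
 * is (0x01000000 + 0x00800000) & ~0x007fffff = 0x01800000, i.e. the
 * vmalloc area begins at the next 8MB boundary past the end of
 * physical memory.
 */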

#endif /* __ASSEMBLY__ */

/*
 * Definitions for MMU descriptors
 */
#define _PAGE_PRESENT   0x001
#define _PAGE_SHORT     0x002
#define _PAGE_RONLY     0x004
#define _PAGE_ACCESSED  0x008
#define _PAGE_DIRTY     0x010
#define _PAGE_GLOBAL040 0x400   /* 68040 global bit, used for kva descs */
#define _PAGE_COW       0x800   /* implemented in software */
#define _PAGE_NOCACHE030 0x040  /* 68030 no-cache mode */
#define _PAGE_NOCACHE   0x060   /* 68040 cache mode, non-serialized */
#define _PAGE_NOCACHE_S 0x040   /* 68040 no-cache mode, serialized */
#define _PAGE_CACHE040  0x020   /* 68040 cache mode, cachable, copyback */
#define _PAGE_CACHE040W 0x000   /* 68040 cache mode, cachable, write-through */
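
/*
 * Taken together, the definitions above encode the low bits of a
 * short-format page descriptor roughly as follows:
 *
 *      bits 0-1   descriptor type        (_DESCTYPE_MASK)
 *      bit  2     write protect          (_PAGE_RONLY)
 *      bit  3     used/accessed          (_PAGE_ACCESSED)
 *      bit  4     modified/dirty         (_PAGE_DIRTY)
 *      bits 5-6   '040 cache mode        (cleared by _CACHEMASK040)
 *      bit  10    '040 global bit        (_PAGE_GLOBAL040)
 */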

#define _DESCTYPE_MASK  0x003

#define _CACHEMASK040   (~0x060)
#define _TABLE_MASK     (0xfffffff0)

#define _PAGE_TABLE     (_PAGE_SHORT)
#define _PAGE_CHG_MASK  (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_NOCACHE)

#ifndef __ASSEMBLY__

#define PAGE_NONE       __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED | _PAGE_CACHE040)
#define PAGE_SHARED     __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_CACHE040)
#define PAGE_COPY       __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED | _PAGE_CACHE040)
#define PAGE_READONLY   __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED | _PAGE_CACHE040)
#define PAGE_KERNEL     __pgprot(_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_CACHE040)

/*
 * The m68k can't do page protection for execute, and treats it the same
 * as read.  Also, write permissions imply read permissions.  This is the
 * closest we can get..
 */
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY
#define __P101  PAGE_READONLY
#define __P110  PAGE_COPY
#define __P111  PAGE_COPY

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED
#define __S100  PAGE_READONLY
#define __S101  PAGE_READONLY
#define __S110  PAGE_SHARED
#define __S111  PAGE_SHARED
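
/*
 * The digits in the __P/__S names encode the PROT_EXEC, PROT_WRITE and
 * PROT_READ bits, in that order.  Execute is treated as read here, so
 * the only real distinction is writability: private writable mappings
 * (__Px1x) map to PAGE_COPY, whose _PAGE_RONLY bit makes the first
 * write fault so the page can be copied (copy-on-write), while shared
 * writable mappings (__Sx1x) map to PAGE_SHARED with write enabled.
 */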

/* zero page used for uninitialized stuff */
extern unsigned long empty_zero_page;

/*
 * BAD_PAGETABLE is used when we need a bogus page-table, while
 * BAD_PAGE is used for a bogus page.
 *
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern pte_t __bad_page(void);
extern pte_t * __bad_pagetable(void);

#define BAD_PAGETABLE __bad_pagetable()
#define BAD_PAGE __bad_page()
#define ZERO_PAGE empty_zero_page

/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR                    (8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK                        (~(sizeof(void*)-1))

/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
/* 64-bit machines, beware!  SRB. */
#define SIZEOF_PTR_LOG2                 2

/* to find an entry in a page-table */
#define PAGE_PTR(address) \
((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)

extern unsigned long high_memory;

/* For virtual address to physical address conversion */
extern unsigned long mm_vtop(unsigned long addr) __attribute__ ((const));
extern unsigned long mm_ptov(unsigned long addr) __attribute__ ((const));
#define VTOP(addr)  (mm_vtop((unsigned long)(addr)))
#define PTOV(addr)  (mm_ptov((unsigned long)(addr)))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
extern inline pte_t mk_pte(unsigned long page, pgprot_t pgprot)
{ pte_t pte; pte_val(pte) = VTOP(page) | pgprot_val(pgprot); return pte; }

extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }

extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
{
        int i;

        ptep = (pte_t *) VTOP(ptep);
        for (i = 0; i < 16; i++, ptep += PTRS_PER_PTE/16)
                pmdp->pmd[i] = _PAGE_TABLE | (unsigned long)ptep;
}

/* early termination version of the above */
extern inline void pmd_set_et(pmd_t * pmdp, pte_t * ptep)
{
        int i;

        ptep = (pte_t *) VTOP(ptep);
        for (i = 0; i < 16; i++, ptep += PTRS_PER_PTE/16)
                pmdp->pmd[i] = _PAGE_PRESENT | (unsigned long)ptep;
}
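
/*
 * The arithmetic behind the two loops above: a page of ptes holds
 * PTRS_PER_PTE = 1024 entries, but a 68040 hardware page table has
 * only 64 (the page-index field of a virtual address is 6 bits wide).
 * Each pmd_t therefore carries 16 hardware descriptors, and descriptor
 * i is pointed at entry i * (1024/16) = i * 64 of the pte page.
 * pmd_set() writes normal table descriptors (_PAGE_TABLE); pmd_set_et()
 * instead writes page descriptors (_PAGE_PRESENT), which act as
 * "early termination" descriptors and end the table walk at this level
 * on MMUs that support them.
 */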

extern inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{ pgd_val(*pgdp) = _PAGE_TABLE | VTOP(pmdp); }

extern inline unsigned long pte_page(pte_t pte)
{ return PTOV(pte_val(pte) & PAGE_MASK); }

extern inline unsigned long pmd_page2(pmd_t *pmd)
{ return PTOV(pmd_val(*pmd) & _TABLE_MASK); }
#define pmd_page(pmd) pmd_page2(&(pmd))

extern inline unsigned long pgd_page(pgd_t pgd)
{ return PTOV(pgd_val(pgd) & _TABLE_MASK); }

extern inline int pte_none(pte_t pte)           { return !pte_val(pte); }
extern inline int pte_present(pte_t pte)        { return pte_val(pte) & _PAGE_PRESENT; }
extern inline void pte_clear(pte_t *ptep)       { pte_val(*ptep) = 0; }

extern inline int pmd_none2(pmd_t *pmd)         { return !pmd_val(*pmd); }
#define pmd_none(pmd) pmd_none2(&(pmd))
extern inline int pmd_bad2(pmd_t *pmd)          { return (pmd_val(*pmd) & _DESCTYPE_MASK) != _PAGE_TABLE || pmd_page(*pmd) > high_memory; }
#define pmd_bad(pmd) pmd_bad2(&(pmd))
extern inline int pmd_present2(pmd_t *pmd)      { return pmd_val(*pmd) & _PAGE_TABLE; }
#define pmd_present(pmd) pmd_present2(&(pmd))
extern inline void pmd_clear(pmd_t * pmdp)
{
        short i;

        for (i = 15; i >= 0; i--)
                pmdp->pmd[i] = 0;
}

extern inline int pgd_none(pgd_t pgd)           { return !pgd_val(pgd); }
extern inline int pgd_bad(pgd_t pgd)            { return (pgd_val(pgd) & _DESCTYPE_MASK) != _PAGE_TABLE || pgd_page(pgd) > high_memory; }
extern inline int pgd_present(pgd_t pgd)        { return pgd_val(pgd) & _PAGE_TABLE; }

extern inline void pgd_clear(pgd_t * pgdp)      { pgd_val(*pgdp) = 0; }

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
extern inline int pte_read(pte_t pte)           { return 1; }
extern inline int pte_write(pte_t pte)          { return !(pte_val(pte) & _PAGE_RONLY); }
extern inline int pte_exec(pte_t pte)           { return 1; }
extern inline int pte_dirty(pte_t pte)          { return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte)          { return pte_val(pte) & _PAGE_ACCESSED; }

extern inline pte_t pte_wrprotect(pte_t pte)    { pte_val(pte) |= _PAGE_RONLY; return pte; }
extern inline pte_t pte_rdprotect(pte_t pte)    { return pte; }
extern inline pte_t pte_exprotect(pte_t pte)    { return pte; }
extern inline pte_t pte_mkclean(pte_t pte)      { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
extern inline pte_t pte_mkold(pte_t pte)        { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
extern inline pte_t pte_mkwrite(pte_t pte)      { pte_val(pte) &= ~_PAGE_RONLY; return pte; }
extern inline pte_t pte_mkread(pte_t pte)       { return pte; }
extern inline pte_t pte_mkexec(pte_t pte)       { return pte; }
extern inline pte_t pte_mkdirty(pte_t pte)      { pte_val(pte) |= _PAGE_DIRTY; return pte; }
extern inline pte_t pte_mkyoung(pte_t pte)      { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
extern inline pte_t pte_mknocache(pte_t pte)
{
        pte_val(pte) = (pte_val(pte) & _CACHEMASK040) | m68k_pgtable_cachemode;
        return pte;
}
extern inline pte_t pte_mkcache(pte_t pte)      { pte_val(pte) = (pte_val(pte) & _CACHEMASK040) | _PAGE_CACHE040; return pte; }

/* to set the page-dir */
extern inline void SET_PAGE_DIR(struct task_struct * tsk, pgd_t * pgdir)
{
        tsk->tss.pagedir_v = (unsigned long *)pgdir;
        tsk->tss.pagedir_p = VTOP(pgdir);
        tsk->tss.crp[0] = 0x80000000 | _PAGE_SHORT;
        tsk->tss.crp[1] = tsk->tss.pagedir_p;
        if (tsk == current) {
                if (m68k_is040or060)
                        __asm__ __volatile__ (".word 0xf510\n\t" /* pflushan */
                                              "movel %0@,%/d0\n\t"
                                              ".long 0x4e7b0806\n\t"
                                              /* movec d0,urp */
                                              : : "a" (&tsk->tss.crp[1])
                                              : "d0");
                else
                        __asm__ __volatile__ ("movec  %/cacr,%/d0\n\t"
                                              "oriw #0x0808,%/d0\n\t"
                                              "movec %/d0,%/cacr\n\t"
                                              "pmove %0@,%/crp\n\t"
                                              : : "a" (&tsk->tss.crp[0])
                                              : "d0");
        }
}

#define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))

/* to find an entry in a page-table-directory */
extern inline pgd_t * pgd_offset(struct mm_struct * mm, unsigned long address)
{
        return mm->pgd + (address >> PGDIR_SHIFT);
}

extern pgd_t swapper_pg_dir[128];
extern pgd_t kernel_pg_dir[128];

extern inline pgd_t * pgd_offset_k(unsigned long address)
{
        return kernel_pg_dir + (address >> PGDIR_SHIFT);
}

/* Find an entry in the second-level page table.. */
extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
        return (pmd_t *) pgd_page(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PMD-1));
}

/* Find an entry in the third-level page table.. */
extern inline pte_t * pte_offset(pmd_t * pmdp, unsigned long address)
{
        return (pte_t *) pmd_page(*pmdp) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}
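
/*
 * Putting the three levels together: a minimal illustrative sketch of
 * resolving a kernel virtual address addr down to its page with the
 * helpers above (not itself part of this header):
 *
 *      unsigned long page;
 *      pgd_t *pgd = pgd_offset_k(addr);                // bits 31-25
 *      if (!pgd_none(*pgd) && !pgd_bad(*pgd)) {
 *              pmd_t *pmd = pmd_offset(pgd, addr);     // bits 24-22
 *              if (!pmd_none(*pmd) && !pmd_bad(*pmd)) {
 *                      pte_t *pte = pte_offset(pmd, addr);  // bits 21-12
 *                      if (pte_present(*pte))
 *                              page = pte_page(*pte);  // kernel virtual
 *              }                                       // address, via PTOV
 *      }
 */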

/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */

extern inline void nocache_page (unsigned long vaddr)
{
        if (m68k_is040or060) {
                pgd_t *dir;
                pmd_t *pmdp;
                pte_t *ptep;

                dir = pgd_offset_k(vaddr);
                pmdp = pmd_offset(dir,vaddr);
                ptep = pte_offset(pmdp,vaddr);
                *ptep = pte_mknocache(*ptep);
        }
}

static inline void cache_page (unsigned long vaddr)
{
        if (m68k_is040or060) {
                pgd_t *dir;
                pmd_t *pmdp;
                pte_t *ptep;

                dir = pgd_offset_k(vaddr);
                pmdp = pmd_offset(dir,vaddr);
                ptep = pte_offset(pmdp,vaddr);
                *ptep = pte_mkcache(*ptep);
        }
}

extern const char PgtabStr_bad_pmd[];
extern const char PgtabStr_bad_pgd[];
extern const char PgtabStr_bad_pmdk[];
extern const char PgtabStr_bad_pgdk[];

extern inline void pte_free(pte_t * pte)
{
        cache_page((unsigned long)pte);
        free_page((unsigned long) pte);
}

extern inline pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
{
        address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
        if (pmd_none(*pmd)) {
                pte_t * page = (pte_t *)get_free_page(GFP_KERNEL);
                if (pmd_none(*pmd)) {
                        if (page) {
                                nocache_page((unsigned long)page);
                                pmd_set(pmd,page);
                                return page + address;
                        }
                        pmd_set(pmd, BAD_PAGETABLE);
                        return NULL;
                }
                free_page((unsigned long)page);
        }
        if (pmd_bad(*pmd)) {
                printk(PgtabStr_bad_pmd, pmd_val(*pmd));
                pmd_set(pmd, BAD_PAGETABLE);
                return NULL;
        }
        return (pte_t *) pmd_page(*pmd) + address;
}
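
/*
 * Note on the double pmd_none() test above (and in pte_alloc_kernel()
 * below): get_free_page(GFP_KERNEL) may sleep, so by the time the page
 * arrives another context may already have installed a pte table in
 * this pmd; in that case the freshly allocated page is freed again and
 * the existing table is used.
 */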

extern pmd_t *get_pointer_table (void);
extern void free_pointer_table (pmd_t *);
extern pmd_t *get_kpointer_table (void);
extern void free_kpointer_table (pmd_t *);

extern inline void pmd_free(pmd_t * pmd)
{
        free_pointer_table (pmd);
}

extern inline pmd_t * pmd_alloc(pgd_t * pgd, unsigned long address)
{
        address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
        if (pgd_none(*pgd)) {
                pmd_t *page = get_pointer_table();
                if (pgd_none(*pgd)) {
                        if (page) {
                                pgd_set(pgd, page);
                                return page + address;
                        }
                        pgd_set(pgd, (pmd_t *)BAD_PAGETABLE);
                        return NULL;
                }
                free_pointer_table(page);
        }
        if (pgd_bad(*pgd)) {
                printk(PgtabStr_bad_pgd, pgd_val(*pgd));
                pgd_set(pgd, (pmd_t *)BAD_PAGETABLE);
                return NULL;
        }
        return (pmd_t *) pgd_page(*pgd) + address;
}

extern inline void pte_free_kernel(pte_t * pte)
{
        cache_page((unsigned long)pte);
        free_page((unsigned long) pte);
}

extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
{
        address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
        if (pmd_none(*pmd)) {
                pte_t * page = (pte_t *) get_free_page(GFP_KERNEL);
                if (pmd_none(*pmd)) {
                        if (page) {
                                nocache_page((unsigned long)page);
                                pmd_set(pmd, page);
                                return page + address;
                        }
                        pmd_set(pmd, BAD_PAGETABLE);
                        return NULL;
                }
                free_page((unsigned long) page);
        }
        if (pmd_bad(*pmd)) {
                printk(PgtabStr_bad_pmdk, pmd_val(*pmd));
                pmd_set(pmd, BAD_PAGETABLE);
                return NULL;
        }
        return (pte_t *) pmd_page(*pmd) + address;
}

extern inline void pmd_free_kernel(pmd_t * pmd)
{
        free_kpointer_table(pmd);
}

extern inline pmd_t * pmd_alloc_kernel(pgd_t * pgd, unsigned long address)
{
        address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
        if (pgd_none(*pgd)) {
                pmd_t *page = get_kpointer_table();
                if (pgd_none(*pgd)) {
                        if (page) {
                                pgd_set(pgd, page);
                                return page + address;
                        }
                        pgd_set(pgd, (pmd_t *)BAD_PAGETABLE);
                        return NULL;
                }
                free_kpointer_table(page);
        }
        if (pgd_bad(*pgd)) {
                printk(PgtabStr_bad_pgdk, pgd_val(*pgd));
                pgd_set(pgd, (pmd_t *)BAD_PAGETABLE);
                return NULL;
        }
        return (pmd_t *) pgd_page(*pgd) + address;
}

extern inline void pgd_free(pgd_t * pgd)
{
        free_pointer_table ((pmd_t *) pgd);
}

extern inline pgd_t * pgd_alloc(void)
{
        return (pgd_t *)get_pointer_table ();
}

#define flush_icache() \
do { \
        if (m68k_is040or060) \
                asm ("nop; .word 0xf498 /* cinva %%ic */"); \
        else \
                asm ("movec %/cacr,%/d0;" \
                     "oriw %0,%/d0;" \
                     "movec %/d0,%/cacr" \
                     : /* no outputs */ \
                     : "i" (FLUSH_I) \
                     : "d0"); \
} while (0)

/*
 * Invalidate the cache for the specified memory range: it starts at
 * the given physical address and covers the given number of bytes.
 */
extern void cache_clear (unsigned long paddr, int len);
/*
 * Push any dirty cache lines in the specified memory range: it starts
 * at the given physical address and covers the given number of bytes.
 */
extern void cache_push (unsigned long paddr, int len);

/*
 * Push and invalidate pages in the specified user virtual
 * memory range.
 */
extern void cache_push_v (unsigned long vaddr, int len);

/* cache code */
#define FLUSH_I_AND_D   (0x00000808)
#define FLUSH_I         (0x00000008)

/* This is needed whenever the virtual mapping of the current
   process changes.  */
#define __flush_cache_all()                                             \
    do {                                                                \
        if (m68k_is040or060)                                            \
               __asm__ __volatile__ ("nop; .word 0xf478\n" ::);         \
        else                                                            \
               __asm__ __volatile__ ("movec %%cacr,%%d0\n\t"            \
                                     "orw %0,%%d0\n\t"                  \
                                     "movec %%d0,%%cacr"                \
                                     : : "di" (FLUSH_I_AND_D) : "d0");  \
    } while (0)

#define __flush_cache_030()                                             \
    do {                                                                \
        if (m68k_is040or060 == 0)                                       \
               __asm__ __volatile__ ("movec %%cacr,%%d0\n\t"            \
                                     "orw %0,%%d0\n\t"                  \
                                     "movec %%d0,%%cacr"                \
                                     : : "di" (FLUSH_I_AND_D) : "d0");  \
    } while (0)

#define flush_cache_all() __flush_cache_all()

extern inline void flush_cache_mm(struct mm_struct *mm)
{
        if (mm == current->mm) __flush_cache_all();
}

extern inline void flush_cache_range(struct mm_struct *mm,
                                     unsigned long start,
                                     unsigned long end)
{
        if (mm == current->mm) {
            if (m68k_is040or060)
                cache_push_v(start, end-start);
            else
                __flush_cache_030();
        }
}

extern inline void flush_cache_page(struct vm_area_struct *vma,
                                    unsigned long vmaddr)
{
        if (vma->vm_mm == current->mm) {
            if (m68k_is040or060)
                cache_push_v(vmaddr, PAGE_SIZE);
            else
                __flush_cache_030();
        }
}

/* Push the page at the given kernel virtual address and clear the icache */
extern inline void flush_page_to_ram (unsigned long address)
{
    if (m68k_is040or060) {
        register unsigned long tmp __asm ("a0") = VTOP(address);
        __asm__ __volatile__ ("nop\n\t"
                              ".word 0xf470 /* cpushp %%dc,(%0) */\n\t"
                              ".word 0xf490 /* cinvp %%ic,(%0) */"
                              : : "a" (tmp));
    }
    else
        __asm volatile ("movec %%cacr,%%d0\n\t"
                        "orw %0,%%d0\n\t"
                        "movec %%d0,%%cacr"
                        : : "di" (FLUSH_I) : "d0");
}

/* Push n pages at the given kernel virtual address and clear the icache */
extern inline void flush_pages_to_ram (unsigned long address, int n)
{
    if (m68k_is040or060) {
        while (n--) {
            register unsigned long tmp __asm ("a0") = VTOP(address);
            __asm__ __volatile__ ("nop\n\t"
                                  ".word 0xf470 /* cpushp %%dc,(%0) */\n\t"
                                  ".word 0xf490 /* cinvp %%ic,(%0) */"
                                  : : "a" (tmp));
            address += PAGE_SIZE;
        }
    }
    else
        __asm volatile ("movec %%cacr,%%d0\n\t"
                        "orw %0,%%d0\n\t"
                        "movec %%d0,%%cacr"
                        : : "di" (FLUSH_I) : "d0");
}

/*
 * Check whether addr/len reaches the end of a physical
 * memory chunk.  Used for DMA functions.
 */
int mm_end_of_chunk (unsigned long addr, int len);

/*
 * Map some physical address range into the kernel address space.  The
 * code is copied and adapted from map_chunk().
 */
extern unsigned long kernel_map(unsigned long paddr, unsigned long size,
                                int nocacheflag, unsigned long *memavailp);
/*
 * Change the cache mode of some kernel address range.
 */
extern void kernel_set_cachemode(unsigned long address, unsigned long size,
                                 unsigned cmode);

/* Values for nocacheflag and cmode */
#define KERNELMAP_FULL_CACHING          0
#define KERNELMAP_NOCACHE_SER           1
#define KERNELMAP_NOCACHE_NONSER        2
#define KERNELMAP_NO_COPYBACK           3
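
/*
 * A hypothetical call (addresses made up for illustration) mapping 1MB
 * of a device's physical address space uncached and serialized, using
 * the declarations above:
 *
 *      unsigned long virt = kernel_map(0x40000000, 0x100000,
 *                                      KERNELMAP_NOCACHE_SER, NULL);
 *
 * (memavailp appears to matter only for early-boot allocation; passing
 * NULL here is an assumption.)
 */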

/*
 * The m68k doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
extern inline void update_mmu_cache(struct vm_area_struct * vma,
        unsigned long address, pte_t pte)
{
}

/*
 * I don't know what is going on here, but since these were changed,
 * swapping hasn't been working on the 68040.
 */

#define SWP_TYPE(entry)  (((entry) >> 2) & 0x7f)
#if 0
#define SWP_OFFSET(entry) ((entry) >> 9)
#define SWP_ENTRY(type,offset) (((type) << 2) | ((offset) << 9))
#else
#define SWP_OFFSET(entry) ((entry) >> PAGE_SHIFT)
#define SWP_ENTRY(type,offset) (((type) << 2) | ((offset) << PAGE_SHIFT))
#endif
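
/*
 * With the active (#else) definitions, a swap entry packs into a
 * non-present pte as: bits 0-1 clear (so pte_present() is false), the
 * swap type in bits 2-8 (7 bits, up to 128 swap areas), and the page
 * offset within the swap area above PAGE_SHIFT.  For example,
 * SWP_ENTRY(3, 10) = (3 << 2) | (10 << 12) = 0x0000a00c.
 */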

#endif /* __ASSEMBLY__ */

#endif /* _M68K_PGTABLE_H */
