/*
 *  linux/arch/m68k/mm/memory.c
 *
 *  Copyright (C) 1995  Hamish Macdonald
 */

#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/malloc.h>

#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/traps.h>
#include <asm/amigahw.h>
#include <asm/bootinfo.h>

extern pte_t *kernel_page_table (unsigned long *memavailp);

/* Strings for `extern inline' functions in <asm/pgtable.h>.  If put
   directly into these functions, they are output for every file that
   includes pgtable.h */

const char PgtabStr_bad_pmd[] = "Bad pmd in pte_alloc: %08lx\n";
const char PgtabStr_bad_pgd[] = "Bad pgd in pmd_alloc: %08lx\n";
const char PgtabStr_bad_pmdk[] = "Bad pmd in pte_alloc_kernel: %08lx\n";
const char PgtabStr_bad_pgdk[] = "Bad pgd in pmd_alloc_kernel: %08lx\n";

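/* Descriptor for one page that is carved up into user pointer tables.
 * Each page holds 8 pointer tables of PTABLE_SIZE bytes; the bits of
 * `alloced' record which of the 8 slots are in use, and the descriptors
 * are kept on a doubly linked list with pages that still have free slots
 * near the front (see get_pointer_table/free_pointer_table below).
 */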
static struct ptable_desc {
        struct ptable_desc *prev;
        struct ptable_desc *next;
        unsigned long      page;
        unsigned char      alloced;
} ptable_list = { &ptable_list, &ptable_list, 0, 0xff };

#define PD_NONEFREE(dp) ((dp)->alloced == 0xff)
#define PD_ALLFREE(dp) ((dp)->alloced == 0)
#define PD_TABLEFREE(dp,i) (!((dp)->alloced & (1<<(i))))
#define PD_MARKUSED(dp,i) ((dp)->alloced |= (1<<(i)))
#define PD_MARKFREE(dp,i) ((dp)->alloced &= ~(1<<(i)))

#define PTABLE_SIZE (PTRS_PER_PMD * sizeof(pmd_t))

pmd_t *get_pointer_table (void)
{
        pmd_t *pmdp = NULL;
        unsigned long flags;
        struct ptable_desc *dp = ptable_list.next;
        int i;

        /*
         * For a pointer table for a user process address space, a
         * table is taken from a page allocated for the purpose.  Each
         * page can hold 8 pointer tables.  The page is remapped in
         * virtual address space to be noncacheable.
         */
        if (PD_NONEFREE (dp)) {

                if (!(dp = kmalloc (sizeof(struct ptable_desc),GFP_KERNEL))) {
                        return 0;
                }

                if (!(dp->page = __get_free_page (GFP_KERNEL))) {
                        kfree (dp);
                        return 0;
                }

                nocache_page (dp->page);

                dp->alloced = 0;
                /* put at head of list */
                save_flags(flags);
                cli();
                dp->next = ptable_list.next;
                dp->prev = ptable_list.next->prev;
                ptable_list.next->prev = dp;
                ptable_list.next = dp;
                restore_flags(flags);
        }

        for (i = 0; i < 8; i++)
                if (PD_TABLEFREE (dp, i)) {
                        PD_MARKUSED (dp, i);
                        pmdp = (pmd_t *)(dp->page + PTABLE_SIZE*i);
                        break;
                }

        if (PD_NONEFREE (dp)) {
                /* move to end of list */
                save_flags(flags);
                cli();
                dp->prev->next = dp->next;
                dp->next->prev = dp->prev;

                dp->next = ptable_list.next->prev;
                dp->prev = ptable_list.prev;
                ptable_list.prev->next = dp;
                ptable_list.prev = dp;
                restore_flags(flags);
        }

        memset (pmdp, 0, PTABLE_SIZE);

        return pmdp;
}

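/* Release one pointer table back to its page.  When all 8 tables in the
 * page are free, the page is made cacheable again and returned to the
 * page allocator; otherwise the descriptor is moved to the front of the
 * list so its free slots are found first.
 */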
void free_pointer_table (pmd_t *ptable)
{
        struct ptable_desc *dp;
        unsigned long page = (unsigned long)ptable & PAGE_MASK;
        int index = ((unsigned long)ptable - page)/PTABLE_SIZE;
        unsigned long flags;

        for (dp = ptable_list.next; dp->page && dp->page != page; dp = dp->next)
                ;

        if (!dp->page)
                panic ("unable to find desc for ptable %p on list!", ptable);

        if (PD_TABLEFREE (dp, index))
                panic ("table already free!");

        PD_MARKFREE (dp, index);

        if (PD_ALLFREE (dp)) {
                /* all tables in page are free, free page */
                save_flags(flags);
                cli();
                dp->prev->next = dp->next;
                dp->next->prev = dp->prev;
                restore_flags(flags);
                cache_page (dp->page);
                free_page (dp->page);
                kfree (dp);
                return;
        } else {
                /*
                 * move this descriptor to the front of the list, since
                 * it has one or more free tables.
                 */
                save_flags(flags);
                cli();
                dp->prev->next = dp->next;
                dp->next->prev = dp->prev;

                dp->next = ptable_list.next;
                dp->prev = ptable_list.next->prev;
                ptable_list.next->prev = dp;
                ptable_list.next = dp;
                restore_flags(flags);
        }
}

/* maximum pages used for kpointer tables */
#define KPTR_PAGES      4
/* # of reserved slots */
#define RESERVED_KPTR   4
extern pmd_tablepage kernel_pmd_table; /* reserved in head.S */

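/* Bookkeeping for kernel pointer tables: up to KPTR_PAGES pages, each
 * holding 8 pmd tables.  alloced[i] is a bitmap of used slots in page[i],
 * numbered from the most significant bit so that it matches the
 * bfffo/bfset/bfclr bit-field instructions used below.
 */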
static struct kpointer_pages {
        pmd_tablepage *page[KPTR_PAGES];
        u_char alloced[KPTR_PAGES];
} kptr_pages;

void init_kpointer_table(void) {
        short i = KPTR_PAGES-1;

        /* first page is reserved in head.S */
        kptr_pages.page[i] = &kernel_pmd_table;
        kptr_pages.alloced[i] = ~(0xff>>RESERVED_KPTR);
        for (i--; i>=0; i--) {
                kptr_pages.page[i] = NULL;
                kptr_pages.alloced[i] = 0;
        }
}

pmd_t *get_kpointer_table (void)
{
        /* For pointer tables for the kernel virtual address space,
         * use the page that is reserved in head.S that can hold up to
         * 8 pointer tables. 3 of these tables are always reserved
         * (kernel_pg_dir, swapper_pg_dir and kernel pointer table for
         * the first 16 MB of RAM). In addition, the 4th pointer table
         * in this page is reserved. On Amiga and Atari, it is used to
         * map in the hardware registers. It may be used for other
         * purposes on other 68k machines. This leaves 4 pointer tables
         * available for use by the kernel. 1 of them is usually used
         * for the vmalloc tables. This allows mapping of 3 * 32 = 96 MB
         * of physical memory. But these pointer tables are also used
         * for other purposes, like kernel_map(), so further pages can
         * now be allocated.
         */
        pmd_tablepage *page;
        pmd_table *table;
        long nr, offset = -8;
        short i;

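        /* Scan the pages for a free table slot.  The bfffo instruction
         * searches ~alloced[i] (a mask of the free slots) for its first
         * set bit; nr is left at 0 if the page is full, otherwise
         * nr-offset gives the index (0..7) of the first free table.
         */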
        for (i=KPTR_PAGES-1; i>=0; i--) {
                asm volatile("bfffo %1{%2,#8},%0"
                        : "=d" (nr)
                        : "d" ((u_char)~kptr_pages.alloced[i]), "d" (offset));
                if (nr)
                        break;
        }
        if (i < 0) {
                printk("No space for kernel pointer table!\n");
                return NULL;
        }
        if (!(page = kptr_pages.page[i])) {
                if (!(page = (pmd_tablepage *)__get_free_page(GFP_KERNEL))) {
                        printk("No space for kernel pointer table!\n");
                        return NULL;
                }
                nocache_page((u_long)(kptr_pages.page[i] = page));
        }
        asm volatile("bfset %0@{%1,#1}"
                : /* no output */
                : "a" (&kptr_pages.alloced[i]), "d" (nr-offset));
        table = &(*page)[nr-offset];
        memset(table, 0, sizeof(pmd_table));
        return ((pmd_t *)table);
}

void free_kpointer_table (pmd_t *pmdp)
{
        pmd_table *table = (pmd_table *)pmdp;
        pmd_tablepage *page = (pmd_tablepage *)((u_long)table & PAGE_MASK);
        long nr;
        short i;

        for (i=KPTR_PAGES-1; i>=0; i--) {
                if (kptr_pages.page[i] == page)
                        break;
        }
        nr = ((u_long)table - (u_long)page) / sizeof(pmd_table);
        if (!table || i < 0 || (i == KPTR_PAGES-1 && nr < RESERVED_KPTR)) {
                printk("Attempt to free invalid kernel pointer table: %p\n", table);
                return;
        }
        asm volatile("bfclr %0@{%1,#1}"
                : /* no output */
                : "a" (&kptr_pages.alloced[i]), "d" (nr));
        if (!kptr_pages.alloced[i]) {
                kptr_pages.page[i] = 0;
                cache_page ((u_long)page);
                free_page ((u_long)page);
        }
}

/*
254
 * The following two routines map from a physical address to a kernel
255
 * virtual address and vice versa.
256
 */
257
unsigned long mm_vtop (unsigned long vaddr)
258
{
259
        int i;
260
        unsigned long voff = vaddr;
261
        unsigned long offset = 0;
262
 
263
        for (i = 0; i < boot_info.num_memory; i++)
264
        {
265
                if (voff < offset + boot_info.memory[i].size) {
266
#ifdef DEBUGPV
267
                        printk ("VTOP(%lx)=%lx\n", vaddr,
268
                                boot_info.memory[i].addr + voff - offset);
269
#endif
270
                        return boot_info.memory[i].addr + voff - offset;
271
                } else
272
                        offset += boot_info.memory[i].size;
273
        }
274
 
275
        /* not in one of the memory chunks; get the actual
276
         * physical address from the MMU.
277
         */
278
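        /* m68k_is040or060 is 4 on a 68040 and 6 on a 68060: the '060 can
         * use PLPAR to look up the translation directly, while the '040
         * uses PTESTR and reads the result back from the MMUSR.
         */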
        if (m68k_is040or060 == 6) {
          unsigned long fs = get_fs();
          unsigned long  paddr;

          set_fs (SUPER_DATA);

          /* The PLPAR instruction causes an access error if the translation
           * is not possible. We don't catch that here, so a bad kernel trap
           * will be reported in this case. */
          asm volatile ("movel %1,%/a0\n\t"
                        ".word 0xf5c8\n\t"      /* plpar (a0) */
                        "movel %/a0,%0"
                        : "=g" (paddr)
                        : "g" (vaddr)
                        : "a0" );
          set_fs (fs);

          return paddr;

        } else if (m68k_is040or060 == 4) {
          unsigned long mmusr;
          unsigned long fs = get_fs();

          set_fs (SUPER_DATA);

          asm volatile ("movel %1,%/a0\n\t"
                        ".word 0xf568\n\t"      /* ptestr (a0) */
                        ".long 0x4e7a8805\n\t"  /* movec mmusr, a0 */
                        "movel %/a0,%0"
                        : "=g" (mmusr)
                        : "g" (vaddr)
                        : "a0", "d0");
          set_fs (fs);

          if (mmusr & MMU_R_040)
            return (mmusr & PAGE_MASK) | (vaddr & (PAGE_SIZE-1));

          panic ("VTOP040: bad virtual address %08lx (%lx)", vaddr, mmusr);
        } else {
          volatile unsigned short temp;
          unsigned short mmusr;
          unsigned long *descaddr;

          asm volatile ("ptestr #5,%2@,#7,%0\n\t"
                        "pmove %/psr,%1@"
                        : "=a&" (descaddr)
                        : "a" (&temp), "a" (vaddr));
          mmusr = temp;

          if (mmusr & (MMU_I|MMU_B|MMU_L))
            panic ("VTOP030: bad virtual address %08lx (%x)", vaddr, mmusr);

          descaddr = (unsigned long *)PTOV(descaddr);

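          /* MMU_NUM is the number of table levels the '030 walked before
           * the search ended: levels 1 and 2 are early-termination
           * descriptors covering 32 MB and 256 KB respectively, level 3
           * is a normal page descriptor.
           */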
          switch (mmusr & MMU_NUM) {
          case 1:
            return (*descaddr & 0xfe000000) | (vaddr & 0x01ffffff);
          case 2:
            return (*descaddr & 0xfffc0000) | (vaddr & 0x0003ffff);
          case 3:
            return (*descaddr & PAGE_MASK) | (vaddr & (PAGE_SIZE-1));
          default:
            panic ("VTOP: bad levels (%u) for virtual address %08lx",
                   mmusr & MMU_NUM, vaddr);
          }
        }

        panic ("VTOP: bad virtual address %08lx", vaddr);
}

unsigned long mm_ptov (unsigned long paddr)
{
        int i;
        unsigned long offset = 0;

        for (i = 0; i < boot_info.num_memory; i++)
        {
                if (paddr >= boot_info.memory[i].addr &&
                    paddr < (boot_info.memory[i].addr
                             + boot_info.memory[i].size)) {
#ifdef DEBUGPV
                        printk ("PTOV(%lx)=%lx\n", paddr,
                                (paddr - boot_info.memory[i].addr) + offset);
#endif
                        return (paddr - boot_info.memory[i].addr) + offset;
                } else
                        offset += boot_info.memory[i].size;
        }

        /*
         * assume that the kernel virtual address is the same as the
         * physical address.
         *
         * This should be reasonable in most situations:
         *  1) They shouldn't be dereferencing the virtual address
         *     unless they are sure that it is valid from kernel space.
         *  2) The only usage I see so far is converting a page table
         *     reference to some non-FASTMEM address space when freeing
         *     mmaped "/dev/mem" pages.  These addresses are just passed
         *     to "free_page", which ignores addresses that aren't in
         *     the memory list anyway.
         *
         */

        /*
         * if on an amiga and address is in first 16M, move it
         * to the ZTWO_ADDR range
         */
        if (MACH_IS_AMIGA && paddr < 16*1024*1024)
                return ZTWO_VADDR(paddr);
        return paddr;
}

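/* The macros below wrap the '040/'060 cache and MMU instructions
 * (CINVP, CPUSHP, PTESTR, PLPA).  They are emitted as raw .word/.long
 * opcodes, presumably because the assembler in use did not know these
 * mnemonics; the comment next to each opcode names the intended
 * instruction.
 */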
/* invalidate page in both caches */
#define clear040(paddr) __asm__ __volatile__ ("movel %0,%/a0\n\t"\
                                              "nop\n\t"\
                                              ".word 0xf4d0"\
                                              /* CINVP I/D (a0) */\
                                              : : "g" ((paddr))\
                                              : "a0")

/* invalidate page in i-cache */
#define cleari040(paddr) __asm__ __volatile__ ("movel %0,%/a0\n\t"\
                                               /* CINVP I (a0) */\
                                               "nop\n\t"\
                                               ".word 0xf490"\
                                               : : "g" ((paddr))\
                                               : "a0")

/* push page in both caches */
#define push040(paddr) __asm__ __volatile__ ("movel %0,%/a0\n\t"\
                                              "nop\n\t"\
                                             ".word 0xf4f0"\
                                             /* CPUSHP I/D (a0) */\
                                             : : "g" ((paddr))\
                                             : "a0")

/* push and invalidate page in both caches */
#define pushcl040(paddr) do { push040((paddr));\
                              if (m68k_is040or060 == 6) clear040((paddr));\
                         } while(0)

/* push page in both caches, invalidate in i-cache */
#define pushcli040(paddr) do { push040((paddr));\
                               if (m68k_is040or060 == 6) cleari040((paddr));\
                          } while(0)

/* push page defined by virtual address in both caches */
#define pushv040(vaddr) __asm__ __volatile__ ("movel %0,%/a0\n\t"\
                                              /* ptestr (a0) */\
                                              ".word 0xf568\n\t"\
                                              /* movec mmusr,d0 */\
                                              ".long 0x4e7a0805\n\t"\
                                              "andw #0xf000,%/d0\n\t"\
                                              "movel %/d0,%/a0\n\t"\
                                              /* CPUSHP I/D (a0) */\
                                              "nop\n\t"\
                                              ".word 0xf4f0"\
                                              : : "g" ((vaddr))\
                                              : "a0", "d0")

/* push page defined by virtual address in both caches */
#define pushv060(vaddr) __asm__ __volatile__ ("movel %0,%/a0\n\t"\
                                              /* plpar (a0) */\
                                              ".word 0xf5c8\n\t"\
                                              /* CPUSHP I/D (a0) */\
                                              ".word 0xf4f0"\
                                              : : "g" ((vaddr))\
                                              : "a0")


/*
 * 040: Hit every page containing an address in the range paddr..paddr+len-1.
 * (Low order bits of the ea of a CINVP/CPUSHP are "don't care"s).
 * Hit every page until there is a page or less to go. Hit the next page,
 * and the one after that if the range hits it.
 */
/* ++roman: A little bit more care is required here: The CINVP instruction
 * invalidates cache entries WITHOUT WRITING DIRTY DATA BACK! So the beginning
 * and the end of the region must be treated differently if they are not
 * exactly at the beginning or end of a page boundary. Else, maybe too much
 * data becomes invalidated and thus lost forever. CPUSHP does what we need:
 * it invalidates the page after pushing dirty data to memory. (Thanks to Jes
 * for discovering the problem!)
 */
/* ... but on the '060, CPUSH doesn't invalidate (for us, since we have set
 * the DPI bit in the CACR; would it cause problems with temporarily changing
 * this?). So we have to push first and then additionally invalidate.
 */

/*
 * cache_clear() semantics: Clear any cache entries for the area in question,
 * without writing back dirty entries first. This is useful if the data will
 * be overwritten anyway, e.g. by DMA to memory. The range is defined by a
 * _physical_ address.
 */

void cache_clear (unsigned long paddr, int len)
{
    if (m68k_is040or060) {
        /* ++roman: There have been too many problems with the CINV, it seems
         * to break the cache maintenance of DMAing drivers. I don't expect
         * too much overhead by using CPUSH instead.
         */
        while (len > PAGE_SIZE) {
            pushcl040(paddr);
            len -= PAGE_SIZE;
            paddr += PAGE_SIZE;
        }
        if (len > 0) {
            pushcl040(paddr);
            if (((paddr + len - 1) ^ paddr) & PAGE_MASK) {
                /* a page boundary gets crossed at the end */
                pushcl040(paddr + len - 1);
            }
        }
    }
#if 0
        /* on 68040, invalidate cache lines for pages in the range */
        while (len > PAGE_SIZE) {
            clear040(paddr);
            len -= PAGE_SIZE;
            paddr += PAGE_SIZE;
            }
        if (len > 0) {
            /* 0 < len <= PAGE_SIZE */
            clear040(paddr);
            if (((paddr + len - 1) / PAGE_SIZE) != (paddr / PAGE_SIZE)) {
                /* a page boundary gets crossed at the end */
                clear040(paddr + len - 1);
                }
            }
#endif
    else /* 68030 or 68020 */
        asm volatile ("movec %/cacr,%/d0\n\t"
                      "oriw %0,%/d0\n\t"
                      "movec %/d0,%/cacr"
                      : : "i" (FLUSH_I_AND_D)
                      : "d0");
}


/*
 * cache_push() semantics: Write back any dirty cache data in the given area,
 * and invalidate the range in the instruction cache. It need not (but may)
 * invalidate those entries also in the data cache. The range is defined by a
 * _physical_ address.
 */

void cache_push (unsigned long paddr, int len)
{
    if (m68k_is040or060) {
        /*
         * on 68040 or 68060, push cache lines for pages in the range;
         * on the '040 this also invalidates the pushed lines, but not on
         * the '060!
         */
        while (len > PAGE_SIZE) {
            pushcli040(paddr);
            len -= PAGE_SIZE;
            paddr += PAGE_SIZE;
            }
        if (len > 0) {
            pushcli040(paddr);
#if 0
            if (((paddr + len - 1) / PAGE_SIZE) != (paddr / PAGE_SIZE)) {
#endif
            if (((paddr + len - 1) ^ paddr) & PAGE_MASK) {
                /* a page boundary gets crossed at the end */
                pushcli040(paddr + len - 1);
                }
            }
        }


    /*
     * 68030/68020 have no writeback cache. On the other hand,
     * cache_push is actually a superset of cache_clear (the lines
     * get written back and invalidated), so we should make sure
     * to perform the corresponding actions. After all, this is getting
     * called in places where we've just loaded code, or whatever, so
     * flushing the icache is appropriate; flushing the dcache shouldn't
     * be required.
     */
    else /* 68030 or 68020 */
        asm volatile ("movec %/cacr,%/d0\n\t"
                      "oriw %0,%/d0\n\t"
                      "movec %/d0,%/cacr"
                      : : "i" (FLUSH_I)
                      : "d0");
}

 
571
/*
572
 * cache_push_v() semantics: Write back any dirty cache data in the given
573
 * area, and invalidate those entries at least in the instruction cache. This
574
 * is intended to be used after data has been written that can be executed as
575
 * code later. The range is defined by a _user_mode_ _virtual_ address  (or,
576
 * more exactly, the space is defined by the %sfc/%dfc register.)
577
 */
578
 
579
void cache_push_v (unsigned long vaddr, int len)
580
{
581
    if (m68k_is040or060 == 4) {
582
        /* on 68040, push cache lines for pages in the range */
583
        while (len > PAGE_SIZE) {
584
            pushv040(vaddr);
585
            len -= PAGE_SIZE;
586
            vaddr += PAGE_SIZE;
587
            }
588
        if (len > 0) {
589
            pushv040(vaddr);
590
#if 0
591
            if (((vaddr + len - 1) / PAGE_SIZE) != (vaddr / PAGE_SIZE)) {
592
#endif
593
            if (((vaddr + len - 1) ^ vaddr) & PAGE_MASK) {
594
                /* a page boundary gets crossed at the end */
595
                pushv040(vaddr + len - 1);
596
                }
597
            }
598
        }
599
    else if (m68k_is040or060 == 6) {
600
        /* on 68040, push cache lines for pages in the range */
601
        while (len > PAGE_SIZE) {
602
            pushv060(vaddr);
603
            len -= PAGE_SIZE;
604
            vaddr += PAGE_SIZE;
605
        }
606
        if (len > 0) {
607
            pushv060(vaddr);
608
            if (((vaddr + len - 1) ^ vaddr) & PAGE_MASK) {
609
                /* a page boundary gets crossed at the end */
610
                pushv060(vaddr + len - 1);
611
            }
612
        }
613
    }
614
    /* 68030/68020 have no writeback cache; still need to clear icache. */
615
    else /* 68030 or 68020 */
616
        asm volatile ("movec %/cacr,%/d0\n\t"
617
                      "oriw %0,%/d0\n\t"
618
                      "movec %/d0,%/cacr"
619
                      : : "i" (FLUSH_I)
620
                      : "d0");
621
}
622
 
623
#undef clear040
#undef cleari040
#undef push040
#undef pushcl040
#undef pushcli040
#undef pushv040
#undef pushv060

unsigned long mm_phys_to_virt (unsigned long addr)
{
    return PTOV (addr);
}

int mm_end_of_chunk (unsigned long addr, int len)
{
        int i;

        for (i = 0; i < boot_info.num_memory; i++)
                if (boot_info.memory[i].addr + boot_info.memory[i].size
                    == addr + len)
                        return 1;
        return 0;
}

/* Map some physical address range into the kernel address space. The
 * code is copied and adapted from map_chunk().
 */

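/* The mapping is done in 256 KB (STEP_SIZE) steps: on the '040/'060 each
 * step fills 64 page table entries in a kernel page table, on the '030
 * each step is a single early-termination pointer descriptor.  Extra
 * pointer tables and page tables are allocated on demand via
 * get_kpointer_table() and kernel_page_table().
 */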
unsigned long kernel_map(unsigned long paddr, unsigned long size,
                         int nocacheflag, unsigned long *memavailp )
{
#define STEP_SIZE       (256*1024)

        static unsigned long vaddr = 0xe0000000; /* safe place */
        unsigned long physaddr, retaddr;
        pte_t *ktablep = NULL;
        pmd_t *kpointerp;
        pgd_t *page_dir;
        int pindex;   /* index into pointer table */
        int prot;

        /* Round down 'paddr' to 256 KB and adjust size */
        physaddr = paddr & ~(STEP_SIZE-1);
        size += paddr - physaddr;
        retaddr = vaddr + (paddr - physaddr);
        paddr = physaddr;
        /* Round up the size to 256 KB. It doesn't hurt if too much is
         * mapped... */
        size = (size + STEP_SIZE - 1) & ~(STEP_SIZE-1);

        if (m68k_is040or060) {
                prot = _PAGE_PRESENT | _PAGE_GLOBAL040;
                switch( nocacheflag ) {
                  case KERNELMAP_FULL_CACHING:
                        prot |= _PAGE_CACHE040;
                        break;
                  case KERNELMAP_NOCACHE_SER:
                  default:
                        prot |= _PAGE_NOCACHE_S;
                        break;
                  case KERNELMAP_NOCACHE_NONSER:
                        prot |= _PAGE_NOCACHE;
                        break;
                  case KERNELMAP_NO_COPYBACK:
                        prot |= _PAGE_CACHE040W;
                        /* prot |= 0; */
                        break;
                }
        } else
                prot = _PAGE_PRESENT |
                           ((nocacheflag == KERNELMAP_FULL_CACHING ||
                                 nocacheflag == KERNELMAP_NO_COPYBACK) ? 0 : _PAGE_NOCACHE030);

        page_dir = pgd_offset_k(vaddr);
        if (pgd_present(*page_dir)) {
                kpointerp = (pmd_t *)pgd_page(*page_dir);
                pindex = (vaddr >> 18) & 0x7f;
                if (pindex != 0 && m68k_is040or060) {
                        if (pmd_present(*kpointerp))
                                ktablep = (pte_t *)pmd_page(*kpointerp);
                        else {
                                ktablep = kernel_page_table (memavailp);
                                /* Make entries invalid */
                                memset( ktablep, 0, sizeof(long)*PTRS_PER_PTE);
                                pmd_set(kpointerp,ktablep);
                        }
                        ktablep += (pindex & 15)*64;
                }
        }
        else {
                /* we need a new pointer table */
                kpointerp = get_kpointer_table ();
                pgd_set(page_dir, (pmd_t *)kpointerp);
                memset( kpointerp, 0, PTRS_PER_PMD*sizeof(pmd_t));
                pindex = 0;
        }

        for (physaddr = paddr; physaddr < paddr + size; vaddr += STEP_SIZE) {

                if (pindex > 127) {
                        /* we need a new pointer table */
                        kpointerp = get_kpointer_table ();
                        pgd_set(pgd_offset_k(vaddr), (pmd_t *)kpointerp);
                        memset( kpointerp, 0, PTRS_PER_PMD*sizeof(pmd_t));
                        pindex = 0;
                }

                if (m68k_is040or060) {
                        int i;
                        unsigned long ktable;

                        /*
                         * 68040, use page tables pointed to by the
                         * kernel pointer table.
                         */

                        if ((pindex & 15) == 0) {
                                /* Need new page table every 4M on the '040 */
                                ktablep = kernel_page_table (memavailp);
                                /* Make entries invalid */
                                memset( ktablep, 0, sizeof(long)*PTRS_PER_PTE);
                        }

                        ktable = VTOP(ktablep);

                        /*
                         * initialize section of the page table mapping
                         * this 256K portion.
                         */
                        for (i = 0; i < 64; i++) {
                                pte_val(*ktablep++) = physaddr | prot;
                                physaddr += PAGE_SIZE;
                        }

                        /*
                         * make the kernel pointer table point to the
                         * kernel page table.
                         */

                        ((unsigned long *)kpointerp)[pindex++] = ktable | _PAGE_TABLE;

                } else {
                        /*
                         * 68030, use early termination page descriptors.
                         * Each one points to 64 pages (256K).
                         */
                        ((unsigned long *)kpointerp)[pindex++] = physaddr | prot;
                        physaddr += 64 * PAGE_SIZE;
                }
        }

        return( retaddr );
}


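/* Helpers for kernel_set_cachemode(): walk one page table (set_cmode_pte)
 * or one pointer table (set_cmode_pmd) and rewrite the cache-mode bits of
 * every descriptor covering the given range.
 */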
static inline void set_cmode_pte( pmd_t *pmd, unsigned long address,
                                  unsigned long size, unsigned cmode )
{       pte_t *pte;
        unsigned long end;

        if (pmd_none(*pmd))
                return;

        pte = pte_offset( pmd, address );
        address &= ~PMD_MASK;
        end = address + size;
        if (end >= PMD_SIZE)
                end = PMD_SIZE;

        for( ; address < end; pte++ ) {
                pte_val(*pte) = (pte_val(*pte) & ~_PAGE_NOCACHE) | cmode;
                address += PAGE_SIZE;
        }
}


static inline void set_cmode_pmd( pgd_t *dir, unsigned long address,
                                  unsigned long size, unsigned cmode )
{
        pmd_t *pmd;
        unsigned long end;

        if (pgd_none(*dir))
                return;

        pmd = pmd_offset( dir, address );
        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;

        if ((pmd_val(*pmd) & _DESCTYPE_MASK) == _PAGE_PRESENT) {
                /* 68030 early termination descriptor */
                pmd_val(*pmd) = (pmd_val(*pmd) & ~_PAGE_NOCACHE) | cmode;
                return;
        }
        else {
                /* "normal" tables */
                for( ; address < end; pmd++ ) {
                        set_cmode_pte( pmd, address, end - address, cmode );
                        address = (address + PMD_SIZE) & PMD_MASK;
                }
        }
}


/*
 * Set new cache mode for some kernel address space.
 * The caller must push data for that range itself, if such data may already
 * be in the cache.
 */

void kernel_set_cachemode( unsigned long address, unsigned long size,
                                                   unsigned cmode )
{
        pgd_t *dir = pgd_offset_k( address );
        unsigned long end = address + size;

        if (m68k_is040or060) {
                switch( cmode ) {
                  case KERNELMAP_FULL_CACHING:
                        cmode = _PAGE_CACHE040;
                        break;
                  case KERNELMAP_NOCACHE_SER:
                  default:
                        cmode = _PAGE_NOCACHE_S;
                        break;
                  case KERNELMAP_NOCACHE_NONSER:
                        cmode = _PAGE_NOCACHE;
                        break;
                  case KERNELMAP_NO_COPYBACK:
                        cmode = _PAGE_CACHE040W;
                        break;
                }
        } else
                cmode = ((cmode == KERNELMAP_FULL_CACHING ||
                          cmode == KERNELMAP_NO_COPYBACK) ?
                         0 : _PAGE_NOCACHE030);

        for( ; address < end; dir++ ) {
                set_cmode_pmd( dir, address, end - address, cmode );
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
        }
        flush_tlb_all();
}
