OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

or1k/trunk/rc203soc/sw/uClinux/arch/m68knommu/mm/memory.c (rev 1777)

/*
 *  linux/arch/m68knommu/mm/memory.c
 *
 *  Copyright (C) 1998  Kenneth Albanowski <kjahds@kjahds.com>,
 *                      The Silver Hammer Group, Ltd.
 *
 *  MAR/1999 -- hacked for the ColdFire (gerg@moreton.com.au)
 *
 *  Based on:
 *
 *  linux/arch/m68k/mm/memory.c
 *
 *  Copyright (C) 1995  Hamish Macdonald
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/malloc.h>

#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/traps.h>
#include <asm/shglcore.h>

#ifndef NO_MM

extern pte_t *kernel_page_table (unsigned long *memavailp);

/* Strings for `extern inline' functions in <asm/pgtable.h>.  If put
   directly into these functions, they are output for every file that
   includes pgtable.h */

const char PgtabStr_bad_pmd[] = "Bad pmd in pte_alloc: %08lx\n";
const char PgtabStr_bad_pgd[] = "Bad pgd in pmd_alloc: %08lx\n";
const char PgtabStr_bad_pmdk[] = "Bad pmd in pte_alloc_kernel: %08lx\n";
const char PgtabStr_bad_pgdk[] = "Bad pgd in pmd_alloc_kernel: %08lx\n";

static struct ptable_desc {
        struct ptable_desc *prev;
        struct ptable_desc *next;
        unsigned long      page;
        unsigned char      alloced;
} ptable_list = { &ptable_list, &ptable_list, 0, 0xff };

#define PD_NONEFREE(dp) ((dp)->alloced == 0xff)
#define PD_ALLFREE(dp) ((dp)->alloced == 0)
#define PD_TABLEFREE(dp,i) (!((dp)->alloced & (1<<(i))))
#define PD_MARKUSED(dp,i) ((dp)->alloced |= (1<<(i)))
#define PD_MARKFREE(dp,i) ((dp)->alloced &= ~(1<<(i)))

#define PTABLE_SIZE (PTRS_PER_PMD * sizeof(pmd_t))

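/*
 * Editor's sketch (not part of the original source): how the alloced
 * bitmap in struct ptable_desc is meant to be read.  Each page holds 8
 * pointer tables, one bit per table, bit i == 1 meaning slot i is in use.
 * For example, with dp->alloced == 0x0b (binary 00001011):
 *
 *     PD_TABLEFREE(dp, 2)  ->  !(0x0b & 0x04)  ->  true  (slot 2 is free)
 *     PD_MARKUSED(dp, 2)   ->  dp->alloced becomes 0x0f
 *     PD_NONEFREE(dp)      ->  true only once all 8 bits are set (0xff)
 *     PD_ALLFREE(dp)       ->  true only when no bits are set (0x00)
 *
 * The static head element ptable_list is initialised with alloced = 0xff
 * so that the "no free table" path below allocates a fresh page on first use.
 */
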
pmd_t *get_pointer_table (void)
{
        pmd_t *pmdp = NULL;
        unsigned long flags;
        struct ptable_desc *dp = ptable_list.next;
        int i;

        /*
         * For a pointer table for a user process address space, a
         * table is taken from a page allocated for the purpose.  Each
         * page can hold 8 pointer tables.  The page is remapped in
         * virtual address space to be noncacheable.
         */
        if (PD_NONEFREE (dp)) {

                if (!(dp = kmalloc (sizeof(struct ptable_desc),GFP_KERNEL))) {
                        return 0;
                }

                if (!(dp->page = __get_free_page (GFP_KERNEL))) {
                        kfree (dp);
                        return 0;
                }

                nocache_page (dp->page);

                dp->alloced = 0;
                /* put at head of list */
                save_flags(flags);
                cli();
                dp->next = ptable_list.next;
                dp->prev = ptable_list.next->prev;
                ptable_list.next->prev = dp;
                ptable_list.next = dp;
                restore_flags(flags);
        }

        for (i = 0; i < 8; i++)
                if (PD_TABLEFREE (dp, i)) {
                        PD_MARKUSED (dp, i);
                        pmdp = (pmd_t *)(dp->page + PTABLE_SIZE*i);
                        break;
                }

        if (PD_NONEFREE (dp)) {
                /* move to end of list */
                save_flags(flags);
                cli();
                dp->prev->next = dp->next;
                dp->next->prev = dp->prev;

                dp->next = ptable_list.next->prev;
                dp->prev = ptable_list.prev;
                ptable_list.prev->next = dp;
                ptable_list.prev = dp;
                restore_flags(flags);
        }

        memset (pmdp, 0, PTABLE_SIZE);

        return pmdp;
}

void free_pointer_table (pmd_t *ptable)
{
        struct ptable_desc *dp;
        unsigned long page = (unsigned long)ptable & PAGE_MASK;
        int index = ((unsigned long)ptable - page)/PTABLE_SIZE;
        unsigned long flags;

        for (dp = ptable_list.next; dp->page && dp->page != page; dp = dp->next)
                ;

        if (!dp->page)
                panic ("unable to find desc for ptable %p on list!", ptable);

        if (PD_TABLEFREE (dp, index))
                panic ("table already free!");

        PD_MARKFREE (dp, index);

        if (PD_ALLFREE (dp)) {
                /* all tables in page are free, free page */
                save_flags(flags);
                cli();
                dp->prev->next = dp->next;
                dp->next->prev = dp->prev;
                restore_flags(flags);
                cache_page (dp->page);
                free_page (dp->page);
                kfree (dp);
                return;
        } else {
                /*
                 * move this descriptor to the front of the list, since
                 * it has one or more free tables.
                 */
                save_flags(flags);
                cli();
                dp->prev->next = dp->next;
                dp->next->prev = dp->prev;

                dp->next = ptable_list.next;
                dp->prev = ptable_list.next->prev;
                ptable_list.next->prev = dp;
                ptable_list.next = dp;
                restore_flags(flags);
        }
}

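#if 0
/* Editor's usage sketch (not part of the original source): the expected
 * pairing of the two routines above.  A caller grabs one of the 8 pmd
 * tables packed into a nocache page and later hands the same pointer back.
 */
static void example_pointer_table_usage(void)
{
        pmd_t *pmd = get_pointer_table ();      /* may kmalloc/__get_free_page */

        if (!pmd)
                return;                         /* allocation failed */
        /* ... point a pgd entry at pmd and fill it in ... */
        free_pointer_table (pmd);               /* page is freed once all 8 slots are free */
}
#endif
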
/* maximum pages used for kpointer tables */
#define KPTR_PAGES      4
/* # of reserved slots */
#define RESERVED_KPTR   4
extern pmd_tablepage kernel_pmd_table; /* reserved in head.S */

static struct kpointer_pages {
        pmd_tablepage *page[KPTR_PAGES];
        u_char alloced[KPTR_PAGES];
} kptr_pages;

void init_kpointer_table(void) {
        short i = KPTR_PAGES-1;

        /* first page is reserved in head.S */
        kptr_pages.page[i] = &kernel_pmd_table;
        kptr_pages.alloced[i] = ~(0xff>>RESERVED_KPTR);
        for (i--; i>=0; i--) {
                kptr_pages.page[i] = NULL;
                kptr_pages.alloced[i] = 0;
        }
}

pmd_t *get_kpointer_table (void)
{
        /* For pointer tables for the kernel virtual address space,
         * use the page that is reserved in head.S that can hold up to
         * 8 pointer tables. 3 of these tables are always reserved
         * (kernel_pg_dir, swapper_pg_dir and kernel pointer table for
         * the first 16 MB of RAM). In addition, the 4th pointer table
         * in this page is reserved. On Amiga and Atari, it is used to
         * map in the hardware registers. It may be used for other
         * purposes on other 68k machines. This leaves 4 pointer tables
         * available for use by the kernel. 1 of them is usually used
         * for the vmalloc tables. This allows mapping of 3 * 32 = 96 MB
         * of physical memory. But these pointer tables are also used
         * for other purposes, like kernel_map(), so further pages can
         * now be allocated.
         */
        pmd_tablepage *page;
        pmd_table *table;
        long nr, offset = -8;
        short i;

        for (i=KPTR_PAGES-1; i>=0; i--) {
                asm volatile("bfffo %1{%2,#8},%0"
                        : "=d" (nr)
                        : "d" ((u_char)~kptr_pages.alloced[i]), "d" (offset));
                if (nr)
                        break;
        }
        if (i < 0) {
                printk("No space for kernel pointer table!\n");
                return NULL;
        }
        if (!(page = kptr_pages.page[i])) {
                if (!(page = (pmd_tablepage *)__get_free_page(GFP_KERNEL))) {
                        printk("No space for kernel pointer table!\n");
                        return NULL;
                }
                nocache_page((u_long)(kptr_pages.page[i] = page));
        }
        asm volatile("bfset %0@{%1,#1}"
                : /* no output */
                : "a" (&kptr_pages.alloced[i]), "d" (nr-offset));
        table = &(*page)[nr-offset];
        memset(table, 0, sizeof(pmd_table));
        return ((pmd_t *)table);
}

void free_kpointer_table (pmd_t *pmdp)
{
        pmd_table *table = (pmd_table *)pmdp;
        pmd_tablepage *page = (pmd_tablepage *)((u_long)table & PAGE_MASK);
        long nr;
        short i;

        for (i=KPTR_PAGES-1; i>=0; i--) {
                if (kptr_pages.page[i] == page)
                        break;
        }
        nr = ((u_long)table - (u_long)page) / sizeof(pmd_table);
        if (!table || i < 0 || (i == KPTR_PAGES-1 && nr < RESERVED_KPTR)) {
                printk("Attempt to free invalid kernel pointer table: %p\n", table);
                return;
        }
        asm volatile("bfclr %0@{%1,#1}"
                : /* no output */
                : "a" (&kptr_pages.alloced[i]), "d" (nr));
        if (!kptr_pages.alloced[i]) {
                kptr_pages.page[i] = 0;
                cache_page ((u_long)page);
                free_page ((u_long)page);
        }
}

static unsigned long transp_transl_matches( unsigned long regval,
                                            unsigned long vaddr )
{
    unsigned long base, mask;

    /* enabled? */
    if (!(regval & 0x8000))
        return( 0 );

    if (CPU_IS_030) {
        /* function code match? */
        base = (regval >> 4) & 7;
        mask = ~(regval & 7);
        if ((SUPER_DATA & mask) != (base & mask))
            return( 0 );
    }
    else {
        /* must not be user-only */
        if ((regval & 0x6000) == 0)
            return( 0 );
    }

    /* address match? */
    base = regval & 0xff000000;
    mask = ~((regval << 8) & 0xff000000);
    return( (vaddr & mask) == (base & mask) );
}

/*
 * The following two routines map from a physical address to a kernel
 * virtual address and vice versa.
 */
unsigned long mm_vtop (unsigned long vaddr)
{
        int i;
        unsigned long voff = vaddr;
        unsigned long offset = 0;

        for (i = 0; i < boot_info.num_memory; i++)
        {
                if (voff < offset + boot_info.memory[i].size) {
#ifdef DEBUGPV
                        printk ("VTOP(%lx)=%lx\n", vaddr,
                                boot_info.memory[i].addr + voff - offset);
#endif
                        return boot_info.memory[i].addr + voff - offset;
                } else
                        offset += boot_info.memory[i].size;
        }

        /* not in one of the memory chunks; test for applying transparent
         * translation */

        if (CPU_IS_030) {
            unsigned long ttreg;
            register unsigned long *ttregptr __asm__( "a2" ) = &ttreg;

            asm volatile( ".long 0xf0120a00;" /* pmove %/tt0,%a0@ */
                          : "=g" (ttreg) : "a" (ttregptr) );
            if (transp_transl_matches( ttreg, vaddr ))
                return vaddr;

            asm volatile( ".long 0xf0120a00" /* pmove %/tt1,%a0@ */
                          : "=g" (ttreg) : "a" (ttregptr) );
            if (transp_transl_matches( ttreg, vaddr ))
                return vaddr;
        }
        else if (CPU_IS_040_OR_060) {
            register unsigned long ttreg __asm__( "d0" );

            asm volatile( ".long 0x4e7a0006" /* movec %dtt0,%d0 */
                          : "=d" (ttreg) );
            if (transp_transl_matches( ttreg, vaddr ))
                return vaddr;
            asm volatile( ".long 0x4e7a0007" /* movec %dtt1,%d0 */
                          : "=d" (ttreg) );
            if (transp_transl_matches( ttreg, vaddr ))
                return vaddr;
        }

        /* no match there either, so get the actual physical address from the MMU. */

        if (CPU_IS_060) {
          unsigned long fs = get_fs();
          unsigned long  paddr;

          set_fs (SUPER_DATA);

          /* The PLPAR instruction causes an access error if the translation
           * is not possible. We don't catch that here, so a bad kernel trap
           * will be reported in this case. */
          asm volatile ("movel %1,%/a0\n\t"
                        ".word 0xf5c8\n\t"      /* plpar (a0) */
                        "movel %/a0,%0"
                        : "=g" (paddr)
                        : "g" (vaddr)
                        : "a0" );
          set_fs (fs);

          return paddr;

        } else if (CPU_IS_040) {
          unsigned long mmusr;
          unsigned long fs = get_fs();

          set_fs (SUPER_DATA);

          asm volatile ("movel %1,%/a0\n\t"
                        ".word 0xf568\n\t"      /* ptestr (a0) */
                        ".long 0x4e7a8805\n\t"  /* movec mmusr, a0 */
                        "movel %/a0,%0"
                        : "=g" (mmusr)
                        : "g" (vaddr)
                        : "a0", "d0");
          set_fs (fs);

          if (mmusr & MMU_R_040)
            return (mmusr & PAGE_MASK) | (vaddr & (PAGE_SIZE-1));

          panic ("VTOP040: bad virtual address %08lx (%lx)", vaddr, mmusr);
        } else {
          volatile unsigned short temp;
          unsigned short mmusr;
          unsigned long *descaddr;

          asm volatile ("ptestr #5,%2@,#7,%0\n\t"
                        "pmove %/psr,%1@"
                        : "=a&" (descaddr)
                        : "a" (&temp), "a" (vaddr));
          mmusr = temp;

          if (mmusr & (MMU_I|MMU_B|MMU_L))
            panic ("VTOP030: bad virtual address %08lx (%x)", vaddr, mmusr);

          descaddr = (unsigned long *)PTOV(descaddr);

          switch (mmusr & MMU_NUM) {
          case 1:
            return (*descaddr & 0xfe000000) | (vaddr & 0x01ffffff);
          case 2:
            return (*descaddr & 0xfffc0000) | (vaddr & 0x0003ffff);
          case 3:
            return (*descaddr & PAGE_MASK) | (vaddr & (PAGE_SIZE-1));
          default:
            panic ("VTOP: bad levels (%u) for virtual address %08lx",
                   mmusr & MMU_NUM, vaddr);
          }
        }

        panic ("VTOP: bad virtual address %08lx", vaddr);
}

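/*
 * Editor's worked example (not part of the original source) for the
 * memory-chunk loop at the top of mm_vtop(), using made-up boot_info
 * contents: bank 0 at physical 0x00000000, size 8 MB, and bank 1 at
 * physical 0x01000000, size 8 MB.  Kernel virtual addresses cover the
 * banks contiguously, so for vaddr = 0x00900000:
 *
 *     bank 0: voff (0x00900000) >= offset (0) + size (0x00800000) -> skip,
 *             offset becomes 0x00800000
 *     bank 1: voff < 0x00800000 + 0x00800000 -> match,
 *             result = 0x01000000 + 0x00900000 - 0x00800000 = 0x01100000
 *
 * mm_ptov() below simply inverts this calculation.
 */
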
unsigned long mm_ptov (unsigned long paddr)
{
        int i;
        unsigned long offset = 0;

        for (i = 0; i < boot_info.num_memory; i++)
        {
                if (paddr >= boot_info.memory[i].addr &&
                    paddr < (boot_info.memory[i].addr
                             + boot_info.memory[i].size)) {
#ifdef DEBUGPV
                        printk ("PTOV(%lx)=%lx\n", paddr,
                                (paddr - boot_info.memory[i].addr) + offset);
#endif
                        return (paddr - boot_info.memory[i].addr) + offset;
                } else
                        offset += boot_info.memory[i].size;
        }

        /*
         * assume that the kernel virtual address is the same as the
         * physical address.
         *
         * This should be reasonable in most situations:
         *  1) They shouldn't be dereferencing the virtual address
         *     unless they are sure that it is valid from kernel space.
         *  2) The only usage I see so far is converting a page table
         *     reference to some non-FASTMEM address space when freeing
         *     mmaped "/dev/mem" pages.  These addresses are just passed
         *     to "free_page", which ignores addresses that aren't in
         *     the memory list anyway.
         *
         */

        /*
         * if on an amiga and address is in first 16M, move it
         * to the ZTWO_ADDR range
         */
        if (MACH_IS_AMIGA && paddr < 16*1024*1024)
                return ZTWO_VADDR(paddr);
        return paddr;
}

/* invalidate page in both caches */
#define clear040(paddr) __asm__ __volatile__ ("movel %0,%/a0\n\t"\
                                              "nop\n\t"\
                                              ".word 0xf4d0"\
                                              /* CINVP I/D (a0) */\
                                              : : "g" ((paddr))\
                                              : "a0")

/* invalidate page in i-cache */
#define cleari040(paddr) __asm__ __volatile__ ("movel %0,%/a0\n\t"\
                                               /* CINVP I (a0) */\
                                               "nop\n\t"\
                                               ".word 0xf490"\
                                               : : "g" ((paddr))\
                                               : "a0")

/* push page in both caches */
#define push040(paddr) __asm__ __volatile__ ("movel %0,%/a0\n\t"\
                                              "nop\n\t"\
                                             ".word 0xf4f0"\
                                             /* CPUSHP I/D (a0) */\
                                             : : "g" ((paddr))\
                                             : "a0")

/* push and invalidate page in both caches */
#define pushcl040(paddr) do { push040((paddr));\
                              if (CPU_IS_060) clear040((paddr));\
                         } while(0)

/* push page in both caches, invalidate in i-cache */
#define pushcli040(paddr) do { push040((paddr));\
                               if (CPU_IS_060) cleari040((paddr));\
                          } while(0)

/* push page defined by virtual address in both caches */
#define pushv040(vaddr) __asm__ __volatile__ ("movel %0,%/a0\n\t"\
                                              /* ptestr (a0) */\
                                              "nop\n\t"\
                                              ".word 0xf568\n\t"\
                                              /* movec mmusr,d0 */\
                                              ".long 0x4e7a0805\n\t"\
                                              "andw #0xf000,%/d0\n\t"\
                                              "movel %/d0,%/a0\n\t"\
                                              /* CPUSHP I/D (a0) */\
                                              "nop\n\t"\
                                              ".word 0xf4f0"\
                                              : : "g" ((vaddr))\
                                              : "a0", "d0")

/* push page defined by virtual address in both caches */
#define pushv060(vaddr) __asm__ __volatile__ ("movel %0,%/a0\n\t"\
                                              /* plpar (a0) */\
                                              ".word 0xf5c8\n\t"\
                                              /* CPUSHP I/D (a0) */\
                                              ".word 0xf4f0"\
                                              : : "g" ((vaddr))\
                                              : "a0")


/*
 * 040: Hit every page containing an address in the range paddr..paddr+len-1.
 * (Low order bits of the ea of a CINVP/CPUSHP are "don't care"s).
 * Hit every page until there is a page or less to go. Hit the next page,
 * and the one after that if the range hits it.
 */
/* ++roman: A little bit more care is required here: The CINVP instruction
 * invalidates cache entries WITHOUT WRITING DIRTY DATA BACK! So the beginning
 * and the end of the region must be treated differently if they are not
 * exactly at the beginning or end of a page boundary. Else, maybe too much
 * data becomes invalidated and thus lost forever. CPUSHP does what we need:
 * it invalidates the page after pushing dirty data to memory. (Thanks to Jes
 * for discovering the problem!)
 */
/* ... but on the '060, CPUSH doesn't invalidate (for us, since we have set
 * the DPI bit in the CACR; would it cause problems with temporarily changing
 * this?). So we have to push first and then additionally to invalidate.
 */

/*
 * cache_clear() semantics: Clear any cache entries for the area in question,
 * without writing back dirty entries first. This is useful if the data will
 * be overwritten anyway, e.g. by DMA to memory. The range is defined by a
 * _physical_ address.
 */

void cache_clear (unsigned long paddr, int len)
{
    if (CPU_IS_040_OR_060) {
        /*
         * we need special treatment for the first page, in case it
         * is not page-aligned.
         */
        if (paddr & (PAGE_SIZE - 1)){
            pushcl040(paddr);
            if (len <= PAGE_SIZE){
                if (((paddr + len - 1) ^ paddr) & PAGE_MASK) {
                    pushcl040(paddr + len - 1);
                }
                return;
            }else{
                len -=PAGE_SIZE;
                paddr += PAGE_SIZE;
            }
        }

        while (len > PAGE_SIZE) {
#if 0
            pushcl040(paddr);
#else
            clear040(paddr);
#endif
            len -= PAGE_SIZE;
            paddr += PAGE_SIZE;
        }
        if (len > 0) {
            pushcl040(paddr);
            if (((paddr + len - 1) ^ paddr) & PAGE_MASK) {
                /* a page boundary gets crossed at the end */
                pushcl040(paddr + len - 1);
            }
        }
    }
    else /* 68030 or 68020 */
        asm volatile ("movec %/cacr,%/d0\n\t"
                      "oriw %0,%/d0\n\t"
                      "movec %/d0,%/cacr"
                      : : "i" (FLUSH_I_AND_D)
                      : "d0");
}


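/*
 * Editor's worked example (not part of the original source) for the
 * unaligned-start handling above, assuming PAGE_SIZE == 0x1000:
 * cache_clear(0x10000800, 0x1000) takes the (paddr & (PAGE_SIZE-1)) branch,
 * CPUSHes the page containing 0x10000800, then notices that
 * ((0x10000800 + 0xfff) ^ 0x10000800) & PAGE_MASK is non-zero, so the range
 * spills into the next page and 0x100017ff is pushed as well.  Only the
 * partially covered first and last pages go through CPUSHP (write back,
 * then invalidate); whole pages in the middle are simply CINVPed, which is
 * exactly why the boundaries need this special treatment.
 */
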
/*
 * cache_push() semantics: Write back any dirty cache data in the given area,
 * and invalidate the range in the instruction cache. It needs not (but may)
 * invalidate those entries also in the data cache. The range is defined by a
 * _physical_ address.
 */

void cache_push (unsigned long paddr, int len)
{
    if (CPU_IS_040_OR_060) {
        /*
         * on 68040 or 68060, push cache lines for pages in the range;
         * on the '040 this also invalidates the pushed lines, but not on
         * the '060!
         */
        while (len > PAGE_SIZE) {
            pushcli040(paddr);
            len -= PAGE_SIZE;
            paddr += PAGE_SIZE;
            }
        if (len > 0) {
            pushcli040(paddr);
            if (((paddr + len - 1) ^ paddr) & PAGE_MASK) {
                /* a page boundary gets crossed at the end */
                pushcli040(paddr + len - 1);
                }
            }
        }


    /*
     * 68030/68020 have no writeback cache. On the other hand,
     * cache_push is actually a superset of cache_clear (the lines
     * get written back and invalidated), so we should make sure
     * to perform the corresponding actions. After all, this is getting
     * called in places where we've just loaded code, or whatever, so
     * flushing the icache is appropriate; flushing the dcache shouldn't
     * be required.
     */
    else /* 68030 or 68020 */
        asm volatile ("movec %/cacr,%/d0\n\t"
                      "oriw %0,%/d0\n\t"
                      "movec %/d0,%/cacr"
                      : : "i" (FLUSH_I)
                      : "d0");
}


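#if 0
/* Editor's usage sketch (not part of the original source): how a driver
 * would typically pair the two routines above around DMA transfers.
 * dma_buf_phys and DMA_LEN are hypothetical names used only here.
 */
static void example_dma_cache_maintenance(unsigned long dma_buf_phys)
{
        /* device is about to overwrite the buffer: old cache contents are
         * worthless, so drop them without writing anything back */
        cache_clear (dma_buf_phys, DMA_LEN);
        /* ... start DMA from device to memory, wait for completion ... */

        /* CPU filled a buffer the device will read: push dirty lines out
         * to memory before starting the transfer */
        cache_push (dma_buf_phys, DMA_LEN);
        /* ... start DMA from memory to device ... */
}
#endif
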
/*
 * cache_push_v() semantics: Write back any dirty cache data in the given
 * area, and invalidate those entries at least in the instruction cache. This
 * is intended to be used after data has been written that can be executed as
 * code later. The range is defined by a _user_mode_ _virtual_ address  (or,
 * more exactly, the space is defined by the %sfc/%dfc register.)
 */

void cache_push_v (unsigned long vaddr, int len)
{
    if (CPU_IS_040) {
        /* on 68040, push cache lines for pages in the range */
        while (len > PAGE_SIZE) {
            pushv040(vaddr);
            len -= PAGE_SIZE;
            vaddr += PAGE_SIZE;
            }
        if (len > 0) {
            pushv040(vaddr);
            if (((vaddr + len - 1) ^ vaddr) & PAGE_MASK) {
                /* a page boundary gets crossed at the end */
                pushv040(vaddr + len - 1);
                }
            }
        }
    else if (CPU_IS_060) {
        /* on 68060, push cache lines for pages in the range */
        while (len > PAGE_SIZE) {
            pushv060(vaddr);
            len -= PAGE_SIZE;
            vaddr += PAGE_SIZE;
        }
        if (len > 0) {
            pushv060(vaddr);
            if (((vaddr + len - 1) ^ vaddr) & PAGE_MASK) {
                /* a page boundary gets crossed at the end */
                pushv060(vaddr + len - 1);
            }
        }
    }
    /* 68030/68020 have no writeback cache; still need to clear icache. */
    else /* 68030 or 68020 */
        asm volatile ("movec %/cacr,%/d0\n\t"
                      "oriw %0,%/d0\n\t"
                      "movec %/d0,%/cacr"
                      : : "i" (FLUSH_I)
                      : "d0");
}

#undef clear040
#undef cleari040
#undef push040
#undef pushcl040
#undef pushcli040
#undef pushv040
#undef pushv060

unsigned long mm_phys_to_virt (unsigned long addr)
{
    return PTOV (addr);
}

int mm_end_of_chunk (unsigned long addr, int len)
{
        int i;

        for (i = 0; i < boot_info.num_memory; i++)
                if (boot_info.memory[i].addr + boot_info.memory[i].size
                    == addr + len)
                        return 1;
        return 0;
}

/* Map some physical address range into the kernel address space. The
 * code is copied and adapted from map_chunk().
 */

unsigned long kernel_map(unsigned long paddr, unsigned long size,
                         int nocacheflag, unsigned long *memavailp )
{
#define STEP_SIZE       (256*1024)

        static unsigned long vaddr = 0xe0000000; /* safe place */
        unsigned long physaddr, retaddr;
        pte_t *ktablep = NULL;
        pmd_t *kpointerp;
        pgd_t *page_dir;
        int pindex;   /* index into pointer table */
        int prot;

        /* Round down 'paddr' to 256 KB and adjust size */
        physaddr = paddr & ~(STEP_SIZE-1);
        size += paddr - physaddr;
        retaddr = vaddr + (paddr - physaddr);
        paddr = physaddr;
        /* Round up the size to 256 KB. It doesn't hurt if too much is
         * mapped... */
        size = (size + STEP_SIZE - 1) & ~(STEP_SIZE-1);

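        /*
         * Editor's worked example (not part of the original source) of the
         * rounding above, with STEP_SIZE == 0x40000 (256 KB) and the static
         * vaddr assumed still at its initial 0xe0000000:
         * for kernel_map(0x02345678, 0x1000, ...)
         *
         *     physaddr = 0x02345678 & ~0x3ffff         = 0x02340000
         *     size     = 0x1000 + 0x5678               = 0x6678
         *     retaddr  = 0xe0000000 + 0x5678           = 0xe0005678
         *     size     = (0x6678 + 0x3ffff) & ~0x3ffff = 0x40000
         *
         * so a full 256 KB step gets mapped, and the caller receives the
         * virtual address carrying the original offset into that step.
         */
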
        if (CPU_IS_040_OR_060) {
                prot = _PAGE_PRESENT | _PAGE_GLOBAL040;
                switch( nocacheflag ) {
                  case KERNELMAP_FULL_CACHING:
                        prot |= _PAGE_CACHE040;
                        break;
                  case KERNELMAP_NOCACHE_SER:
                  default:
                        prot |= _PAGE_NOCACHE_S;
                        break;
                  case KERNELMAP_NOCACHE_NONSER:
                        prot |= _PAGE_NOCACHE;
                        break;
                  case KERNELMAP_NO_COPYBACK:
                        prot |= _PAGE_CACHE040W;
                        /* prot |= 0; */
                        break;
                }
        } else
                prot = _PAGE_PRESENT |
                           ((nocacheflag == KERNELMAP_FULL_CACHING ||
                                 nocacheflag == KERNELMAP_NO_COPYBACK) ? 0 : _PAGE_NOCACHE030);

        page_dir = pgd_offset_k(vaddr);
        if (pgd_present(*page_dir)) {
                kpointerp = (pmd_t *)pgd_page(*page_dir);
                pindex = (vaddr >> 18) & 0x7f;
                if (pindex != 0 && CPU_IS_040_OR_060) {
                        if (pmd_present(*kpointerp))
                                ktablep = (pte_t *)pmd_page(*kpointerp);
                        else {
                                ktablep = kernel_page_table (memavailp);
                                /* Make entries invalid */
                                memset( ktablep, 0, sizeof(long)*PTRS_PER_PTE);
                                pmd_set(kpointerp,ktablep);
                        }
                        ktablep += (pindex & 15)*64;
                }
        }
        else {
                /* we need a new pointer table */
                kpointerp = get_kpointer_table ();
                pgd_set(page_dir, (pmd_t *)kpointerp);
                memset( kpointerp, 0, PTRS_PER_PMD*sizeof(pmd_t));
                pindex = 0;
        }

        for (physaddr = paddr; physaddr < paddr + size; vaddr += STEP_SIZE) {

                if (pindex > 127) {
                        /* we need a new pointer table */
                        kpointerp = get_kpointer_table ();
                        pgd_set(pgd_offset_k(vaddr), (pmd_t *)kpointerp);
                        memset( kpointerp, 0, PTRS_PER_PMD*sizeof(pmd_t));
                        pindex = 0;
                }

                if (CPU_IS_040_OR_060) {
                        int i;
                        unsigned long ktable;

                        /*
                         * 68040, use page tables pointed to by the
                         * kernel pointer table.
                         */

                        if ((pindex & 15) == 0) {
                                /* Need new page table every 4M on the '040 */
                                ktablep = kernel_page_table (memavailp);
                                /* Make entries invalid */
                                memset( ktablep, 0, sizeof(long)*PTRS_PER_PTE);
                        }

                        ktable = VTOP(ktablep);

                        /*
                         * initialize section of the page table mapping
                         * this 256K portion.
                         */
                        for (i = 0; i < 64; i++) {
                                pte_val(*ktablep++) = physaddr | prot;
                                physaddr += PAGE_SIZE;
                        }

                        /*
                         * make the kernel pointer table point to the
                         * kernel page table.
                         */

                        ((unsigned long *)kpointerp)[pindex++] = ktable | _PAGE_TABLE;

                } else {
                        /*
                         * 68030, use early termination page descriptors.
                         * Each one points to 64 pages (256K).
                         */
                        ((unsigned long *)kpointerp)[pindex++] = physaddr | prot;
                        physaddr += 64 * PAGE_SIZE;
                }
        }

        return( retaddr );
}


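#if 0
/* Editor's usage sketch (not part of the original source): mapping a
 * hypothetical 1 MB block of device registers uncached and serialized.
 * The physical address 0x40000000 is made up for the example, and passing
 * NULL for memavailp (which is only used while boot memory is still being
 * handed out) is an assumption of this sketch.
 */
static volatile unsigned char *example_map_device(void)
{
        unsigned long va;

        va = kernel_map (0x40000000, 0x100000, KERNELMAP_NOCACHE_SER, NULL);
        return (volatile unsigned char *) va;
}
#endif
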
static inline void set_cmode_pte( pmd_t *pmd, unsigned long address,
                                  unsigned long size, unsigned cmode )
{       pte_t *pte;
        unsigned long end;

        if (pmd_none(*pmd))
                return;

        pte = pte_offset( pmd, address );
        address &= ~PMD_MASK;
        end = address + size;
        if (end >= PMD_SIZE)
                end = PMD_SIZE;

        for( ; address < end; pte++ ) {
                pte_val(*pte) = (pte_val(*pte) & ~_PAGE_NOCACHE) | cmode;
                address += PAGE_SIZE;
        }
}


static inline void set_cmode_pmd( pgd_t *dir, unsigned long address,
                                  unsigned long size, unsigned cmode )
{
        pmd_t *pmd;
        unsigned long end;

        if (pgd_none(*dir))
                return;

        pmd = pmd_offset( dir, address );
        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;

        if ((pmd_val(*pmd) & _DESCTYPE_MASK) == _PAGE_PRESENT) {
                /* 68030 early termination descriptor */
                pmd_val(*pmd) = (pmd_val(*pmd) & ~_PAGE_NOCACHE) | cmode;
                return;
        }
        else {
                /* "normal" tables */
                for( ; address < end; pmd++ ) {
                        set_cmode_pte( pmd, address, end - address, cmode );
                        address = (address + PMD_SIZE) & PMD_MASK;
                }
        }
}


/*
 * Set new cache mode for some kernel address space.
 * The caller must push data for that range itself, if such data may already
 * be in the cache.
 */

void kernel_set_cachemode( unsigned long address, unsigned long size,
                                                   unsigned cmode )
{
        pgd_t *dir = pgd_offset_k( address );
        unsigned long end = address + size;

        if (CPU_IS_040_OR_060) {
                switch( cmode ) {
                  case KERNELMAP_FULL_CACHING:
                        cmode = _PAGE_CACHE040;
                        break;
                  case KERNELMAP_NOCACHE_SER:
                  default:
                        cmode = _PAGE_NOCACHE_S;
                        break;
                  case KERNELMAP_NOCACHE_NONSER:
                        cmode = _PAGE_NOCACHE;
                        break;
                  case KERNELMAP_NO_COPYBACK:
                        cmode = _PAGE_CACHE040W;
                        break;
                }
        } else
                cmode = ((cmode == KERNELMAP_FULL_CACHING ||
                                  cmode == KERNELMAP_NO_COPYBACK)    ?
                         0 : _PAGE_NOCACHE030);

        for( ; address < end; dir++ ) {
                set_cmode_pmd( dir, address, end - address, cmode );
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
        }
        flush_tlb_all();
}

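#if 0
/* Editor's usage sketch (not part of the original source): switching an
 * already-mapped, page-aligned kernel region to non-serialized nocache
 * mode.  fb_vaddr and FB_SIZE are hypothetical; per the comment above the
 * function, the caller pushes any cached data for the range first.
 */
static void example_set_cachemode(unsigned long fb_vaddr)
{
        cache_push (mm_vtop (fb_vaddr), FB_SIZE);
        kernel_set_cachemode (fb_vaddr, FB_SIZE, KERNELMAP_NOCACHE_NONSER);
}
#endif
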
#else /* !NO_MM */

/*
 * The following two routines map from a physical address to a kernel
 * virtual address and vice versa.
 */
unsigned long mm_vtop (unsigned long vaddr)
{
        return vaddr;
}

unsigned long mm_ptov (unsigned long paddr)
{
        return paddr;
}


/*
 * 040: Hit every page containing an address in the range paddr..paddr+len-1.
 * (Low order bits of the ea of a CINVP/CPUSHP are "don't care"s).
 * Hit every page until there is a page or less to go. Hit the next page,
 * and the one after that if the range hits it.
 */
/* ++roman: A little bit more care is required here: The CINVP instruction
 * invalidates cache entries WITHOUT WRITING DIRTY DATA BACK! So the beginning
 * and the end of the region must be treated differently if they are not
 * exactly at the beginning or end of a page boundary. Else, maybe too much
 * data becomes invalidated and thus lost forever. CPUSHP does what we need:
 * it invalidates the page after pushing dirty data to memory. (Thanks to Jes
 * for discovering the problem!)
 */
/* ... but on the '060, CPUSH doesn't invalidate (for us, since we have set
 * the DPI bit in the CACR; would it cause problems with temporarily changing
 * this?). So we have to push first and then additionally to invalidate.
 */

/*
 * cache_clear() semantics: Clear any cache entries for the area in question,
 * without writing back dirty entries first. This is useful if the data will
 * be overwritten anyway, e.g. by DMA to memory. The range is defined by a
 * _physical_ address.
 */

void cache_clear (unsigned long paddr, int len)
{
}


/*
 * cache_push() semantics: Write back any dirty cache data in the given area,
 * and invalidate the range in the instruction cache. It needs not (but may)
 * invalidate those entries also in the data cache. The range is defined by a
 * _physical_ address.
 */

void cache_push (unsigned long paddr, int len)
{
}


/*
 * cache_push_v() semantics: Write back any dirty cache data in the given
 * area, and invalidate those entries at least in the instruction cache. This
 * is intended to be used after data has been written that can be executed as
 * code later. The range is defined by a _user_mode_ _virtual_ address  (or,
 * more exactly, the space is defined by the %sfc/%dfc register.)
 */

void cache_push_v (unsigned long vaddr, int len)
{
}

unsigned long mm_phys_to_virt (unsigned long addr)
{
    return PTOV (addr);
}

/* Map some physical address range into the kernel address space. The
 * code is copied and adapted from map_chunk().
 */

unsigned long kernel_map(unsigned long paddr, unsigned long size,
                         int nocacheflag, unsigned long *memavailp )
{
        return paddr;
}


void kernel_set_cachemode( unsigned long address, unsigned long size,
                                                   unsigned cmode )
{
}

#ifdef MAGIC_ROM_PTR
int is_in_rom(unsigned long addr) {
#ifdef CONFIG_COLDFIRE
        extern unsigned long    _ramstart, _ramend;

        /* Anything not in operational RAM is returned as in rom! */
        if ((addr >= _ramstart) && (addr < _ramend))
                return(0);
        return(1);
#endif
#ifdef CONFIG_PILOT
        if (addr >= 0x10c00000)
                return 1;
        else
                return 0;
#endif
#ifdef CONFIG_M68EZ328ADS
        if ( 0x00200000 <= addr && addr < 0x00400000)
                return 1;
        else
                return 0;
#endif
#ifdef CONFIG_M68332
        extern char _etext;

#ifdef SHGLCORE_ROM_BANK_0_ADDR
        if ((addr >= SHGLCORE_ROM_BANK_0_ADDR) && (addr < (SHGLCORE_ROM_BANK_0_ADDR+SHGLCORE_ROM_BANK_0_LENGTH)))
                return 1;
#endif
#ifdef SHGLCORE_ROM_BANK_1_ADDR
        else if ((addr >= SHGLCORE_ROM_BANK_1_ADDR) && (addr < (SHGLCORE_ROM_BANK_1_ADDR+SHGLCORE_ROM_BANK_1_LENGTH)))
                return 1;
#endif
#ifdef SHGLCORE_FLASH_BANK_0_ADDR
        else if ((addr >= SHGLCORE_FLASH_BANK_0_ADDR) && (addr < (SHGLCORE_FLASH_BANK_0_ADDR+SHGLCORE_FLASH_BANK_0_LENGTH)))
                return 1;
#endif
#ifdef SHGLCORE_FLASH_BANK_1_ADDR
        else if ((addr >= SHGLCORE_FLASH_BANK_1_ADDR) && (addr < (SHGLCORE_FLASH_BANK_1_ADDR+SHGLCORE_FLASH_BANK_1_LENGTH)))
                return 1;
#endif
        else
                return 0;
#endif
}
#endif

#endif
