/*
 *  linux/arch/m68knommu/mm/memory.c
 *
 *  Copyright (C) 1998  Kenneth Albanowski <kjahds@kjahds.com>,
 *                      The Silver Hammer Group, Ltd.
 *
 *  Based on:
 *
 *  linux/arch/m68k/mm/memory.c
 *
 *  Copyright (C) 1995  Hamish Macdonald
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/malloc.h>

#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/traps.h>

#ifndef NO_MM

extern pte_t *kernel_page_table (unsigned long *memavailp);

/* Strings for `extern inline' functions in <asm/pgtable.h>.  If put
   directly into these functions, they are output for every file that
   includes pgtable.h */

const char PgtabStr_bad_pmd[] = "Bad pmd in pte_alloc: %08lx\n";
const char PgtabStr_bad_pgd[] = "Bad pgd in pmd_alloc: %08lx\n";
const char PgtabStr_bad_pmdk[] = "Bad pmd in pte_alloc_kernel: %08lx\n";
const char PgtabStr_bad_pgdk[] = "Bad pgd in pmd_alloc_kernel: %08lx\n";

static struct ptable_desc {
        struct ptable_desc *prev;
        struct ptable_desc *next;
        unsigned long      page;
        unsigned char      alloced;
} ptable_list = { &ptable_list, &ptable_list, 0, 0xff };

#define PD_NONEFREE(dp) ((dp)->alloced == 0xff)
#define PD_ALLFREE(dp) ((dp)->alloced == 0)
#define PD_TABLEFREE(dp,i) (!((dp)->alloced & (1<<(i))))
#define PD_MARKUSED(dp,i) ((dp)->alloced |= (1<<(i)))
#define PD_MARKFREE(dp,i) ((dp)->alloced &= ~(1<<(i)))

#define PTABLE_SIZE (PTRS_PER_PMD * sizeof(pmd_t))
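
/*
 * Each page on the ptable_list holds several pointer tables of
 * PTABLE_SIZE bytes; get_pointer_table() below assumes eight per page
 * (e.g. 4 KB pages with 512-byte tables).  The 8-bit `alloced' bitmap
 * tracks which of the eight slots are in use: bit i set means slot i
 * is allocated, 0xff means the page is full, 0 means it is empty.
 * The static list head is initialized with alloced = 0xff so that the
 * very first allocation request falls through to grabbing a new page.
 */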

pmd_t *get_pointer_table (void)
{
        pmd_t *pmdp = NULL;
        unsigned long flags;
        struct ptable_desc *dp = ptable_list.next;
        int i;

        /*
         * For a pointer table for a user process address space, a
         * table is taken from a page allocated for the purpose.  Each
         * page can hold 8 pointer tables.  The page is remapped in
         * virtual address space to be noncacheable.
         */
        if (PD_NONEFREE (dp)) {

                if (!(dp = kmalloc (sizeof(struct ptable_desc),GFP_KERNEL))) {
                        return 0;
                }

                if (!(dp->page = __get_free_page (GFP_KERNEL))) {
                        kfree (dp);
                        return 0;
                }

                nocache_page (dp->page);

                dp->alloced = 0;
                /* put at head of list */
                save_flags(flags);
                cli();
                dp->next = ptable_list.next;
                dp->prev = ptable_list.next->prev;
                ptable_list.next->prev = dp;
                ptable_list.next = dp;
                restore_flags(flags);
        }

        for (i = 0; i < 8; i++)
                if (PD_TABLEFREE (dp, i)) {
                        PD_MARKUSED (dp, i);
                        pmdp = (pmd_t *)(dp->page + PTABLE_SIZE*i);
                        break;
                }

        if (PD_NONEFREE (dp)) {
                /* move to end of list */
                save_flags(flags);
                cli();
                dp->prev->next = dp->next;
                dp->next->prev = dp->prev;

                dp->next = ptable_list.next->prev;
                dp->prev = ptable_list.prev;
                ptable_list.prev->next = dp;
                ptable_list.prev = dp;
                restore_flags(flags);
        }

        memset (pmdp, 0, PTABLE_SIZE);

        return pmdp;
}
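
/*
 * Note on the list discipline above: descriptors that still have free
 * slots are kept at the front of ptable_list, so the allocation scan
 * normally succeeds on the first descriptor.  A descriptor whose last
 * slot has just been handed out is moved to the tail, and (in
 * free_pointer_table below) a descriptor that regains a free slot is
 * moved back to the front.
 */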

void free_pointer_table (pmd_t *ptable)
{
        struct ptable_desc *dp;
        unsigned long page = (unsigned long)ptable & PAGE_MASK;
        int index = ((unsigned long)ptable - page)/PTABLE_SIZE;
        unsigned long flags;

        for (dp = ptable_list.next; dp->page && dp->page != page; dp = dp->next)
                ;

        if (!dp->page)
                panic ("unable to find desc for ptable %p on list!", ptable);

        if (PD_TABLEFREE (dp, index))
                panic ("table already free!");

        PD_MARKFREE (dp, index);

        if (PD_ALLFREE (dp)) {
                /* all tables in page are free, free page */
                save_flags(flags);
                cli();
                dp->prev->next = dp->next;
                dp->next->prev = dp->prev;
                restore_flags(flags);
                cache_page (dp->page);
                free_page (dp->page);
                kfree (dp);
                return;
        } else {
                /*
                 * move this descriptor to the front of the list, since
                 * it has one or more free tables.
                 */
                save_flags(flags);
                cli();
                dp->prev->next = dp->next;
                dp->next->prev = dp->prev;

                dp->next = ptable_list.next;
                dp->prev = ptable_list.next->prev;
                ptable_list.next->prev = dp;
                ptable_list.next = dp;
                restore_flags(flags);
        }
}

/* maximum pages used for kpointer tables */
#define KPTR_PAGES      4
/* # of reserved slots */
#define RESERVED_KPTR   4
extern pmd_tablepage kernel_pmd_table; /* reserved in head.S */

static struct kpointer_pages {
        pmd_tablepage *page[KPTR_PAGES];
        u_char alloced[KPTR_PAGES];
} kptr_pages;
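
/*
 * kptr_pages bookkeeping: page[i] points to a page holding up to eight
 * kernel pointer tables, and alloced[i] is a bitmap over those eight
 * slots.  The bitmap is manipulated with the 68020+ bitfield
 * instructions (bfffo/bfset/bfclr), which number bits from the most
 * significant end, so slot 0 corresponds to the top bit of the byte.
 */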

void init_kpointer_table(void) {
        short i = KPTR_PAGES-1;

        /* first page is reserved in head.S */
        kptr_pages.page[i] = &kernel_pmd_table;
        kptr_pages.alloced[i] = ~(0xff>>RESERVED_KPTR);
        for (i--; i>=0; i--) {
                kptr_pages.page[i] = NULL;
                kptr_pages.alloced[i] = 0;
        }
}

pmd_t *get_kpointer_table (void)
{
        /* For pointer tables for the kernel virtual address space,
         * use the page that is reserved in head.S that can hold up to
         * 8 pointer tables. 3 of these tables are always reserved
         * (kernel_pg_dir, swapper_pg_dir and kernel pointer table for
         * the first 16 MB of RAM). In addition, the 4th pointer table
         * in this page is reserved. On Amiga and Atari, it is used to
         * map in the hardware registers. It may be used for other
         * purposes on other 68k machines. This leaves 4 pointer tables
         * available for use by the kernel. One of them is usually used
         * for the vmalloc tables. This allows mapping of 3 * 32 = 96 MB
         * of physical memory. But these pointer tables are also used
         * for other purposes, like kernel_map(), so further pages can
         * now be allocated.
         */
        pmd_tablepage *page;
        pmd_table *table;
        long nr, offset = -8;
        short i;

        for (i=KPTR_PAGES-1; i>=0; i--) {
                asm volatile("bfffo %1{%2,#8},%0"
                        : "=d" (nr)
                        : "d" ((u_char)~kptr_pages.alloced[i]), "d" (offset));
                if (nr)
                        break;
        }
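        /*
         * bfffo scans the low byte of ~alloced[i] (a set bit there
         * marks a free slot), starting at the signed bit offset -8
         * with a width of 8.  It returns the offset of the first set
         * bit, so nr ends up in -8..-1 when a free slot exists and
         * becomes offset + width = 0 when the byte is full; nr-offset
         * below is then the slot index 0..7.
         */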
        if (i < 0) {
                printk("No space for kernel pointer table!\n");
                return NULL;
        }
        if (!(page = kptr_pages.page[i])) {
                if (!(page = (pmd_tablepage *)__get_free_page(GFP_KERNEL))) {
                        printk("No space for kernel pointer table!\n");
                        return NULL;
                }
                nocache_page((u_long)(kptr_pages.page[i] = page));
        }
        asm volatile("bfset %0@{%1,#1}"
                : /* no output */
                : "a" (&kptr_pages.alloced[i]), "d" (nr-offset));
        table = &(*page)[nr-offset];
        memset(table, 0, sizeof(pmd_table));
        return ((pmd_t *)table);
}

void free_kpointer_table (pmd_t *pmdp)
{
        pmd_table *table = (pmd_table *)pmdp;
        pmd_tablepage *page = (pmd_tablepage *)((u_long)table & PAGE_MASK);
        long nr;
        short i;

        for (i=KPTR_PAGES-1; i>=0; i--) {
                if (kptr_pages.page[i] == page)
                        break;
        }
        nr = ((u_long)table - (u_long)page) / sizeof(pmd_table);
        if (!table || i < 0 || (i == KPTR_PAGES-1 && nr < RESERVED_KPTR)) {
                printk("Attempt to free invalid kernel pointer table: %p\n", table);
                return;
        }
        asm volatile("bfclr %0@{%1,#1}"
                : /* no output */
                : "a" (&kptr_pages.alloced[i]), "d" (nr));
        if (!kptr_pages.alloced[i]) {
                kptr_pages.page[i] = 0;
                cache_page ((u_long)page);
                free_page ((u_long)page);
        }
}

static unsigned long transp_transl_matches( unsigned long regval,
                                            unsigned long vaddr )
{
    unsigned long base, mask;

    /* enabled? */
    if (!(regval & 0x8000))
        return( 0 );

    if (CPU_IS_030) {
        /* function code match? */
        base = (regval >> 4) & 7;
        mask = ~(regval & 7);
        if ((SUPER_DATA & mask) != (base & mask))
            return( 0 );
    }
    else {
        /* must not be user-only */
        if ((regval & 0x6000) == 0)
            return( 0 );
    }

    /* address match? */
    base = regval & 0xff000000;
    mask = ~(regval << 8) & 0xff000000;
    return( (vaddr & mask) == (base & mask) );
}
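
/*
 * Address-match example for the transparent translation registers
 * (illustrative values): with an address base byte of 0x40 and an
 * address mask byte of 0x3f in the TTR, mask above becomes 0xc0000000,
 * so every vaddr whose top two bits are 01 (i.e. 0x40000000 through
 * 0x7fffffff) is treated as transparently translated.
 */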

/*
 * The following two routines map from a physical address to a kernel
 * virtual address and vice versa.
 */
unsigned long mm_vtop (unsigned long vaddr)
{
        int i;
        unsigned long voff = vaddr;
        unsigned long offset = 0;

        for (i = 0; i < boot_info.num_memory; i++)
        {
                if (voff < offset + boot_info.memory[i].size) {
#ifdef DEBUGPV
                        printk ("VTOP(%lx)=%lx\n", vaddr,
                                boot_info.memory[i].addr + voff - offset);
#endif
                        return boot_info.memory[i].addr + voff - offset;
                } else
                        offset += boot_info.memory[i].size;
        }

        /* not in one of the memory chunks; test for applying transparent
         * translation */

        if (CPU_IS_030) {
            unsigned long ttreg;
            register unsigned long *ttregptr __asm__( "a2" ) = &ttreg;

            asm volatile( ".long 0xf0120a00;" /* pmove %/tt0,%a0@ */
                          : "=g" (ttreg) : "a" (ttregptr) );
            if (transp_transl_matches( ttreg, vaddr ))
                return vaddr;

            asm volatile( ".long 0xf0120a00" /* pmove %/tt1,%a0@ */
                          : "=g" (ttreg) : "a" (ttregptr) );
            if (transp_transl_matches( ttreg, vaddr ))
                return vaddr;
        }
        else if (CPU_IS_040_OR_060) {
            register unsigned long ttreg __asm__( "d0" );

            asm volatile( ".long 0x4e7a0006" /* movec %dtt0,%d0 */
                          : "=d" (ttreg) );
            if (transp_transl_matches( ttreg, vaddr ))
                return vaddr;
            asm volatile( ".long 0x4e7a0007" /* movec %dtt1,%d0 */
                          : "=d" (ttreg) );
            if (transp_transl_matches( ttreg, vaddr ))
                return vaddr;
        }

        /* no match there either, so get the actual physical address from the MMU. */

        if (CPU_IS_060) {
          unsigned long fs = get_fs();
          unsigned long paddr;

          set_fs (SUPER_DATA);

          /* The PLPAR instruction causes an access error if the translation
           * is not possible. We don't catch that here, so a bad kernel trap
           * will be reported in this case. */
          asm volatile ("movel %1,%/a0\n\t"
                        ".word 0xf5c8\n\t"      /* plpar (a0) */
                        "movel %/a0,%0"
                        : "=g" (paddr)
                        : "g" (vaddr)
                        : "a0" );
          set_fs (fs);

          return paddr;

        } else if (CPU_IS_040) {
          unsigned long mmusr;
          unsigned long fs = get_fs();

          set_fs (SUPER_DATA);

          asm volatile ("movel %1,%/a0\n\t"
                        ".word 0xf568\n\t"      /* ptestr (a0) */
                        ".long 0x4e7a8805\n\t"  /* movec mmusr, a0 */
                        "movel %/a0,%0"
                        : "=g" (mmusr)
                        : "g" (vaddr)
                        : "a0", "d0");
          set_fs (fs);

          if (mmusr & MMU_R_040)
            return (mmusr & PAGE_MASK) | (vaddr & (PAGE_SIZE-1));

          panic ("VTOP040: bad virtual address %08lx (%lx)", vaddr, mmusr);
        } else {
          volatile unsigned short temp;
          unsigned short mmusr;
          unsigned long *descaddr;

          asm volatile ("ptestr #5,%2@,#7,%0\n\t"
                        "pmove %/psr,%1@"
                        : "=a&" (descaddr)
                        : "a" (&temp), "a" (vaddr));
          mmusr = temp;

          if (mmusr & (MMU_I|MMU_B|MMU_L))
            panic ("VTOP030: bad virtual address %08lx (%x)", vaddr, mmusr);

          descaddr = (unsigned long *)PTOV(descaddr);

          switch (mmusr & MMU_NUM) {
          case 1:
            return (*descaddr & 0xfe000000) | (vaddr & 0x01ffffff);
          case 2:
            return (*descaddr & 0xfffc0000) | (vaddr & 0x0003ffff);
          case 3:
            return (*descaddr & PAGE_MASK) | (vaddr & (PAGE_SIZE-1));
          default:
            panic ("VTOP: bad levels (%u) for virtual address %08lx",
                   mmusr & MMU_NUM, vaddr);
          }
        }

        panic ("VTOP: bad virtual address %08lx", vaddr);
}

unsigned long mm_ptov (unsigned long paddr)
{
        int i;
        unsigned long offset = 0;

        for (i = 0; i < boot_info.num_memory; i++)
        {
                if (paddr >= boot_info.memory[i].addr &&
                    paddr < (boot_info.memory[i].addr
                             + boot_info.memory[i].size)) {
#ifdef DEBUGPV
                        printk ("PTOV(%lx)=%lx\n", paddr,
                                (paddr - boot_info.memory[i].addr) + offset);
#endif
                        return (paddr - boot_info.memory[i].addr) + offset;
                } else
                        offset += boot_info.memory[i].size;
        }

        /*
         * assume that the kernel virtual address is the same as the
         * physical address.
         *
         * This should be reasonable in most situations:
         *  1) They shouldn't be dereferencing the virtual address
         *     unless they are sure that it is valid from kernel space.
         *  2) The only usage I see so far is converting a page table
         *     reference to some non-FASTMEM address space when freeing
         *     mmaped "/dev/mem" pages.  These addresses are just passed
         *     to "free_page", which ignores addresses that aren't in
         *     the memory list anyway.
         *
         */

        /*
         * if on an amiga and address is in first 16M, move it
         * to the ZTWO_ADDR range
         */
        if (MACH_IS_AMIGA && paddr < 16*1024*1024)
                return ZTWO_VADDR(paddr);
        return paddr;
}
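
/*
 * Worked example for the two routines above (illustrative layout):
 * with boot_info describing two chunks, addr 0x00000000/size 8 MB and
 * addr 0x01000000/size 8 MB, the kernel virtual range is a simple
 * concatenation of the chunks.  mm_vtop(0x00900000) falls into the
 * second chunk at offset 0x100000 and yields 0x01100000; mm_ptov of
 * that physical address walks the same table and returns 0x00900000.
 */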

/* invalidate page in both caches */
#define clear040(paddr) __asm__ __volatile__ ("movel %0,%/a0\n\t"\
                                              "nop\n\t"\
                                              ".word 0xf4d0"\
                                              /* CINVP I/D (a0) */\
                                              : : "g" ((paddr))\
                                              : "a0")

/* invalidate page in i-cache */
#define cleari040(paddr) __asm__ __volatile__ ("movel %0,%/a0\n\t"\
                                               /* CINVP I (a0) */\
                                               "nop\n\t"\
                                               ".word 0xf490"\
                                               : : "g" ((paddr))\
                                               : "a0")

/* push page in both caches */
#define push040(paddr) __asm__ __volatile__ ("movel %0,%/a0\n\t"\
                                             "nop\n\t"\
                                             ".word 0xf4f0"\
                                             /* CPUSHP I/D (a0) */\
                                             : : "g" ((paddr))\
                                             : "a0")

/* push and invalidate page in both caches */
#define pushcl040(paddr) do { push040((paddr));\
                              if (CPU_IS_060) clear040((paddr));\
                         } while(0)

/* push page in both caches, invalidate in i-cache */
#define pushcli040(paddr) do { push040((paddr));\
                               if (CPU_IS_060) cleari040((paddr));\
                          } while(0)

/* push page defined by virtual address in both caches */
#define pushv040(vaddr) __asm__ __volatile__ ("movel %0,%/a0\n\t"\
                                              /* ptestr (a0) */\
                                              "nop\n\t"\
                                              ".word 0xf568\n\t"\
                                              /* movec mmusr,d0 */\
                                              ".long 0x4e7a0805\n\t"\
                                              "andw #0xf000,%/d0\n\t"\
                                              "movel %/d0,%/a0\n\t"\
                                              /* CPUSHP I/D (a0) */\
                                              "nop\n\t"\
                                              ".word 0xf4f0"\
                                              : : "g" ((vaddr))\
                                              : "a0", "d0")

/* push page defined by virtual address in both caches */
#define pushv060(vaddr) __asm__ __volatile__ ("movel %0,%/a0\n\t"\
                                              /* plpar (a0) */\
                                              ".word 0xf5c8\n\t"\
                                              /* CPUSHP I/D (a0) */\
                                              ".word 0xf4f0"\
                                              : : "g" ((vaddr))\
                                              : "a0")

/*
 * 040: Hit every page containing an address in the range paddr..paddr+len-1.
 * (Low order bits of the ea of a CINVP/CPUSHP are "don't care"s).
 * Hit every page until there is a page or less to go. Hit the next page,
 * and the one after that if the range hits it.
 */
/* ++roman: A little bit more care is required here: The CINVP instruction
 * invalidates cache entries WITHOUT WRITING DIRTY DATA BACK! So the beginning
 * and the end of the region must be treated differently if they are not
 * exactly at the beginning or end of a page boundary. Else, maybe too much
 * data becomes invalidated and thus lost forever. CPUSHP does what we need:
 * it invalidates the page after pushing dirty data to memory. (Thanks to Jes
 * for discovering the problem!)
 */
/* ... but on the '060, CPUSH doesn't invalidate (for us, since we have set
 * the DPI bit in the CACR; would it cause problems with temporarily changing
 * this?). So we have to push first and then additionally invalidate.
 */

/*
 * cache_clear() semantics: Clear any cache entries for the area in question,
 * without writing back dirty entries first. This is useful if the data will
 * be overwritten anyway, e.g. by DMA to memory. The range is defined by a
 * _physical_ address.
 */

void cache_clear (unsigned long paddr, int len)
{
    if (CPU_IS_040_OR_060) {
        /*
         * we need special treatment for the first page, in case it
         * is not page-aligned.
         */
        if (paddr & (PAGE_SIZE - 1)) {
            pushcl040(paddr);
            if (len <= PAGE_SIZE) {
                if (((paddr + len - 1) ^ paddr) & PAGE_MASK) {
                    pushcl040(paddr + len - 1);
                }
                return;
            } else {
                len -= PAGE_SIZE;
                paddr += PAGE_SIZE;
            }
        }

        while (len > PAGE_SIZE) {
#if 0
            pushcl040(paddr);
#else
            clear040(paddr);
#endif
            len -= PAGE_SIZE;
            paddr += PAGE_SIZE;
        }
        if (len > 0) {
            pushcl040(paddr);
            if (((paddr + len - 1) ^ paddr) & PAGE_MASK) {
                /* a page boundary gets crossed at the end */
                pushcl040(paddr + len - 1);
            }
        }
    }
    else /* 68030 or 68020 */
        asm volatile ("movec %/cacr,%/d0\n\t"
                      "oriw %0,%/d0\n\t"
                      "movec %/d0,%/cacr"
                      : : "i" (FLUSH_I_AND_D)
                      : "d0");
}
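
/*
 * Boundary-handling example for the 040/060 path above (illustrative,
 * 4 KB pages): cache_clear(0x10f80, 0x100) covers 0x10f80..0x1107f,
 * which straddles a page boundary.  The unaligned first page triggers
 * pushcl040(0x10f80); len is <= PAGE_SIZE, the XOR test sees that the
 * last byte lies in a different page, so pushcl040(0x1107f) is issued
 * as well and the routine returns without entering the main loop.
 */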
586
 
587
 
588
/*
589
 * cache_push() semantics: Write back any dirty cache data in the given area,
590
 * and invalidate the range in the instruction cache. It needs not (but may)
591
 * invalidate those entries also in the data cache. The range is defined by a
592
 * _physical_ address.
593
 */
594
 
595
void cache_push (unsigned long paddr, int len)
596
{
597
    if (CPU_IS_040_OR_060) {
598
        /*
599
         * on 68040 or 68060, push cache lines for pages in the range;
600
         * on the '040 this also invalidates the pushed lines, but not on
601
         * the '060!
602
         */
603
        while (len > PAGE_SIZE) {
604
            pushcli040(paddr);
605
            len -= PAGE_SIZE;
606
            paddr += PAGE_SIZE;
607
            }
608
        if (len > 0) {
609
            pushcli040(paddr);
610
            if (((paddr + len - 1) ^ paddr) & PAGE_MASK) {
611
                /* a page boundary gets crossed at the end */
612
                pushcli040(paddr + len - 1);
613
                }
614
            }
615
        }
616
 
617
 
618
    /*
619
     * 68030/68020 have no writeback cache. On the other hand,
620
     * cache_push is actually a superset of cache_clear (the lines
621
     * get written back and invalidated), so we should make sure
622
     * to perform the corresponding actions. After all, this is getting
623
     * called in places where we've just loaded code, or whatever, so
624
     * flushing the icache is appropriate; flushing the dcache shouldn't
625
     * be required.
626
     */
627
    else /* 68030 or 68020 */
628
        asm volatile ("movec %/cacr,%/d0\n\t"
629
                      "oriw %0,%/d0\n\t"
630
                      "movec %/d0,%/cacr"
631
                      : : "i" (FLUSH_I)
632
                      : "d0");
633
}

/*
 * cache_push_v() semantics: Write back any dirty cache data in the given
 * area, and invalidate those entries at least in the instruction cache. This
 * is intended to be used after data has been written that can be executed as
 * code later. The range is defined by a _user_mode_ _virtual_ address (or,
 * more exactly, the space is defined by the %sfc/%dfc register.)
 */

void cache_push_v (unsigned long vaddr, int len)
{
    if (CPU_IS_040) {
        /* on 68040, push cache lines for pages in the range */
        while (len > PAGE_SIZE) {
            pushv040(vaddr);
            len -= PAGE_SIZE;
            vaddr += PAGE_SIZE;
        }
        if (len > 0) {
            pushv040(vaddr);
            if (((vaddr + len - 1) ^ vaddr) & PAGE_MASK) {
                /* a page boundary gets crossed at the end */
                pushv040(vaddr + len - 1);
            }
        }
    }
    else if (CPU_IS_060) {
        /* on 68060, push cache lines for pages in the range */
        while (len > PAGE_SIZE) {
            pushv060(vaddr);
            len -= PAGE_SIZE;
            vaddr += PAGE_SIZE;
        }
        if (len > 0) {
            pushv060(vaddr);
            if (((vaddr + len - 1) ^ vaddr) & PAGE_MASK) {
                /* a page boundary gets crossed at the end */
                pushv060(vaddr + len - 1);
            }
        }
    }
    /* 68030/68020 have no writeback cache; still need to clear icache. */
    else /* 68030 or 68020 */
        asm volatile ("movec %/cacr,%/d0\n\t"
                      "oriw %0,%/d0\n\t"
                      "movec %/d0,%/cacr"
                      : : "i" (FLUSH_I)
                      : "d0");
}

#undef clear040
#undef cleari040
#undef push040
#undef pushcl040
#undef pushcli040
#undef pushv040
#undef pushv060

unsigned long mm_phys_to_virt (unsigned long addr)
{
    return PTOV (addr);
}

int mm_end_of_chunk (unsigned long addr, int len)
{
        int i;

        for (i = 0; i < boot_info.num_memory; i++)
                if (boot_info.memory[i].addr + boot_info.memory[i].size
                    == addr + len)
                        return 1;
        return 0;
}

/* Map some physical address range into the kernel address space. The
 * code is copied and adapted from map_chunk().
 */

unsigned long kernel_map(unsigned long paddr, unsigned long size,
                         int nocacheflag, unsigned long *memavailp )
{
#define STEP_SIZE       (256*1024)

        static unsigned long vaddr = 0xe0000000; /* safe place */
        unsigned long physaddr, retaddr;
        pte_t *ktablep = NULL;
        pmd_t *kpointerp;
        pgd_t *page_dir;
        int pindex;   /* index into pointer table */
        int prot;

        /* Round down 'paddr' to 256 KB and adjust size */
        physaddr = paddr & ~(STEP_SIZE-1);
        size += paddr - physaddr;
        retaddr = vaddr + (paddr - physaddr);
        paddr = physaddr;
        /* Round up the size to 256 KB. It doesn't hurt if too much is
         * mapped... */
        size = (size + STEP_SIZE - 1) & ~(STEP_SIZE-1);
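        /*
         * Rounding example (illustrative): kernel_map(0x00f42000, 0x1000, ...)
         * gives physaddr = 0x00f40000, so size grows to 0x3000, retaddr
         * becomes vaddr + 0x2000, and size is then rounded up to one full
         * STEP_SIZE of 0x40000; the caller gets retaddr, which points at
         * the requested offset within the 256 KB-aligned mapping.
         */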

        if (CPU_IS_040_OR_060) {
                prot = _PAGE_PRESENT | _PAGE_GLOBAL040;
                switch( nocacheflag ) {
                  case KERNELMAP_FULL_CACHING:
                        prot |= _PAGE_CACHE040;
                        break;
                  case KERNELMAP_NOCACHE_SER:
                  default:
                        prot |= _PAGE_NOCACHE_S;
                        break;
                  case KERNELMAP_NOCACHE_NONSER:
                        prot |= _PAGE_NOCACHE;
                        break;
                  case KERNELMAP_NO_COPYBACK:
                        prot |= _PAGE_CACHE040W;
                        /* prot |= 0; */
                        break;
                }
        } else
                prot = _PAGE_PRESENT |
                           ((nocacheflag == KERNELMAP_FULL_CACHING ||
                                 nocacheflag == KERNELMAP_NO_COPYBACK) ? 0 : _PAGE_NOCACHE030);

        page_dir = pgd_offset_k(vaddr);
        if (pgd_present(*page_dir)) {
                kpointerp = (pmd_t *)pgd_page(*page_dir);
                pindex = (vaddr >> 18) & 0x7f;
                if (pindex != 0 && CPU_IS_040_OR_060) {
                        if (pmd_present(*kpointerp))
                                ktablep = (pte_t *)pmd_page(*kpointerp);
                        else {
                                ktablep = kernel_page_table (memavailp);
                                /* Make entries invalid */
                                memset( ktablep, 0, sizeof(long)*PTRS_PER_PTE);
                                pmd_set(kpointerp,ktablep);
                        }
                        ktablep += (pindex & 15)*64;
                }
        }
        else {
                /* we need a new pointer table */
                kpointerp = get_kpointer_table ();
                pgd_set(page_dir, (pmd_t *)kpointerp);
                memset( kpointerp, 0, PTRS_PER_PMD*sizeof(pmd_t));
                pindex = 0;
        }

        for (physaddr = paddr; physaddr < paddr + size; vaddr += STEP_SIZE) {

                if (pindex > 127) {
                        /* we need a new pointer table */
                        kpointerp = get_kpointer_table ();
                        pgd_set(pgd_offset_k(vaddr), (pmd_t *)kpointerp);
                        memset( kpointerp, 0, PTRS_PER_PMD*sizeof(pmd_t));
                        pindex = 0;
                }

                if (CPU_IS_040_OR_060) {
                        int i;
                        unsigned long ktable;

                        /*
                         * 68040, use page tables pointed to by the
                         * kernel pointer table.
                         */

                        if ((pindex & 15) == 0) {
                                /* Need new page table every 4M on the '040 */
                                ktablep = kernel_page_table (memavailp);
                                /* Make entries invalid */
                                memset( ktablep, 0, sizeof(long)*PTRS_PER_PTE);
                        }

                        ktable = VTOP(ktablep);

                        /*
                         * initialize the section of the page table mapping
                         * this 256K portion.
                         */
                        for (i = 0; i < 64; i++) {
                                pte_val(*ktablep++) = physaddr | prot;
                                physaddr += PAGE_SIZE;
                        }

                        /*
                         * make the kernel pointer table point to the
                         * kernel page table.
                         */

                        ((unsigned long *)kpointerp)[pindex++] = ktable | _PAGE_TABLE;

                } else {
                        /*
                         * 68030, use early termination page descriptors.
                         * Each one points to 64 pages (256K).
                         */
                        ((unsigned long *)kpointerp)[pindex++] = physaddr | prot;
                        physaddr += 64 * PAGE_SIZE;
                }
        }

        return( retaddr );
}

static inline void set_cmode_pte( pmd_t *pmd, unsigned long address,
                                  unsigned long size, unsigned cmode )
{
        pte_t *pte;
        unsigned long end;

        if (pmd_none(*pmd))
                return;

        pte = pte_offset( pmd, address );
        address &= ~PMD_MASK;
        end = address + size;
        if (end >= PMD_SIZE)
                end = PMD_SIZE;

        for( ; address < end; pte++ ) {
                pte_val(*pte) = (pte_val(*pte) & ~_PAGE_NOCACHE) | cmode;
                address += PAGE_SIZE;
        }
}

static inline void set_cmode_pmd( pgd_t *dir, unsigned long address,
                                  unsigned long size, unsigned cmode )
{
        pmd_t *pmd;
        unsigned long end;

        if (pgd_none(*dir))
                return;

        pmd = pmd_offset( dir, address );
        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;

        if ((pmd_val(*pmd) & _DESCTYPE_MASK) == _PAGE_PRESENT) {
                /* 68030 early termination descriptor */
                pmd_val(*pmd) = (pmd_val(*pmd) & ~_PAGE_NOCACHE) | cmode;
                return;
        }
        else {
                /* "normal" tables */
                for( ; address < end; pmd++ ) {
                        set_cmode_pte( pmd, address, end - address, cmode );
                        address = (address + PMD_SIZE) & PMD_MASK;
                }
        }
}

/*
 * Set new cache mode for some kernel address space.
 * The caller must push data for that range itself, if such data may already
 * be in the cache.
 */

void kernel_set_cachemode( unsigned long address, unsigned long size,
                                                   unsigned cmode )
{
        pgd_t *dir = pgd_offset_k( address );
        unsigned long end = address + size;

        if (CPU_IS_040_OR_060) {
                switch( cmode ) {
                  case KERNELMAP_FULL_CACHING:
                        cmode = _PAGE_CACHE040;
                        break;
                  case KERNELMAP_NOCACHE_SER:
                  default:
                        cmode = _PAGE_NOCACHE_S;
                        break;
                  case KERNELMAP_NOCACHE_NONSER:
                        cmode = _PAGE_NOCACHE;
                        break;
                  case KERNELMAP_NO_COPYBACK:
                        cmode = _PAGE_CACHE040W;
                        break;
                }
        } else
                cmode = ((cmode == KERNELMAP_FULL_CACHING ||
                          cmode == KERNELMAP_NO_COPYBACK) ?
                         0 : _PAGE_NOCACHE030);

        for( ; address < end; dir++ ) {
                set_cmode_pmd( dir, address, end - address, cmode );
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
        }
        flush_tlb_all();
}

#else /* !NO_MM */

/*
 * The following two routines map from a physical address to a kernel
 * virtual address and vice versa.
 */
unsigned long mm_vtop (unsigned long vaddr)
{
        return vaddr;
}

unsigned long mm_ptov (unsigned long paddr)
{
        return paddr;
}

/*
 * 040: Hit every page containing an address in the range paddr..paddr+len-1.
 * (Low order bits of the ea of a CINVP/CPUSHP are "don't care"s).
 * Hit every page until there is a page or less to go. Hit the next page,
 * and the one after that if the range hits it.
 */
/* ++roman: A little bit more care is required here: The CINVP instruction
 * invalidates cache entries WITHOUT WRITING DIRTY DATA BACK! So the beginning
 * and the end of the region must be treated differently if they are not
 * exactly at the beginning or end of a page boundary. Else, maybe too much
 * data becomes invalidated and thus lost forever. CPUSHP does what we need:
 * it invalidates the page after pushing dirty data to memory. (Thanks to Jes
 * for discovering the problem!)
 */
/* ... but on the '060, CPUSH doesn't invalidate (for us, since we have set
 * the DPI bit in the CACR; would it cause problems with temporarily changing
 * this?). So we have to push first and then additionally invalidate.
 */

/*
 * cache_clear() semantics: Clear any cache entries for the area in question,
 * without writing back dirty entries first. This is useful if the data will
 * be overwritten anyway, e.g. by DMA to memory. The range is defined by a
 * _physical_ address.
 */

void cache_clear (unsigned long paddr, int len)
{
}

/*
 * cache_push() semantics: Write back any dirty cache data in the given area,
 * and invalidate the range in the instruction cache. It need not (but may)
 * invalidate those entries also in the data cache. The range is defined by a
 * _physical_ address.
 */

void cache_push (unsigned long paddr, int len)
{
}

/*
 * cache_push_v() semantics: Write back any dirty cache data in the given
 * area, and invalidate those entries at least in the instruction cache. This
 * is intended to be used after data has been written that can be executed as
 * code later. The range is defined by a _user_mode_ _virtual_ address (or,
 * more exactly, the space is defined by the %sfc/%dfc register.)
 */

void cache_push_v (unsigned long vaddr, int len)
{
}

unsigned long mm_phys_to_virt (unsigned long addr)
{
    return PTOV (addr);
}

/* Map some physical address range into the kernel address space. The
 * code is copied and adapted from map_chunk().
 */

unsigned long kernel_map(unsigned long paddr, unsigned long size,
                         int nocacheflag, unsigned long *memavailp )
{
        return paddr;
}

void kernel_set_cachemode( unsigned long address, unsigned long size,
                                                   unsigned cmode )
{
}

unsigned long alloc_kernel_stack()
{
        unsigned long rval = __get_free_page(GFP_KERNEL);
#ifdef DEBUG
        printk("+++ alloc_kernel_stack: 0x%8x\n", rval);
        stack_trace();
        show_free_areas();
#endif
        return rval;
}

void free_kernel_stack(unsigned long ksp)
{
#ifdef DEBUG
        printk("--- free_kernel_stack: 0x%8x\n", ksp);
        show_free_areas();
#endif
        free_page(ksp);
#ifdef DEBUG
        printk(" page freed\n");
        show_free_areas();
#endif
}

#ifdef CONFIG_CYVH
#define ROMA_START      0xfec00000
#define ROMB_START      0xfee00000
#define ROMA_END        ROMB_START
#define ROMB_END        0xff000000
#endif

int is_in_rom(unsigned long addr)
{
#if 0
        return ((addr >= ROMA_START) && (addr < ROMA_END))
                || ((addr >= ROMB_START) && (addr < ROMB_END));
#else
        extern int __data_rom_start, __data_rom_end;
        return (addr >= (unsigned long)&__data_rom_start)
                && (addr < (unsigned long)&__data_rom_end);
#endif
}

#endif
