/*
 *  arch/ppc/mm/init.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Ported to PPC by Gary Thomas
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/head.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>

#define SHOW_FAULTS
#undef  SHOW_FAULTS

#define SHOW_INVALIDATES
#undef  SHOW_INVALIDATES

#include <asm/pgtable.h>

extern pgd_t swapper_pg_dir[1024*8];

extern void die_if_kernel(char *,struct pt_regs *,long);
extern void show_net_buffers(void);

/*
 * BAD_PAGE is the page that is used for page faults when linux
 * is out-of-memory. Older versions of linux just did a
 * do_exit(), but using this instead means there is less risk
 * for a process dying in kernel mode, possibly leaving an inode
 * unused, etc.
 *
 * BAD_PAGETABLE is the accompanying page-table: it is initialized
 * to point to BAD_PAGE entries.
 *
 * ZERO_PAGE is a special page that is used for zero-initialized
 * data and COW.
 */
pte_t * __bad_pagetable(void)
{
        panic("__bad_pagetable");
}

pte_t __bad_page(void)
{
        panic("__bad_page");
}

unsigned long __zero_page(void)
{
        extern char empty_zero_page[PAGE_SIZE];
        bzero(empty_zero_page, PAGE_SIZE);
        return (unsigned long) empty_zero_page;
}

void show_mem(void)
{
        int i,free = 0,total = 0,reserved = 0;
        int shared = 0;

        printk("Mem-info:\n");
        show_free_areas();
        printk("Free swap:       %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
        /*i = high_memory >> PAGE_SHIFT;*/
        i = MAP_NR(high_memory);
        while (i-- > 0) {
                total++;
                if (PageReserved(mem_map+i))
                        reserved++;
                else if (!mem_map[i].count)
                        free++;
                else
                        shared += mem_map[i].count-1;
        }
        printk("%d pages of RAM\n",total);
        printk("%d free pages\n",free);
        printk("%d reserved pages\n",reserved);
        printk("%d pages shared\n",shared);
        show_buffers();
#ifdef CONFIG_NET
        show_net_buffers();
#endif
}

extern unsigned long free_area_init(unsigned long, unsigned long);

/*
 * paging_init() sets up the page tables - note that the first 4MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
{
        return free_area_init(start_mem, end_mem);
}

void mem_init(unsigned long start_mem, unsigned long end_mem)
{
        int codepages = 0;
        int datapages = 0;
        unsigned long tmp;
        extern int etext;

        end_mem &= PAGE_MASK;
        high_memory = end_mem;

        /* mark usable pages in the mem_map[] */
        start_mem = PAGE_ALIGN(start_mem);

        for (tmp = KERNELBASE ; tmp < high_memory ; tmp += PAGE_SIZE)
        {
                if (tmp < start_mem)
                {
                        set_bit(PG_reserved, &mem_map[MAP_NR(tmp)].flags);
                        if (tmp < (unsigned long) &etext)
                        {
                                codepages++;
                        } else
                        {
                                datapages++;
                        }
                        continue;
                }
                clear_bit(PG_reserved, &mem_map[MAP_NR(tmp)].flags);
                mem_map[MAP_NR(tmp)].count = 1;
                free_page(tmp);
        }
        tmp = nr_free_pages << PAGE_SHIFT;
        printk("Memory: %luk/%luk available (%dk kernel code, %dk data)\n",
                tmp >> 10,
                ((int)high_memory - (int)KERNELBASE) >> 10,
                codepages << (PAGE_SHIFT-10),
                datapages << (PAGE_SHIFT-10));
        invalidate();
        return;
}
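
/*
 * Walk-through of the loop above: every physical page from KERNELBASE
 * up to high_memory has a mem_map[] entry.  Pages below start_mem hold
 * the kernel image, so they are marked PG_reserved and merely counted
 * as code or data (split at &etext).  Every remaining page is handed to
 * the allocator by setting its use count to 1 and calling free_page(),
 * which drops the count back to 0 and puts the page on the free list.
 */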

void si_meminfo(struct sysinfo *val)
{
        int i;

        i = ((int)high_memory & 0x00FFFFFF) >> PAGE_SHIFT;
        val->totalram = 0;
        val->sharedram = 0;
        val->freeram = nr_free_pages << PAGE_SHIFT;
        val->bufferram = buffermem;
        while (i-- > 0)  {
                if (PageReserved(mem_map+i))
                        continue;
                val->totalram++;
                if (!mem_map[i].count)
                        continue;
                val->sharedram += mem_map[i].count-1;
        }
        val->totalram <<= PAGE_SHIFT;
        val->sharedram <<= PAGE_SHIFT;
        return;
}

BAT BAT0 =
   {
        {
                0x80000000>>17,         /* bepi */
                BL_256M,                /* bl */
                1,                      /* vs -- supervisor mode valid */
                1,                      /* vp -- user mode valid */
        },
        {
                0x80000000>>17,         /* brpn */
                1,                      /* write-through */
                1,                      /* cache-inhibited */
                0,                      /* memory coherence */
                1,                      /* guarded */
                BPP_RW                  /* protection */
        }
   };

BAT BAT1 =
   {
        {
                0xC0000000>>17,         /* bepi */
                BL_256M,                /* bl */
                1,                      /* vs */
                1,                      /* vp */
        },
        {
                0xC0000000>>17,         /* brpn */
                1,                      /* w */
                1,                      /* i (cache disabled) */
                0,                      /* m */
                1,                      /* g */
                BPP_RW                  /* pp */
        }
   };

BAT BAT2 =
   {
        {
                0x90000000>>17,         /* bepi */
                BL_16M,                 /* bl -- should be set to the amount of phys ram */
                1,                      /* vs */
                0,                      /* vp */
        },
        {
                0x00000000>>17,         /* brpn */
                0,                      /* w */
                0,                      /* i */
                0,                      /* m */
                0,                      /* g */
                BPP_RW                  /* pp */
        }
   };

BAT BAT3 =
   {
        {
                0x00000000>>17,         /* bepi */
                BL_256M,                /* bl */
                0,                      /* vs */
                0,                      /* vp */
        },
        {
                0x00000000>>17,         /* brpn */
                1,                      /* w */
                1,                      /* i (cache disabled) */
                0,                      /* m */
                0,                      /* g */
                BPP_RW                  /* pp */
        }
   };

BAT TMP_BAT2 =
   { /* 0x9XXXXXXX -> 0x0XXXXXXX */
        {
                0x90000000>>17,         /* bepi */
                BL_256M,                /* bl */
                1,                      /* vs */
                1,                      /* vp */
        },
        {
                0x00000000>>17,         /* brpn */
                1,                      /* w */
                0,                      /* i (cache enabled) */
                0,                      /* m */
                0,                      /* g */
                BPP_RW                  /* pp */
        }
   };
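
/*
 * A note on the BAT encodings above (PowerPC 60x block address
 * translation): bepi and brpn hold the upper 15 bits of the effective
 * and real addresses, hence the ">>17".  bl is a mask selecting the
 * block size, vs/vp validate the mapping for supervisor/user accesses,
 * and w/i/m/g are the standard WIMG storage attributes.  So BAT0 maps
 * the 256MB region at 0x80000000 one-to-one as write-through,
 * cache-inhibited and guarded (I/O-safe), while BAT2 maps virtual
 * 0x90000000 onto physical RAM at 0 with full caching.
 */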

unsigned long _SDR1;            /* Hardware SDR1 image */
PTE *Hash;
int Hash_size, Hash_mask;
unsigned long *end_of_DRAM;
int cache_is_copyback = 1;
int kernel_pages_are_copyback = 1;
/* Note: these need to be in 'data' so they live over the boot */
unsigned char *BeBox_IO_page = 0;
unsigned long isBeBox[2] = {0, 0};

#define NUM_MAPPINGS 128
struct
   {
        int va, pa, pg, task;
   } last_mappings[NUM_MAPPINGS];
int next_mapping = 0;

/* Generic linked list */
struct item
   {
        struct item *next;
   };

#ifndef NULL
#define NULL 0
#endif

#define MAX_CONTEXTS    16
#define MAX_MMU_PAGES   8

static struct item _free_pages;
static char mmu_pages[(MAX_MMU_PAGES+1)*MMU_PAGE_SIZE];

/*
 * Routines to support generic linked lists.
 */

MMU_free_item(struct item *hdr, struct item *elem)
{
        if (hdr->next == (struct item *)NULL)
        { /* First item in list */
                elem->next = (struct item *)NULL;
        } else
        {
                elem->next = hdr->next;
        }
        hdr->next = elem;
}

struct item *
MMU_get_item(struct item *hdr)
{
        struct item *item;

        if ((item = hdr->next) != (struct item *)NULL)
        {
                hdr->next = item->next;
        }
        return (item);
}
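
/*
 * These two routines implement a simple LIFO over a sentinel header:
 * MMU_free_item() pushes elem onto hdr's chain, MMU_get_item() pops
 * the head (returning NULL when the chain is empty).  A minimal,
 * not-compiled sketch of the discipline, mirroring how MMU_init()
 * below carves mmu_pages[] into a free list (example_free_list is
 * purely illustrative, not part of this file's interface):
 */
#if 0
static void example_free_list(void)
{
        struct item pool = { (struct item *)NULL };
        char *p = mmu_pages;    /* MMU_init() first aligns this to a page */
        int i;

        for (i = 0;  i < MAX_MMU_PAGES;  i++, p += MMU_PAGE_SIZE)
                MMU_free_item(&pool, (struct item *)p);   /* push a page */
        p = (char *)MMU_get_item(&pool);                  /* pop one back */
}
#endif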

/*
 * This code is called to create a minimal mapped environment.
 * It is called with the MMU on, but with only a BAT register
 * set up to cover the code/data.  After this routine runs,
 * the BAT mapping is withdrawn and all mappings must be complete.
 */

extern char _start[], _end[];

MMU_init()
{
        int i, p;
        SEGREG *segs;

        printk("MMU init - started\n");
        find_end_of_memory();
        printk("  Start at 0x%08X, End at 0x%08X, Hash at 0x%08X\n", _start, _end, Hash);
        _SDR1 = ((unsigned long)Hash & 0x00FFFFFF) | Hash_mask;
        p = (int)mmu_pages;
        p = (p + (MMU_PAGE_SIZE-1)) & ~(MMU_PAGE_SIZE-1);
        _free_pages.next = (struct item *)NULL;
        for (i = 0;  i < MAX_MMU_PAGES;  i++)
        {
                MMU_free_item(&_free_pages, (struct item *)p);
                p += MMU_PAGE_SIZE;
        }
        /* Force initial page tables */
#if 0
        swapper_pg_dir = (pgd_t *)MMU_get_page();
#endif
        init_task.tss.pg_tables = (unsigned long *)swapper_pg_dir;
        /* Segment registers */
        segs = init_task.tss.segs;
        for (i = 0;  i < 16;  i++)
        {
                segs[i].ks = 0;
                segs[i].kp = 1;
                segs[i].vsid = i;
        }
        /* Map kernel TEXT+DATA+BSS */
        end_of_DRAM = (unsigned long *)Hash;
        /* Hard map in any special local resources */
        if (isBeBox[0])
        {
                /* Map in one page for the BeBox motherboard I/O */
                end_of_DRAM = (unsigned long *)((unsigned long)end_of_DRAM - MMU_PAGE_SIZE);
#if 0
                BeBox_IO_page = (unsigned char *)0x7FFFF000;
#endif
                BeBox_IO_page = (unsigned char *)end_of_DRAM;
                MMU_map_page(&init_task.tss, BeBox_IO_page, 0x7FFFF000, PAGE_KERNEL);
                MMU_disable_cache_for_page(&init_task.tss, BeBox_IO_page);
        }
        /* Other parts of the kernel expect ALL RAM to be mapped */
        for (i = (int)_start;  i < (int)end_of_DRAM;  i += MMU_PAGE_SIZE)
        {
                MMU_map_page(&init_task.tss, i, i & 0x00FFFFFF, PAGE_KERNEL);
        }
        /* Map hardware HASH table */
        for (i = (int)Hash;  i < (int)Hash+Hash_size;  i += MMU_PAGE_SIZE)
        {
                MMU_map_page(&init_task.tss, i, i & 0x00FFFFFF, PAGE_KERNEL);
        }
#if 0 /* I'm not sure this is necessary */
        /* Clear all DRAM not explicitly used by kernel */
        bzero(_end, (unsigned long)end_of_DRAM-(unsigned long)_end);
#endif
        printk("MMU init - done!\n");
}
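
/*
 * On the _SDR1 image built above: the 32-bit PowerPC SDR1 register
 * holds HTABORG (the physical base of the hash table) in its upper
 * bits and HTABMASK in its low bits.  Masking Hash with 0x00FFFFFF
 * strips the KERNELBASE offset (the BAT2 mapping above places kernel
 * virtual 0x90000000 at physical 0), and OR'ing in Hash_mask works
 * because the table base is aligned well past the mask bits.
 * Hash, Hash_size and Hash_mask are presumably set up by
 * find_end_of_memory(), which lives outside this file.
 */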

pte *
MMU_get_page()
{
        pte *pg;

        if ((pg = (pte *)MMU_get_item(&_free_pages)))
        {
                bzero((char *)pg, MMU_PAGE_SIZE);
        }
        printk("MMU Allocate Page at %08X\n", pg);
        return(pg);
}

MMU_map_page(struct thread_struct *tss, unsigned long va, unsigned long pa, int flags)
{
        pte *pd, *pg;
#if 0
if (va < (unsigned long)0x90000000)
  printk("Thread: %x, Map VA: %08x -> PA: %08X, Flags: %x\n", tss, va, pa, flags);
#endif
        if ((pte **)tss->pg_tables == (pte **)NULL)
        { /* Allocate upper level page map */
                (pte **)tss->pg_tables = (pte **)MMU_get_page();
                if ((pte **)tss->pg_tables == (pte **)NULL)
                {
                        _panic("Out of MMU pages (PD)\n");
                }
        }
        /* Use upper 10 bits of VA to index the first level map */
        pd = ((pte **)tss->pg_tables)[(va>>PD_SHIFT)&PD_MASK];
        pd = (pte *)((int)pd & 0xFFFFF000);
        if (pd == (pte *)NULL)
        { /* Need to allocate second-level table */
                pd = (pte *)MMU_get_page();
                if (pd == (pte *)NULL)
                {
                        _panic("Out of MMU pages (PG)\n");
                }
                ((pte **)tss->pg_tables)[(va>>PD_SHIFT)&PD_MASK] = (pte *)((unsigned long)pd | _PAGE_TABLE);
        }
        /* Use middle 10 bits of VA to index the second-level map */
        pg = &pd[(va>>PT_SHIFT)&PT_MASK];
        *(long *)pg = 0;  /* Clear out entry */
        pg->page_num = pa>>PG_SHIFT;
        pg->flags = flags;
        MMU_hash_page(tss, va, pg);
}
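
/*
 * Worked example of the two-level lookup above, assuming the usual
 * 10/10/12 split (PD_SHIFT = 22, PT_SHIFT = 12, with 10-bit masks --
 * the actual values come from the headers, not this file): for
 * va = 0x90001234,
 *
 *      first-level index  = (va >> 22) & PD_MASK = 0x240
 *      second-level index = (va >> 12) & PT_MASK = 0x001
 *      page offset        = va & 0xFFF           = 0x234
 *
 * These software tables only mirror the Linux view of the address
 * space; the 60x MMU never walks them.  MMU_hash_page() copies each
 * entry into the hashed page table the hardware actually uses.
 */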

/*
 * Insert (create) a hardware page table entry
 */
MMU_hash_page(struct thread_struct *tss, unsigned long va, pte *pg)
{
        int hash, page_index, segment, i, h, _h, api, vsid, perms;
        PTE *_pte, *empty, *slot;
        PTE *slot0, *slot1;
        extern char _etext;

/* TEMP */
if (va < KERNELBASE)
{
        last_mappings[next_mapping].va = va;
        last_mappings[next_mapping].pa = pg?*(int *)pg:0;
        last_mappings[next_mapping].pg = pg;
        last_mappings[next_mapping].task = current->pid;
        if (++next_mapping == NUM_MAPPINGS) next_mapping = 0;
}
/* TEMP */
        page_index = ((int)va & 0x0FFFF000) >> 12;
        segment = (unsigned int)va >> 28;
        api = page_index >> 10;
        vsid = ((SEGREG *)tss->segs)[segment].vsid;
        empty = slot = (PTE *)NULL;
        for (_h = 0;  _h < 2;  _h++)
        {
                hash = page_index ^ vsid;
                if (_h)
                {
                        hash = ~hash;  /* Secondary hash uses ones-complement */
                }
                hash &= 0x3FF | (Hash_mask << 10);
                hash *= 8;  /* Eight entries / hash bucket */
                _pte = &Hash[hash];
                /* Save slot addresses in case we have to purge */
                if (_h)
                {
                        slot1 = _pte;
                } else
                {
                        slot0 = _pte;
                }
                for (i = 0;  i < 8;  i++, _pte++)
                {
                        if (_pte->v && _pte->vsid == vsid && _pte->h == _h && _pte->api == api)
                        { /* Found it! */
                                h = _h;
                                slot = _pte;
                                goto found_it;
                        }
                        if ((empty == (PTE *)NULL) && !_pte->v)
                        {
                                h = _h;
                                empty = _pte;
                        }
                }
        }
        if (slot == (PTE *)NULL)
        {
                if (pg == (pte *)NULL)
                {
                        return (0);
                }
                if (empty == (PTE *)NULL)
                { /* Table is totally full! */
printk("Map VA: %08X, Slot: %08X[%08X/%08X], H: %d\n", va, slot, slot0, slot1, h);
printk("Slot0:\n");
_pte = slot0;
for (i = 0;  i < 8;  i++, _pte++)
{
        printk("  V: %d, VSID: %05x, H: %d, RPN: %04x, R: %d, C: %d, PP: %x\n", _pte->v, _pte->vsid, _pte->h, _pte->rpn, _pte->r, _pte->c, _pte->pp);
}
printk("Slot1:\n");
_pte = slot1;
for (i = 0;  i < 8;  i++, _pte++)
{
        printk("  V: %d, VSID: %05x, H: %d, RPN: %04x, R: %d, C: %d, PP: %x\n", _pte->v, _pte->vsid, _pte->h, _pte->rpn, _pte->r, _pte->c, _pte->pp);
}
printk("Last mappings:\n");
for (i = 0;  i < NUM_MAPPINGS;  i++)
{
        printk("  VA: %08x, PA: %08X, TASK: %08X\n",
                last_mappings[next_mapping].va,
                last_mappings[next_mapping].pa,
                last_mappings[next_mapping].task);
        if (++next_mapping == NUM_MAPPINGS) next_mapping = 0;
}
                        _panic("Hash table full!\n");
                }
                slot = empty;
        }
found_it:
#if 0
printk("Map VA: %08X, Slot: %08X[%08X/%08X], H: %d\n", va, slot, slot0, slot1, h);
#endif
        _tlbie(va); /* Clear TLB */
        if (pg)
        { /* Fill in table */
                slot->v = 1;
                slot->vsid = vsid;
                slot->h = h;
                slot->api = api;
                if (((pg->page_num << 12) & 0xF0000000) == KERNELBASE)
                {
                        slot->rpn = pg->page_num - (KERNELBASE>>12);
                } else
                {
                        slot->rpn = pg->page_num;
                }
                slot->r = 0;
                slot->c = 0;
                slot->i = 0;
                slot->g = 0;
                if (cache_is_copyback)
                {
                        if (kernel_pages_are_copyback || (pg->flags & _PAGE_USER) || (va < (unsigned long)&_etext))
                        { /* All User & Kernel TEXT pages are copy-back */
                                slot->w = 0;
                                slot->m = 1;
                        } else
                        { /* Kernel DATA pages are write-thru */
                                slot->w = 1;
                                slot->m = 0;
                        }
                } else
                {
                        slot->w = 1;
                        slot->m = 0;
                }
                if (pg->flags & _PAGE_USER)
                {
                        if (pg->flags & _PAGE_RW)
                        { /* Read/write page */
                                perms = PP_RWRW;
                        } else
                        { /* Read only page */
                                perms = PP_RXRX;
                        }
                } else
                { /* Kernel pages */
                        perms = PP_RWXX;
                }
#ifdef SHOW_FAULTS
if (va < KERNELBASE)
printk("VA: %08X, PA: %08X, Flags: %x, Perms: %d, Vsid: %x\n", va, pg->page_num<<12, pg->flags, perms, vsid);
#endif
                slot->pp = perms;
                return (0);
        } else
        { /* Pull entry from tables */
                int flags = 0;
                if (slot->r) flags |= _PAGE_ACCESSED;
                if (slot->c) flags |= _PAGE_DIRTY;
                slot->v = 0;
#ifdef SHOW_FAULTS
printk("Pull VA: %08X, Flags: %x\n", va, flags);
#endif
                return (flags);
        }
}
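
/*
 * Worked example of the hashed lookup above: the 60x MMU locates a
 * PTE by hashing, not by walking a tree.  For va = 0x30123000 with
 * vsid = 3 (MMU_init() sets vsid = segment number for the kernel)
 * and the smallest table (Hash_mask == 0):
 *
 *      page_index = (va & 0x0FFFF000) >> 12     = 0x123
 *      api        = page_index >> 10            = 0
 *      primary:    (0x123 ^ 3) & 0x3FF  = 0x120 -> PTEG at &Hash[0x120*8]
 *      secondary: ~(0x123 ^ 3) & 0x3FF  = 0x2DF -> PTEG at &Hash[0x2DF*8]
 *
 * Each PTEG holds eight 8-byte PTEs, so Hash_mask == 0 gives the
 * minimum 64KB table.  An entry records which hash placed it (the h
 * bit) so both groups can be searched on a miss.
 */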

/*
 * Disable cache for a particular page
 */
MMU_disable_cache_for_page(struct thread_struct *tss, unsigned long va)
{
        int hash, page_index, segment, i, h, _h, api, vsid, perms;
        PTE *_pte, *empty, *slot;
        PTE *slot0, *slot1;
        extern char _etext;

        page_index = ((int)va & 0x0FFFF000) >> 12;
        segment = (unsigned int)va >> 28;
        api = page_index >> 10;
        vsid = ((SEGREG *)tss->segs)[segment].vsid;
        empty = slot = (PTE *)NULL;
        for (_h = 0;  _h < 2;  _h++)
        {
                hash = page_index ^ vsid;
                if (_h)
                {
                        hash = ~hash;  /* Secondary hash uses ones-complement */
                }
                hash &= 0x3FF | (Hash_mask << 10);
                hash *= 8;  /* Eight entries / hash bucket */
                _pte = &Hash[hash];
                /* Save slot addresses in case we have to purge */
                if (_h)
                {
                        slot1 = _pte;
                } else
                {
                        slot0 = _pte;
                }
                for (i = 0;  i < 8;  i++, _pte++)
                {
                        if (_pte->v && _pte->vsid == vsid && _pte->h == _h && _pte->api == api)
                        { /* Found it! */
                                h = _h;
                                slot = _pte;
                                goto found_it;
                        }
                        if ((empty == (PTE *)NULL) && !_pte->v)
                        {
                                h = _h;
                                empty = _pte;
                        }
                }
        }
found_it:
        if (slot == (PTE *)NULL)
        { /* Page isn't hashed in -- nothing to mark uncached */
                return;
        }
        _tlbie(va); /* Clear TLB */
        slot->i = 1;
        slot->m = 0;
}

/*
 * Invalidate a hardware [hash] page table entry
 * Note: this should never be called [currently] for kernel addresses.
 */
MMU_invalidate_page(struct mm_struct *mm, unsigned long va, pte *pg)
{
        int hash, page_index, segment, i, h, _h, api, vsid, perms;
        PTE *_pte, *slot;
        int flags = 0;

        page_index = ((int)va & 0x0FFFF000) >> 12;
        segment = (unsigned int)va >> 28;
        api = page_index >> 10;
        vsid = mm->context | segment;
        slot = (PTE *)NULL;
        for (_h = 0;  _h < 2;  _h++)
        {
                hash = page_index ^ vsid;
                if (_h)
                {
                        hash = ~hash;  /* Secondary hash uses ones-complement */
                }
                hash &= 0x3FF | (Hash_mask << 10);
                hash *= 8;  /* Eight entries / hash bucket */
                _pte = &Hash[hash];
                for (i = 0;  i < 8;  i++, _pte++)
                {
                        if (_pte->v && _pte->vsid == vsid && _pte->h == _h && _pte->api == api)
                        { /* Found it! */
                                _tlbie(va); /* Clear TLB */
                                if (_pte->r) flags |= _PAGE_ACCESSED;
                                if (_pte->c) flags |= _PAGE_DIRTY;
                                _pte->v = 0;
#ifdef SHOW_FAULTS
printk("Pull VA: %08X, Flags: %x\n", va, flags);
#endif
                                return (flags);
                        }
                }
        }
        return (flags);
}

/*
 * Invalidate the MMU [hardware] tables for the current task
 */
void
invalidate(void)
{
        int i, j, flags;
        unsigned long address;
        pgd_t *pgd;
        pte_t *_pte;
        static long _invalidates;
#ifdef SHOW_INVALIDATES
printk("invalidate()\n");
#endif
        _invalidates++;
#if 0 /* Unnecessary */
        _tlbia();  /* Flush TLB entries */
#endif
        pgd = pgd_offset(current->mm, 0);
        if (!pgd) return;  /* No map? */
        address = 0;
        for (i = 0 ; (i < PTRS_PER_PGD) && (address < KERNELBASE); i++)
        {
                if (*(long *)pgd)
                {
                        /* I know there are only two levels, but the macros don't */
                        _pte = pte_offset(pmd_offset(pgd,0),0);
                        if (_pte)
                        {
                                for (j = 0;  j < PTRS_PER_PTE;  j++)
                                {
                                        if (pte_present(*_pte))
                                        {
                                                flags = MMU_hash_page(&current->tss, address, 0);
                                                ((pte *)_pte)->flags |= flags;
                                        }
                                        _pte++;
                                        address += PAGE_SIZE;
                                }
                        } else
                        {
                                address += PAGE_SIZE*PTRS_PER_PTE;
                        }
                } else
                {
                        address += PAGE_SIZE*PTRS_PER_PTE;
                }
                pgd++;
        }
}
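
/*
 * Note the flags round-trip in the walker above: calling
 * MMU_hash_page() (or MMU_invalidate_page() in the variants below)
 * with a NULL pte pulls the hardware entry and returns the referenced
 * and changed bits it accumulated as _PAGE_ACCESSED/_PAGE_DIRTY,
 * which are then OR'd back into the Linux pte so no usage information
 * is lost when the hardware entry is dropped.
 */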

/*
 * Invalidate the MMU [hardware] tables for the given mm
 */
void
flush_cache_mm(struct mm_struct *mm)
{
        int i, j, flags;
        unsigned long address;
        pgd_t *pgd;
        pte_t *_pte;
        static long _invalidates;
#ifdef SHOW_INVALIDATES
printk("invalidate_mm(%x)\n", mm);
#endif
        if (!mm) return;
        _invalidates++;
#if 0 /* Unnecessary */
        _tlbia();  /* Flush TLB entries */
#endif
        pgd = pgd_offset(mm, 0);
        if (!pgd) return;  /* No map? */
        address = 0;
        for (i = 0 ; (i < PTRS_PER_PGD) && (address < KERNELBASE); i++)
        {
                if (*(long *)pgd)
                {
                        /* I know there are only two levels, but the macros don't */
                        _pte = pte_offset(pmd_offset(pgd,0),0);
                        if (_pte)
                        {
                                for (j = 0;  j < PTRS_PER_PTE;  j++)
                                {
                                        if (pte_present(*_pte))
                                        {
                                                flags = MMU_invalidate_page(mm, address, 0);
                                                ((pte *)_pte)->flags |= flags;
                                        }
                                        _pte++;
                                        address += PAGE_SIZE;
                                }
                        } else
                        {
                                address += PAGE_SIZE*PTRS_PER_PTE;
                        }
                } else
                {
                        address += PAGE_SIZE*PTRS_PER_PTE;
                }
                pgd++;
        }
}

/*
 * Invalidate the MMU [hardware] tables for a single page
 */
void
flush_cache_page(struct vm_area_struct *vma, long va)
{
        int i, j, flags;
        unsigned long address;
        pgd_t *pgd;
        pte_t *_pte;
        static long _invalidates;
        struct mm_struct *mm = vma->vm_mm;
#ifdef SHOW_INVALIDATES
printk("invalidate_page(%x[%x], %x)\n", vma, mm, va);
#endif
        if (!mm) return;  /* In case VMA lookup fails */
        _invalidates++;
#if 0 /* Unnecessary */
        _tlbia();  /* Flush TLB entries */
#endif
        /* Note: this could be MUCH better */
        pgd = pgd_offset(mm, 0);
        if (!pgd) return;  /* No map? */
        address = 0;
        for (i = 0 ; (i < PTRS_PER_PGD) && (address < KERNELBASE); i++)
        {
                if (*(long *)pgd)
                {
                        /* I know there are only two levels, but the macros don't */
                        _pte = pte_offset(pmd_offset(pgd,0),0);
                        if (_pte)
                        {
                                for (j = 0;  j < PTRS_PER_PTE;  j++)
                                {
                                        if ((va == address) && pte_present(*_pte))
                                        {
                                                flags = MMU_invalidate_page(mm, address, 0);
                                                ((pte *)_pte)->flags |= flags;
                                        }
                                        _pte++;
                                        address += PAGE_SIZE;
                                }
                        } else
                        {
                                address += PAGE_SIZE*PTRS_PER_PTE;
                        }
                } else
                {
                        address += PAGE_SIZE*PTRS_PER_PTE;
                }
                pgd++;
        }
}

/*
 * Invalidate the MMU [hardware] tables for a range of addresses
 */
void
flush_cache_range(struct mm_struct *mm, unsigned long va_start, unsigned long va_end)
{
        int i, j, flags;
        unsigned long address;
        pgd_t *pgd;
        pte_t *_pte;
        static long _invalidates;
#ifdef SHOW_INVALIDATES
printk("invalidate_range(%x, %x, %x)\n", mm, va_start, va_end);
#endif
        if (!mm) return;
        _invalidates++;
#if 0 /* Unnecessary */
        _tlbia();  /* Flush TLB entries */
#endif
        /* Note: this could be MUCH better */
        pgd = pgd_offset(mm, 0);
        if (!pgd) return;  /* No map? */
        address = 0;
        for (i = 0 ; (i < PTRS_PER_PGD) && (address < KERNELBASE); i++)
        {
                if (*(long *)pgd)
                {
                        /* I know there are only two levels, but the macros don't */
                        _pte = pte_offset(pmd_offset(pgd,0),0);
                        if (_pte)
                        {
                                for (j = 0;  j < PTRS_PER_PTE;  j++)
                                {
                                        if ((va_start <= address) && (va_end > address) && pte_present(*_pte))
                                        {
                                                flags = MMU_invalidate_page(mm, address, 0);
                                                ((pte *)_pte)->flags |= flags;
                                        }
                                        _pte++;
                                        address += PAGE_SIZE;
                                }
                        } else
                        {
                                address += PAGE_SIZE*PTRS_PER_PTE;
                        }
                } else
                {
                        address += PAGE_SIZE*PTRS_PER_PTE;
                }
                pgd++;
        }
}

void
cache_mode(char *str, int *ints)
{
        cache_is_copyback = ints[0];
}

_verify_addr(long va)
{
        int hash, page_index, segment, i, h, _h, api, vsid, perms;
        struct thread_struct *tss = &current->tss;
        PTE *_pte, *empty, *slot;
        PTE *slot0, *slot1;

        page_index = ((int)va & 0x0FFFF000) >> 12;
        segment = (unsigned int)va >> 28;
        api = page_index >> 10;
        vsid = ((SEGREG *)tss->segs)[segment].vsid;
        empty = slot = (PTE *)NULL;
        printk("Segment = %x/%x\n", *(long *)&tss->segs[segment], _get_SRx(segment));
        for (_h = 0;  _h < 2;  _h++)
        {
                hash = page_index ^ vsid;
                if (_h)
                {
                        hash = ~hash;  /* Secondary hash uses ones-complement */
                }
                hash &= 0x3FF | (Hash_mask << 10);
                hash *= 8;  /* Eight entries / hash bucket */
                _pte = &Hash[hash];
/*              dump_buf(_pte, 64);*/
                for (i = 0;  i < 8;  i++, _pte++)
                {
                        if (_pte->v && _pte->vsid == vsid && _pte->h == _h && _pte->api == api)
                        { /* Found it! */
                                h = _h;
                                slot = _pte;
                                printk("Found at %x\n", slot);
                                goto found_it;
                        }
                        if ((empty == (PTE *)NULL) && !_pte->v)
                        {
                                h = _h;
                                empty = _pte;
                        }
                }
        }
found_it:
        return;
}

flush_cache_all()
{
        printk("flush_cache_all()\n");
        invalidate();
}

flush_tlb_all() {}
flush_tlb_mm() {}
flush_tlb_page() {}
flush_tlb_range() {}
flush_page_to_ram() {}
