/*
 *  linux/arch/m68k/mm/init.c
 *
 *  Copyright (C) 1995  Hamish Macdonald
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#ifdef CONFIG_BLK_DEV_RAM
#include <linux/blk.h>
#endif

#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/bootinfo.h>
#include <asm/machdep.h>

extern void die_if_kernel(char *,struct pt_regs *,long);
extern void init_kpointer_table(void);
extern void show_net_buffers(void);
extern unsigned long mm_phys_to_virt (unsigned long addr);
extern char *rd_start;
extern int rd_doload;

unsigned long ramdisk_length;

/*
 * BAD_PAGE is the page that is used for page faults when linux
 * is out-of-memory. Older versions of linux just did a
 * do_exit(), but using this instead means there is less risk
 * for a process dying in kernel mode, possibly leaving an inode
 * unused, etc.
 *
 * BAD_PAGETABLE is the accompanying page-table: it is initialized
 * to point to BAD_PAGE entries.
 *
 * ZERO_PAGE is a special page that is used for zero-initialized
 * data and COW.
 */
static unsigned long empty_bad_page_table;

pte_t *__bad_pagetable(void)
{
    memset((void *)empty_bad_page_table, 0, PAGE_SIZE);
    return (pte_t *)empty_bad_page_table;
}

static unsigned long empty_bad_page;

pte_t __bad_page(void)
{
    memset ((void *)empty_bad_page, 0, PAGE_SIZE);
    return pte_mkdirty(mk_pte(empty_bad_page, PAGE_SHARED));
}

unsigned long empty_zero_page;

void show_mem(void)
{
    unsigned long i;
    int free = 0, total = 0, reserved = 0, nonshared = 0, shared = 0;

    printk("\nMem-info:\n");
    show_free_areas();
    printk("Free swap:       %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
    i = high_memory >> PAGE_SHIFT;
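    /*
     * Classify every page by its mem_map reference count: reserved
     * pages are tallied separately, count == 0 is free, count == 1 has
     * a single user, and a page with count n > 1 contributes n-1 to
     * the "shared" total (each reference beyond the first is a share).
     */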
    while (i-- > 0) {
        total++;
        if (PageReserved(mem_map+i))
            reserved++;
        else if (!mem_map[i].count)
            free++;
        else if (mem_map[i].count == 1)
            nonshared++;
        else
            shared += mem_map[i].count-1;
    }
    printk("%d pages of RAM\n",total);
    printk("%d free pages\n",free);
    printk("%d reserved pages\n",reserved);
    printk("%d pages nonshared\n",nonshared);
    printk("%d pages shared\n",shared);
    show_buffers();
#ifdef CONFIG_NET
    show_net_buffers();
#endif
}

#if 0 /* The 68030 doesn't care about reserved bits. */
/*
 * Bits to add to page descriptors for "normal" caching mode.
 * For 68020/030 this is 0.
 * For 68040, this is _PAGE_CACHE040 (cachable, copyback)
 */
unsigned long mm_cachebits;
#endif

pte_t *kernel_page_table (unsigned long *memavailp)
{
        pte_t *ptablep;

        ptablep = (pte_t *)*memavailp;
        *memavailp += PAGE_SIZE;

        nocache_page ((unsigned long)ptablep);

        return ptablep;
}
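
/*
 * Note: kernel_page_table() simply hands out the next whole page from
 * *memavailp and marks it cache-inhibited with nocache_page().  The
 * reason (presumably) is that the '040/'060 table-walk hardware fetches
 * descriptors straight from memory without snooping the copyback data
 * cache, so pages holding page tables must not be cached.
 */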

static unsigned long map_chunk (unsigned long addr,
                                unsigned long size,
                                unsigned long *memavailp)
{
#define ONEMEG  (1024*1024)
#define L3TREESIZE (256*1024)

        int is040 = m68k_is040or060;
        static unsigned long mem_mapped = 0;
        static unsigned long virtaddr = 0;
        static pte_t *ktablep = NULL;
        unsigned long *kpointerp;
        unsigned long physaddr;
        extern pte_t *kpt;
        int pindex;   /* index into pointer table */
        pgd_t *page_dir = pgd_offset_k (virtaddr);

        if (!pgd_present (*page_dir)) {
                /* we need a new pointer table */
                kpointerp = (unsigned long *) get_kpointer_table ();
                pgd_set (page_dir, (pmd_t *) kpointerp);
                memset (kpointerp, 0, PTRS_PER_PMD * sizeof (pmd_t));
        }
        else
                kpointerp = (unsigned long *) pgd_page (*page_dir);

        /*
         * pindex is the offset into the pointer table for the
         * descriptors for the current virtual address being mapped.
         */
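        /*
         * Arithmetic: each pointer-table slot maps 2^18 bytes = 256K
         * (L3TREESIZE, i.e. 64 pages of 4K), hence the shift by 18.
         * The 0x7f mask selects one of 128 slots, so one pointer table
         * spans 128 * 256K = 32M; that is why the mapping loop below
         * allocates a fresh pointer table every 32M.
         */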
        pindex = (virtaddr >> 18) & 0x7f;

#ifdef DEBUG
        printk ("mm=%ld, kernel_pg_dir=%p, kpointerp=%p, pindex=%d\n",
                mem_mapped, kernel_pg_dir, kpointerp, pindex);
#endif

        /*
         * if this is running on an '040, we already allocated a page
         * table for the first 4M.  The address is stored in kpt by
         * arch/head.S
         */
        if (is040 && mem_mapped == 0)
                ktablep = kpt;

        for (physaddr = addr;
             physaddr < addr + size;
             mem_mapped += L3TREESIZE, virtaddr += L3TREESIZE) {

#ifdef DEBUG
                printk ("pa=%#lx va=%#lx ", physaddr, virtaddr);
#endif

                if (pindex > 127 && mem_mapped >= 32*ONEMEG) {
                        /* we need a new pointer table every 32M */
#ifdef DEBUG
                        printk ("[new pointer]");
#endif

                        kpointerp = (unsigned long *)get_kpointer_table ();
                        pgd_set(pgd_offset_k(virtaddr), (pmd_t *)kpointerp);
                        pindex = 0;
                }

                if (is040) {
                        int i;
                        unsigned long ktable;

                        /* Don't map the first 4 MB again. The pagetables
                         * for this range have already been initialized
                         * in boot/head.S. Otherwise the pages used for
                         * tables would be reinitialized to copyback mode.
                         */

                        if (mem_mapped < 4 * ONEMEG)
                        {
#ifdef DEBUG
                                printk ("Already initialized\n");
#endif
                                physaddr += L3TREESIZE;
                                pindex++;
                                continue;
                        }
#ifdef DEBUG
                        printk ("[setup table]");
#endif

                        /*
                         * 68040, use page tables pointed to by the
                         * kernel pointer table.
                         */

                        if ((pindex & 15) == 0) {
                                /* Need new page table every 4M on the '040 */
#ifdef DEBUG
                                printk ("[new table]");
#endif
                                ktablep = kernel_page_table (memavailp);
                        }
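
                        /*
                         * Arithmetic: a 4K page holds 1024 four-byte
                         * page descriptors, i.e. 1024 * 4K = 4M of
                         * mappings.  Each pointer-table slot consumes
                         * a 64-entry (256K) slice of that page, so a
                         * table page lasts for 16 slots, which is when
                         * (pindex & 15) wraps back to 0.
                         */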

                        ktable = VTOP(ktablep);

                        /*
                         * initialize section of the page table mapping
                         * this 256K portion.
                         */
                        for (i = 0; i < 64; i++) {
                                pte_val(ktablep[i]) = physaddr | _PAGE_PRESENT
                                        | _PAGE_CACHE040 | _PAGE_GLOBAL040;
                                physaddr += PAGE_SIZE;
                        }
                        ktablep += 64;

                        /*
                         * make the kernel pointer table point to the
                         * kernel page table.  Each entry points to a
                         * 64-entry section of the page table.
                         */

                        kpointerp[pindex++] = ktable | _PAGE_TABLE;
                } else {
                        /*
                         * 68030, use early termination page descriptors.
                         * Each one points to 64 pages (256K).
                         */
#ifdef DEBUG
                        printk ("[early term] ");
#endif
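                        /*
                         * An "early termination" descriptor is a page
                         * descriptor stored directly in the pointer
                         * table: the '030 stops its table walk there,
                         * so the single entry maps the whole 256K that
                         * a slot at this level covers and no separate
                         * page table is needed.
                         */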
                        if (virtaddr == 0UL) {
                                /* map the first 256K using a 64 entry
                                 * 3rd level page table.
                                 * UNMAP the first entry to trap
                                 * zero page (NULL pointer) references
                                 */
                                int i;
                                unsigned long *tbl;

                                tbl = (unsigned long *)get_kpointer_table();

                                kpointerp[pindex++] = VTOP(tbl) | _PAGE_TABLE;

                                for (i = 0; i < 64; i++, physaddr += PAGE_SIZE)
                                        tbl[i] = physaddr | _PAGE_PRESENT;

                                /* unmap the zero page */
                                tbl[0] = 0;
                        } else {
                                /* not the first 256K */
                                kpointerp[pindex++] = physaddr | _PAGE_PRESENT;
#ifdef DEBUG
                                printk ("%lx=%lx ", VTOP(&kpointerp[pindex-1]),
                                        kpointerp[pindex-1]);
#endif
                                physaddr += 64 * PAGE_SIZE;
                        }
                }
#ifdef DEBUG
                printk ("\n");
#endif
        }

        return mem_mapped;
}

extern unsigned long free_area_init(unsigned long, unsigned long);

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S.
 * The parameters are the starting and ending addresses of available
 * kernel virtual memory.
 */
unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
{
        int chunk;
        unsigned long mem_avail = 0;
        /* pointer to page table for kernel stacks */
        extern unsigned long availmem;

#ifdef DEBUG
        {
                extern pte_t *kpt;
                printk ("start of paging_init (%p, %p, %lx, %lx, %lx)\n",
                        kernel_pg_dir, kpt, availmem, start_mem, end_mem);
        }
#endif

        init_kpointer_table();
#if 0
        /*
         * Setup cache bits
         */
        mm_cachebits = m68k_is040or060 ? _PAGE_CACHE040 : 0;

        /* Initialize protection map.  */
        protection_map[0] = PAGE_READONLY;
        protection_map[1] = PAGE_READONLY;
        protection_map[2] = PAGE_COPY;
        protection_map[3] = PAGE_COPY;
        protection_map[4] = PAGE_READONLY;
        protection_map[5] = PAGE_READONLY;
        protection_map[6] = PAGE_COPY;
        protection_map[7] = PAGE_COPY;
        protection_map[8] = PAGE_READONLY;
        protection_map[9] = PAGE_READONLY;
        protection_map[10] = PAGE_SHARED;
        protection_map[11] = PAGE_SHARED;
        protection_map[12] = PAGE_READONLY;
        protection_map[13] = PAGE_READONLY;
        protection_map[14] = PAGE_SHARED;
        protection_map[15] = PAGE_SHARED;
#endif

        /*
         * Map the physical memory available into the kernel virtual
         * address space.  It may allocate some memory for page
         * tables and thus modify availmem.
         */

        for (chunk = 0; chunk < boot_info.num_memory; chunk++) {
                mem_avail = map_chunk (boot_info.memory[chunk].addr,
                                       boot_info.memory[chunk].size,
                                       &availmem);
        }
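
        /*
         * Note that mem_avail ends up holding the grand total mapped so
         * far, not just the size of the last chunk: mem_mapped is
         * static inside map_chunk() and accumulates across calls.
         */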
        flush_tlb_all();
#ifdef DEBUG
        printk ("memory available is %ldKB\n", mem_avail >> 10);
#endif

        /*
         * virtual address after end of kernel
         * "availmem" is setup by the code in head.S.
         */
        start_mem = availmem;

#ifdef DEBUG
        printk ("start_mem is %#lx\nvirtual_end is %#lx\n",
                start_mem, end_mem);
#endif

        /*
         * initialize the bad page table and bad page to point
         * to a couple of allocated pages
         */
        empty_bad_page_table = start_mem;
        start_mem += PAGE_SIZE;
        empty_bad_page = start_mem;
        start_mem += PAGE_SIZE;
        empty_zero_page = start_mem;
        start_mem += PAGE_SIZE;
        memset((void *)empty_zero_page, 0, PAGE_SIZE);

#if 0
        /*
         * allocate the "swapper" page directory and
         * record in task 0 (swapper) tss
         */
        swapper_pg_dir = (pgd_t *)get_kpointer_table();

        init_mm.pgd = swapper_pg_dir;
#endif

        memset (swapper_pg_dir, 0, sizeof(pgd_t)*PTRS_PER_PGD);
        task[0]->tss.pagedir_v = (unsigned long *)swapper_pg_dir;
        task[0]->tss.pagedir_p = VTOP (swapper_pg_dir);

#ifdef DEBUG
        printk ("task 0 pagedir at %p virt, %#lx phys\n",
                task[0]->tss.pagedir_v, task[0]->tss.pagedir_p);
#endif

        /* setup CPU root pointer for swapper task */
        task[0]->tss.crp[0] = 0x80000000 | _PAGE_SHORT;
        task[0]->tss.crp[1] = task[0]->tss.pagedir_p;
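
        /*
         * crp[] is an image of the 64-bit CPU root pointer: the first
         * long holds the control bits and the second the physical
         * address of the root table.  0x80000000 presumably sets the
         * limit field so that no table index is rejected, and
         * _PAGE_SHORT marks the root-level descriptors as short format.
         */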

        if (m68k_is040or060)
                asm ("movel %0,%/d0\n\t"
                     ".long 0x4e7b0806" /* movec d0,urp */
                     : /* no outputs */
                     : "g" (task[0]->tss.crp[1])
                     : "d0");
        else
                asm ("pmove %0@,%/crp"
                     : /* no outputs */
                     : "a" (task[0]->tss.crp));

#ifdef DEBUG
        printk ("set crp\n");
#endif

        /*
         * Set up SFC/DFC registers (user data space)
         */
        set_fs (USER_DS);

#ifdef DEBUG
        printk ("before free_area_init\n");
#endif

#ifdef CONFIG_BLK_DEV_RAM
#ifndef CONFIG_BLK_DEV_INITRD
        /*
         * Since the initialization of the ramdisks has been changed
         * so that it fits the new driver initialization scheme, we have
         * to make room for our preloaded image here, instead of doing
         * it in rd_init(), as we cannot kmalloc() a block large enough
         * for the image.
         */

        ramdisk_length = boot_info.ramdisk_size * 1024;

        if ((ramdisk_length > 0) && (ROOT_DEV == 0)) {
          char *rdp;         /* current location of ramdisk */

          rd_start = (char *) start_mem;

          /* get current address of ramdisk */
          rdp = (char *)mm_phys_to_virt (boot_info.ramdisk_addr);

          /* copy the ram disk image */
          memcpy (rd_start, rdp, ramdisk_length);
          start_mem += ramdisk_length;
          rd_doload = 1;     /* tell rd_load to load this thing */
        }
#endif
#endif
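
        /*
         * Note: advancing start_mem past the copied image keeps the
         * ramdisk out of the pool that free_area_init() below hands to
         * the page allocator, so the image survives until rd_load()
         * picks it up.
         */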

        return free_area_init (start_mem, end_mem);
}

void mem_init(unsigned long start_mem, unsigned long end_mem)
{
        int codepages = 0;
        int datapages = 0;
        unsigned long tmp;
        extern int _etext;

        end_mem &= PAGE_MASK;
        high_memory = end_mem;

        start_mem = PAGE_ALIGN(start_mem);
        while (start_mem < high_memory) {
                clear_bit(PG_reserved, &mem_map[MAP_NR(start_mem)].flags);
                start_mem += PAGE_SIZE;
        }
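
        /*
         * free_area_init() marked every page reserved; the loop above
         * clears PG_reserved on everything from the end of the kernel
         * image up to high_memory, so only the pages the kernel itself
         * occupies (plus the machine-specific reservations below) stay
         * reserved.
         */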
467
 
468
#ifdef CONFIG_ATARI
469
 
470
        if (MACH_IS_ATARI) {
471
 
472
                /* If the page with physical address 0 isn't the first kernel
473
                 * code page, it has to be reserved because the first 2 KB of
474
                 * ST-Ram can only be accessed from supervisor mode by
475
                 * hardware.
476
                 */
477
 
478
                unsigned long virt0 = PTOV( 0 ), adr;
479
                extern unsigned long rsvd_stram_beg, rsvd_stram_end;
480
 
481
                if (virt0 != 0) {
482
 
483
                        set_bit(PG_reserved, &mem_map[MAP_NR(virt0)].flags);
484
 
485
                        /* Also, reserve all pages that have been marked by
486
                         * stram_alloc() (e.g. for the screen memory). (This may
487
                         * treat the first ST-Ram page a second time, but that
488
                         * doesn't hurt...) */
489
 
490
                        rsvd_stram_end += PAGE_SIZE - 1;
491
                        rsvd_stram_end &= PAGE_MASK;
492
                        rsvd_stram_beg &= PAGE_MASK;
493
                        for( adr = rsvd_stram_beg; adr < rsvd_stram_end; adr += PAGE_SIZE )
494
                                set_bit(PG_reserved, &mem_map[MAP_NR(adr)].flags);
495
                }
496
        }
497
 
498
#endif
499
#ifdef DEBUG
500
        printk ("task[0] root table is %p\n", task[0]->tss.pagedir_v);
501
#endif
502
 
        for (tmp = 0 ; tmp < end_mem ; tmp += PAGE_SIZE) {
                if (VTOP (tmp) >= mach_max_dma_address)
                        clear_bit(PG_DMA, &mem_map[MAP_NR(tmp)].flags);
                if (PageReserved(mem_map+MAP_NR(tmp))) {
                        if (tmp < (unsigned long)&_etext)
                                codepages++;
                        else
                                datapages++;
                        continue;
                }
                mem_map[MAP_NR(tmp)].count = 1;
#ifdef CONFIG_BLK_DEV_INITRD
                if (!initrd_start || (tmp < initrd_start || tmp >= initrd_end))
#endif
                        free_page(tmp);
        }
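
        /*
         * Unit bookkeeping for the banner below: tmp is a byte count,
         * so ">> 10" converts it to KB, while codepages/datapages are
         * page counts, converted with "<< (PAGE_SHIFT-10)" (each 4K
         * page is 4 KB).
         */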
        tmp = nr_free_pages << PAGE_SHIFT;
        printk("Memory: %luk/%luk available (%dk kernel code, %dk data)\n",
               tmp >> 10,
               high_memory >> 10,
               codepages << (PAGE_SHIFT-10),
               datapages << (PAGE_SHIFT-10));
}

void si_meminfo(struct sysinfo *val)
{
    unsigned long i;

    i = high_memory >> PAGE_SHIFT;
    val->totalram = 0;
    val->sharedram = 0;
    val->freeram = nr_free_pages << PAGE_SHIFT;
    val->bufferram = buffermem;
    while (i-- > 0) {
        if (PageReserved(mem_map+i))
            continue;
        val->totalram++;
        if (!mem_map[i].count)
            continue;
        val->sharedram += mem_map[i].count-1;
    }
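    /*
     * totalram and sharedram were counted in pages above; the shifts
     * below scale them to bytes so all four fields use the same unit
     * (freeram and bufferram are already byte counts).
     */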
    val->totalram <<= PAGE_SHIFT;
    val->sharedram <<= PAGE_SHIFT;
    return;
}
