/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapctl.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/blk.h>

#include <asm/segment.h>
#include <asm/mach-types.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/hardware.h>
#include <asm/setup.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

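/*
 * Number of memory nodes: with CONFIG_DISCONTIGMEM up to four
 * discontiguous nodes are supported; otherwise all memory lives
 * in node 0.
 */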
#ifndef CONFIG_DISCONTIGMEM
#define NR_NODES        1
#else
#define NR_NODES        4
#endif

#ifdef CONFIG_CPU_32
#define TABLE_OFFSET    (PTRS_PER_PTE)
#else
#define TABLE_OFFSET    0
#endif

#define TABLE_SIZE      ((TABLE_OFFSET + PTRS_PER_PTE) * sizeof(pte_t))
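/*
 * On 32-bit ARM each PTE allocation holds two parallel tables: the
 * hardware page table plus a Linux-visible copy carrying the
 * young/dirty bits the hardware lacks, hence TABLE_SIZE is doubled
 * via TABLE_OFFSET.
 */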

static unsigned long totalram_pages;
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern char _stext, _text, _etext, _end, __init_begin, __init_end;
extern unsigned long phys_initrd_start;
extern unsigned long phys_initrd_size;

/*
 * The sole use of this is to pass memory configuration
 * data from paging_init to mem_init.
 */
static struct meminfo meminfo __initdata = { 0, };

/*
 * empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */
struct page *empty_zero_page;

#ifndef CONFIG_NO_PGT_CACHE
struct pgtable_cache_struct quicklists;

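/*
 * Trim the pgd/pmd/pte quicklists back towards 'low' entries once
 * they have grown beyond 'high'; returns the number of pages freed.
 */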
int do_check_pgt_cache(int low, int high)
{
        int freed = 0;

        if(pgtable_cache_size > high) {
                do {
                        if(pgd_quicklist) {
                                free_pgd_slow(get_pgd_fast());
                                freed++;
                        }
                        if(pmd_quicklist) {
                                pmd_free_slow(pmd_alloc_one_fast(NULL, 0));
                                freed++;
                        }
                        if(pte_quicklist) {
                                pte_free_slow(pte_alloc_one_fast(NULL, 0));
                                freed++;
                        }
                } while(pgtable_cache_size > low);
        }
        return freed;
}
#else
int do_check_pgt_cache(int low, int high)
{
        return 0;
}
#endif

/* This is currently broken
 * PG_skip is used on sparc/sparc64 architectures to "skip" certain
 * parts of the address space.
 *
 * #define PG_skip      10
 * #define PageSkip(page) (machine_is_riscpc() && test_bit(PG_skip, &(page)->flags))
 *                      if (PageSkip(page)) {
 *                              page = page->next_hash;
 *                              if (page == NULL)
 *                                      break;
 *                      }
 */
void show_mem(void)
{
        int free = 0, total = 0, reserved = 0;
        int shared = 0, cached = 0, slab = 0, node;

        printk("Mem-info:\n");
        show_free_areas();
        printk("Free swap:       %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));

        for (node = 0; node < numnodes; node++) {
                struct page *page, *end;

                page = NODE_MEM_MAP(node);
                end  = page + NODE_DATA(node)->node_size;

                do {
                        total++;
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
                                cached++;
                        else if (PageSlab(page))
                                slab++;
                        else if (!page_count(page))
                                free++;
                        else
                                shared += atomic_read(&page->count) - 1;
                        page++;
                } while (page < end);
        }

        printk("%d pages of RAM\n", total);
        printk("%d free pages\n", free);
        printk("%d reserved pages\n", reserved);
        printk("%d slab pages\n", slab);
        printk("%d pages shared\n", shared);
        printk("%d pages swap cached\n", cached);
#ifndef CONFIG_NO_PGT_CACHE
        printk("%ld page tables cached\n", pgtable_cache_size);
#endif
        show_buffers();
}

struct node_info {
        unsigned int start;
        unsigned int end;
        int bootmap_pages;
};

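/*
 * Address/pfn conversion helpers: the O_ variants take a physical
 * address, the V_ variants a kernel virtual address (converted with
 * __pa).  _UP rounds up to the next page boundary, _DOWN truncates.
 */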
#define O_PFN_DOWN(x)   ((x) >> PAGE_SHIFT)
#define V_PFN_DOWN(x)   O_PFN_DOWN(__pa(x))

#define O_PFN_UP(x)     (PAGE_ALIGN(x) >> PAGE_SHIFT)
#define V_PFN_UP(x)     O_PFN_UP(__pa(x))

#define PFN_SIZE(x)     ((x) >> PAGE_SHIFT)
#define PFN_RANGE(s,e)  PFN_SIZE(PAGE_ALIGN((unsigned long)(e)) - \
                                (((unsigned long)(s)) & PAGE_MASK))

/*
 * FIXME: We really want to avoid allocating the bootmap bitmap
 * over the top of the initrd.  Hopefully, this is located towards
 * the start of a bank, so if we allocate the bootmap bitmap at
 * the end, we won't clash.
 */
static unsigned int __init
find_bootmap_pfn(int node, struct meminfo *mi, unsigned int bootmap_pages)
{
        unsigned int start_pfn, bank, bootmap_pfn;

        start_pfn   = V_PFN_UP(&_end);
        bootmap_pfn = 0;

        for (bank = 0; bank < mi->nr_banks; bank ++) {
                unsigned int start, end;

                if (mi->bank[bank].node != node)
                        continue;

                start = O_PFN_UP(mi->bank[bank].start);
                end   = O_PFN_DOWN(mi->bank[bank].size +
                                   mi->bank[bank].start);

                if (end < start_pfn)
                        continue;

                if (start < start_pfn)
                        start = start_pfn;

                if (end <= start)
                        continue;

                if (end - start >= bootmap_pages) {
                        bootmap_pfn = start;
                        break;
                }
        }

        if (bootmap_pfn == 0)
                BUG();

        return bootmap_pfn;
}

/*
 * Scan the memory info structure and pull out:
 *  - the end of memory
 *  - the number of nodes
 *  - the pfn range of each node
 *  - the number of bootmem bitmap pages
 */
static unsigned int __init
find_memend_and_nodes(struct meminfo *mi, struct node_info *np)
{
        unsigned int i, bootmem_pages = 0, memend_pfn = 0;

        for (i = 0; i < NR_NODES; i++) {
                np[i].start = -1U;
                np[i].end = 0;
                np[i].bootmap_pages = 0;
        }

        for (i = 0; i < mi->nr_banks; i++) {
                unsigned long start, end;
                int node;

                if (mi->bank[i].size == 0) {
                        /*
                         * Mark this bank with an invalid node number
                         */
                        mi->bank[i].node = -1;
                        continue;
                }

                node = mi->bank[i].node;

                if (node >= numnodes) {
                        numnodes = node + 1;

                        /*
                         * Make sure we haven't exceeded the maximum number
                         * of nodes that we have in this configuration.  If
                         * we have, we're in trouble.  (maybe we ought to
                         * limit, instead of bugging?)
                         */
                        if (numnodes > NR_NODES)
                                BUG();
                }

                /*
                 * Get the start and end pfns for this bank
                 */
                start = O_PFN_UP(mi->bank[i].start);
                end   = O_PFN_DOWN(mi->bank[i].start + mi->bank[i].size);

                if (np[node].start > start)
                        np[node].start = start;

                if (np[node].end < end)
                        np[node].end = end;

                if (memend_pfn < end)
                        memend_pfn = end;
        }

        /*
         * Calculate the number of pages we require to
         * store the bootmem bitmaps.
         */
        for (i = 0; i < numnodes; i++) {
                if (np[i].end == 0)
                        continue;

                np[i].bootmap_pages = bootmem_bootmap_pages(np[i].end -
                                                            np[i].start);
                bootmem_pages += np[i].bootmap_pages;
        }

        /*
         * This doesn't seem to be used by the Linux memory
         * manager any more.  If we can get rid of it, we
         * also get rid of some of the stuff above as well.
         */
        max_low_pfn = memend_pfn - O_PFN_DOWN(PHYS_OFFSET);
//      max_pfn = memend_pfn - O_PFN_DOWN(PHYS_OFFSET);
        mi->end = memend_pfn << PAGE_SHIFT;

        return bootmem_pages;
}

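/*
 * Check that the initrd, if present, lies wholly within one memory
 * bank.  Returns the node containing it, -1 if it falls outside all
 * banks (the initrd is then disabled), or -2 if there is no initrd.
 */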
static int __init check_initrd(struct meminfo *mi)
{
        int initrd_node = -2;

#ifdef CONFIG_BLK_DEV_INITRD
        unsigned long end = phys_initrd_start + phys_initrd_size;

        /*
         * Make sure that the initrd is within a valid area of
         * memory.
         */
        if (phys_initrd_size) {
                unsigned int i;

                initrd_node = -1;

                for (i = 0; i < mi->nr_banks; i++) {
                        unsigned long bank_end;

                        bank_end = mi->bank[i].start + mi->bank[i].size;

                        if (mi->bank[i].start <= phys_initrd_start &&
                            end <= bank_end)
                                initrd_node = mi->bank[i].node;
                }
        }

        if (initrd_node == -1) {
                printk(KERN_ERR "initrd (0x%08lx - 0x%08lx) extends beyond "
                       "physical memory - disabling initrd\n",
                       phys_initrd_start, end);
                phys_initrd_start = phys_initrd_size = 0;
        }
#endif

        return initrd_node;
}

/*
 * Reserve the various regions of node 0
 */
static __init void reserve_node_zero(unsigned int bootmap_pfn, unsigned int bootmap_pages)
{
        pg_data_t *pgdat = NODE_DATA(0);

        /*
         * Register the kernel text and data with bootmem.
         * Note that this can only be in node 0.
         */
        reserve_bootmem_node(pgdat, __pa(&_stext), &_end - &_stext);

#ifdef CONFIG_CPU_32
        /*
         * Reserve the page tables.  These are already in use,
         * and can only be in node 0.
         */
        reserve_bootmem_node(pgdat, __pa(swapper_pg_dir),
                             PTRS_PER_PGD * sizeof(pgd_t));
#endif
        /*
         * And don't forget to reserve the allocator bitmap,
         * which will be freed later.
         */
        reserve_bootmem_node(pgdat, bootmap_pfn << PAGE_SHIFT,
                             bootmap_pages << PAGE_SHIFT);

        /*
         * Hmm... This should go elsewhere, but we really really
         * need to stop things allocating the low memory; we need
         * a better implementation of GFP_DMA which does not assume
         * that DMA-able memory starts at zero.
         */
        if (machine_is_integrator())
                reserve_bootmem_node(pgdat, 0, __pa(swapper_pg_dir));
        /*
         * These should likewise go elsewhere.  They pre-reserve
         * the screen memory region at the start of main system
         * memory.
         */
        if (machine_is_archimedes() || machine_is_a5k())
                reserve_bootmem_node(pgdat, 0x02000000, 0x00080000);
        if (machine_is_edb7211() || machine_is_fortunet())
                reserve_bootmem_node(pgdat, 0xc0000000, 0x00020000);
        if (machine_is_p720t())
                reserve_bootmem_node(pgdat, PHYS_OFFSET, 0x00014000);
#ifdef CONFIG_SA1111
        /*
         * Because of the SA1111 DMA bug, we want to preserve
         * our precious DMA-able memory...
         */
        reserve_bootmem_node(pgdat, PHYS_OFFSET, __pa(swapper_pg_dir)-PHYS_OFFSET);
#endif
}

/*
 * Register all available RAM in this node with the bootmem allocator.
 */
static inline void free_bootmem_node_bank(int node, struct meminfo *mi)
{
        pg_data_t *pgdat = NODE_DATA(node);
        int bank;

        for (bank = 0; bank < mi->nr_banks; bank++)
                if (mi->bank[bank].node == node)
                        free_bootmem_node(pgdat, mi->bank[bank].start,
                                          mi->bank[bank].size);
}

/*
 * Initialise the bootmem allocator for all nodes.  This is called
 * early during the architecture specific initialisation.
 */
void __init bootmem_init(struct meminfo *mi)
{
        struct node_info node_info[NR_NODES], *np = node_info;
        unsigned int bootmap_pages, bootmap_pfn, map_pg;
        int node, initrd_node;

        bootmap_pages = find_memend_and_nodes(mi, np);
        bootmap_pfn   = find_bootmap_pfn(0, mi, bootmap_pages);
        initrd_node   = check_initrd(mi);

        map_pg = bootmap_pfn;

        /*
         * Initialise the bootmem nodes.
         *
         * What we really want to do is:
         *
         *   unmap_all_regions_except_kernel();
         *   for_each_node_in_reverse_order(node) {
         *     map_node(node);
         *     allocate_bootmem_map(node);
         *     init_bootmem_node(node);
         *     free_bootmem_node(node);
         *   }
         *
         * but this is a 2.5-type change.  For now, we just set
         * the nodes up in reverse order.
         *
         * (we could also do with rolling bootmem_init and paging_init
         * into one generic "memory_init" type function).
         */
        np += numnodes - 1;
        for (node = numnodes - 1; node >= 0; node--, np--) {
                /*
                 * If there are no pages in this node, ignore it.
                 * Note that node 0 must always have some pages.
                 */
                if (np->end == 0) {
                        if (node == 0)
                                BUG();
                        continue;
                }

                /*
                 * Initialise the bootmem allocator.
                 */
                init_bootmem_node(NODE_DATA(node), map_pg, np->start, np->end);
                free_bootmem_node_bank(node, mi);
                map_pg += np->bootmap_pages;

                /*
                 * If this is node 0, we need to reserve some areas ASAP -
                 * we may use bootmem on node 0 to setup the other nodes.
                 */
                if (node == 0)
                        reserve_node_zero(bootmap_pfn, bootmap_pages);
        }


#ifdef CONFIG_BLK_DEV_INITRD
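        /*
         * Now that the allocator is initialised, protect the initrd
         * pages and record their virtual addresses for later use.
         */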
        if (phys_initrd_size && initrd_node >= 0) {
                reserve_bootmem_node(NODE_DATA(initrd_node), phys_initrd_start,
                                     phys_initrd_size);
                initrd_start = __phys_to_virt(phys_initrd_start);
                initrd_end = initrd_start + phys_initrd_size;
        }
#endif

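        /*
         * Every page of the bootmap area must have been handed out
         * to exactly one node's bitmap.
         */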
        if (map_pg != bootmap_pfn + bootmap_pages)
                BUG();

}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
{
        void *zero_page;
        int node;

        memcpy(&meminfo, mi, sizeof(meminfo));

        /*
         * allocate the zero page.  Note that we count on this going ok.
         */
        zero_page = alloc_bootmem_low_pages(PAGE_SIZE);

        /*
         * initialise the page tables.
         */
        memtable_init(mi);
        if (mdesc->map_io)
                mdesc->map_io();
        flush_cache_all();
        flush_tlb_all();

        /*
         * initialise the zones within each node
         */
        for (node = 0; node < numnodes; node++) {
                unsigned long zone_size[MAX_NR_ZONES];
                unsigned long zhole_size[MAX_NR_ZONES];
                struct bootmem_data *bdata;
                pg_data_t *pgdat;
                int i;

                /*
                 * Initialise the zone size information.
                 */
                for (i = 0; i < MAX_NR_ZONES; i++) {
                        zone_size[i]  = 0;
                        zhole_size[i] = 0;
                }

                pgdat = NODE_DATA(node);
                bdata = pgdat->bdata;

                /*
                 * The size of this node has already been determined.
                 * If we need to do anything fancy with the allocation
                 * of this memory to the zones, now is the time to do
                 * it.
                 */
                zone_size[0] = bdata->node_low_pfn -
                                (bdata->node_boot_start >> PAGE_SHIFT);

                /*
                 * If this zone has zero size, skip it.
                 */
                if (!zone_size[0])
                        continue;

                /*
                 * For each bank in this node, calculate the size of the
                 * holes.  holes = node_size - sum(bank_sizes_in_node)
                 */
                zhole_size[0] = zone_size[0];
                for (i = 0; i < mi->nr_banks; i++) {
                        if (mi->bank[i].node != node)
                                continue;

                        zhole_size[0] -= mi->bank[i].size >> PAGE_SHIFT;
                }

                /*
                 * Adjust the sizes according to any special
                 * requirements for this machine type.
                 */
                arch_adjust_zones(node, zone_size, zhole_size);

                free_area_init_node(node, pgdat, 0, zone_size,
                                bdata->node_boot_start, zhole_size);
        }

        /*
         * finish off the bad pages once
         * the mem_map is initialised
         */
        memzero(zero_page, PAGE_SIZE);
        empty_zero_page = virt_to_page(zero_page);
        flush_dcache_page(empty_zero_page);
}

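/*
 * Hand the pages in [addr, end) back to the page allocator, clearing
 * their reserved bit, and report how much memory was freed.
 */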
static inline void free_area(unsigned long addr, unsigned long end, char *s)
{
        unsigned int size = (end - addr) >> 10;

        for (; addr < end; addr += PAGE_SIZE) {
                struct page *page = virt_to_page(addr);
                ClearPageReserved(page);
                set_page_count(page, 1);
                free_page(addr);
                totalram_pages++;
        }

        if (size && s)
                printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
        unsigned int codepages, datapages, initpages;
        int i, node;

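        /* Despite the names, these are byte counts; they are scaled to K when printed. */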
        codepages = &_etext - &_text;
        datapages = &_end - &_etext;
        initpages = &__init_end - &__init_begin;

        high_memory = (void *)__va(meminfo.end);
        max_mapnr   = virt_to_page(high_memory) - mem_map;

        /*
         * We may have non-contiguous memory.
         */
        if (meminfo.nr_banks != 1)
                create_memmap_holes(&meminfo);

        /* this will put all unused low memory onto the freelists */
        for (node = 0; node < numnodes; node++) {
                pg_data_t *pgdat = NODE_DATA(node);

                if (pgdat->node_size != 0)
                        totalram_pages += free_all_bootmem_node(pgdat);
        }

#ifdef CONFIG_SA1111
        /* now that our DMA memory is actually so designated, we can free it */
        free_area(PAGE_OFFSET, (unsigned long)swapper_pg_dir, NULL);
#endif

        /*
         * Since our memory may not be contiguous, calculate the
         * real number of pages we have in this system
         */
        printk(KERN_INFO "Memory:");

        num_physpages = 0;
        for (i = 0; i < meminfo.nr_banks; i++) {
                num_physpages += meminfo.bank[i].size >> PAGE_SHIFT;
                printk(" %ldMB", meminfo.bank[i].size >> 20);
        }

        printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
        printk(KERN_NOTICE "Memory: %luKB available (%dK code, "
                "%dK data, %dK init)\n",
                (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
                codepages >> 10, datapages >> 10, initpages >> 10);

        if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
                extern int sysctl_overcommit_memory;
                /*
                 * On a machine this small we won't get
                 * anywhere without overcommit, so turn
                 * it on by default.
                 */
                sysctl_overcommit_memory = 1;
        }
}

void free_initmem(void)
{
        if (!machine_is_integrator()) {
                free_area((unsigned long)(&__init_begin),
                          (unsigned long)(&__init_end),
                          "init");
        }
}

#ifdef CONFIG_BLK_DEV_INITRD

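/*
 * Booting with "keepinitrd" on the command line prevents
 * free_initrd_mem() from releasing the initrd pages.
 */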
static int keep_initrd;

void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (!keep_initrd)
                free_area(start, end, "initrd");
}

static int __init keepinitrd_setup(char *__unused)
{
        keep_initrd = 1;
        return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif

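/*
 * Fill in the sysinfo structure for sysinfo(2); this port has no
 * highmem, so the high fields are always zero.
 */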
void si_meminfo(struct sysinfo *val)
{
        val->totalram  = totalram_pages;
        val->sharedram = 0;
        val->freeram   = nr_free_pages();
        val->bufferram = atomic_read(&buffermem_pages);
        val->totalhigh = 0;
        val->freehigh  = 0;
        val->mem_unit  = PAGE_SIZE;
}
