OpenCores
URL: https://opencores.org/ocsvn/or1k_soc_on_altera_embedded_dev_kit/or1k_soc_on_altera_embedded_dev_kit/trunk

Subversion repository: or1k_soc_on_altera_embedded_dev_kit
File: trunk/linux-2.6/linux-2.6.24/arch/arm/mm/init.c (rev 3, author: xianfeng)
/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>

#include <asm/mach-types.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

extern void _text, _etext, __data_start, _end, __init_begin, __init_end;
extern unsigned long phys_initrd_start;
extern unsigned long phys_initrd_size;

/*
 * This is used to pass memory configuration data from paging_init
 * to mem_init, and by show_mem() to skip holes in the memory map.
 */
static struct meminfo meminfo = { 0, };

#define for_each_nodebank(iter,mi,no)                   \
        for (iter = 0; iter < mi->nr_banks; iter++)      \
                if (mi->bank[iter].node == no)
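
/*
 * The macro above iterates over every bank in *mi and executes the
 * caller's statement only for banks whose ->node matches `no`.  Since
 * it expands to a for loop ending in a bare `if`, an `else` following
 * the caller's body would bind to that hidden `if`, so compound
 * bodies should be braced.
 */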
 
void show_mem(void)
{
        int free = 0, total = 0, reserved = 0;
        int shared = 0, cached = 0, slab = 0, node, i;
        struct meminfo * mi = &meminfo;

        printk("Mem-info:\n");
        show_free_areas();
        printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));

        for_each_online_node(node) {
                pg_data_t *n = NODE_DATA(node);
                struct page *map = n->node_mem_map - n->node_start_pfn;

                for_each_nodebank (i,mi,node) {
                        unsigned int pfn1, pfn2;
                        struct page *page, *end;

                        pfn1 = __phys_to_pfn(mi->bank[i].start);
                        pfn2 = __phys_to_pfn(mi->bank[i].size + mi->bank[i].start);

                        page = map + pfn1;
                        end  = map + pfn2;

                        do {
                                total++;
                                if (PageReserved(page))
                                        reserved++;
                                else if (PageSwapCache(page))
                                        cached++;
                                else if (PageSlab(page))
                                        slab++;
                                else if (!page_count(page))
                                        free++;
                                else
                                        shared += page_count(page) - 1;
                                page++;
                        } while (page < end);
                }
        }

        printk("%d pages of RAM\n", total);
        printk("%d free pages\n", free);
        printk("%d reserved pages\n", reserved);
        printk("%d slab pages\n", slab);
        printk("%d pages shared\n", shared);
        printk("%d pages swap cached\n", cached);
}
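
/*
 * show_mem() above indexes the node's memory map through `map`, which
 * is node_mem_map biased down by node_start_pfn, so that `map + pfn`
 * yields the struct page for physical frame `pfn` directly, without a
 * per-access subtraction inside the loop.
 */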
 
/*
 * FIXME: We really want to avoid allocating the bootmap bitmap
 * over the top of the initrd.  Hopefully, this is located towards
 * the start of a bank, so if we allocate the bootmap bitmap at
 * the end, we won't clash.
 */
static unsigned int __init
find_bootmap_pfn(int node, struct meminfo *mi, unsigned int bootmap_pages)
{
        unsigned int start_pfn, bank, bootmap_pfn;

        start_pfn   = PAGE_ALIGN(__pa(&_end)) >> PAGE_SHIFT;
        bootmap_pfn = 0;

        for_each_nodebank(bank, mi, node) {
                unsigned int start, end;

                start = mi->bank[bank].start >> PAGE_SHIFT;
                end   = (mi->bank[bank].size +
                         mi->bank[bank].start) >> PAGE_SHIFT;

                if (end < start_pfn)
                        continue;

                if (start < start_pfn)
                        start = start_pfn;

                if (end <= start)
                        continue;

                if (end - start >= bootmap_pages) {
                        bootmap_pfn = start;
                        break;
                }
        }

        if (bootmap_pfn == 0)
                BUG();

        return bootmap_pfn;
}
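
/*
 * find_bootmap_pfn() above begins its search at the first page frame
 * after the kernel image (_end), so the bitmap can never be placed on
 * top of the kernel itself.  For scale (illustrative, assuming 4KB
 * pages): the bootmem bitmap uses one bit per page, so a 128MB node
 * spans 32768 pages and needs a 4096-byte bitmap, i.e. a single page.
 */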
 
static int __init check_initrd(struct meminfo *mi)
{
        int initrd_node = -2;
#ifdef CONFIG_BLK_DEV_INITRD
        unsigned long end = phys_initrd_start + phys_initrd_size;

        /*
         * Make sure that the initrd is within a valid area of
         * memory.
         */
        if (phys_initrd_size) {
                unsigned int i;

                initrd_node = -1;

                for (i = 0; i < mi->nr_banks; i++) {
                        unsigned long bank_end;

                        bank_end = mi->bank[i].start + mi->bank[i].size;

                        if (mi->bank[i].start <= phys_initrd_start &&
                            end <= bank_end)
                                initrd_node = mi->bank[i].node;
                }
        }

        if (initrd_node == -1) {
                printk(KERN_ERR "initrd (0x%08lx - 0x%08lx) extends beyond "
                       "physical memory - disabling initrd\n",
                       phys_initrd_start, end);
                phys_initrd_start = phys_initrd_size = 0;
        }
#endif

        return initrd_node;
}
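
/*
 * Return convention for check_initrd() above: -2 means no initrd is
 * configured or present, -1 means an initrd was supplied but is not
 * wholly contained in any memory bank (it is then disabled), and a
 * value >= 0 is the node that contains the initrd image.
 */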
 
static inline void map_memory_bank(struct membank *bank)
{
#ifdef CONFIG_MMU
        struct map_desc map;

        map.pfn = __phys_to_pfn(bank->start);
        map.virtual = __phys_to_virt(bank->start);
        map.length = bank->size;
        map.type = MT_MEMORY;

        create_mapping(&map);
#endif
}
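
/*
 * map_memory_bank() above gives each RAM bank its kernel linear
 * mapping: with CONFIG_MMU the bank is mapped at __phys_to_virt(start)
 * with type MT_MEMORY, preserving the constant phys<->virt offset that
 * __pa()/__va() rely on.  Without an MMU it is a no-op.
 */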
 
static unsigned long __init
bootmem_init_node(int node, int initrd_node, struct meminfo *mi)
{
        unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
        unsigned long start_pfn, end_pfn, boot_pfn;
        unsigned int boot_pages;
        pg_data_t *pgdat;
        int i;

        start_pfn = -1UL;
        end_pfn = 0;

        /*
         * Calculate the pfn range, and map the memory banks for this node.
         */
        for_each_nodebank(i, mi, node) {
                struct membank *bank = &mi->bank[i];
                unsigned long start, end;

                start = bank->start >> PAGE_SHIFT;
                end = (bank->start + bank->size) >> PAGE_SHIFT;

                if (start_pfn > start)
                        start_pfn = start;
                if (end_pfn < end)
                        end_pfn = end;

                map_memory_bank(bank);
        }

        /*
         * If there is no memory in this node, ignore it.
         */
        if (end_pfn == 0)
                return end_pfn;

        /*
         * Allocate the bootmem bitmap page.
         */
        boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
        boot_pfn = find_bootmap_pfn(node, mi, boot_pages);

        /*
         * Initialise the bootmem allocator for this node, handing the
         * memory banks over to bootmem.
         */
        node_set_online(node);
        pgdat = NODE_DATA(node);
        init_bootmem_node(pgdat, boot_pfn, start_pfn, end_pfn);

        for_each_nodebank(i, mi, node)
                free_bootmem_node(pgdat, mi->bank[i].start, mi->bank[i].size);

        /*
         * Reserve the bootmem bitmap for this node.
         */
        reserve_bootmem_node(pgdat, boot_pfn << PAGE_SHIFT,
                             boot_pages << PAGE_SHIFT);

#ifdef CONFIG_BLK_DEV_INITRD
        /*
         * If the initrd is in this node, reserve its memory.
         */
        if (node == initrd_node) {
                reserve_bootmem_node(pgdat, phys_initrd_start,
                                     phys_initrd_size);
                initrd_start = __phys_to_virt(phys_initrd_start);
                initrd_end = initrd_start + phys_initrd_size;
        }
#endif

        /*
         * Finally, reserve any node zero regions.
         */
        if (node == 0)
                reserve_node_zero(pgdat);

        /*
         * initialise the zones within this node.
         */
        memset(zone_size, 0, sizeof(zone_size));
        memset(zhole_size, 0, sizeof(zhole_size));

        /*
         * The size of this node has already been determined.  If we need
         * to do anything fancy with the allocation of this memory to the
         * zones, now is the time to do it.
         */
        zone_size[0] = end_pfn - start_pfn;

        /*
         * For each bank in this node, calculate the size of the holes.
         *  holes = node_size - sum(bank_sizes_in_node)
         */
        zhole_size[0] = zone_size[0];
        for_each_nodebank(i, mi, node)
                zhole_size[0] -= mi->bank[i].size >> PAGE_SHIFT;

        /*
         * Adjust the sizes according to any special requirements for
         * this machine type.
         */
        arch_adjust_zones(node, zone_size, zhole_size);

        free_area_init_node(node, pgdat, zone_size, start_pfn, zhole_size);

        return end_pfn;
}
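
/*
 * Worked example for the zone/hole computation in bootmem_init_node()
 * above (illustrative numbers): a node with bank A at pfn 0x10000 for
 * 0x4000 pages and bank B at pfn 0x18000 for 0x4000 pages spans
 * start_pfn 0x10000 .. end_pfn 0x1c000, so zone_size[0] = 0xc000
 * pages, while zhole_size[0] = 0xc000 - (0x4000 + 0x4000) = 0x4000
 * pages for the gap between the banks.
 */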
 
void __init bootmem_init(struct meminfo *mi)
{
        unsigned long memend_pfn = 0;
        int node, initrd_node, i;

        /*
         * Invalidate the node number for empty or invalid memory banks
         */
        for (i = 0; i < mi->nr_banks; i++)
                if (mi->bank[i].size == 0 || mi->bank[i].node >= MAX_NUMNODES)
                        mi->bank[i].node = -1;

        memcpy(&meminfo, mi, sizeof(meminfo));

        /*
         * Locate which node contains the ramdisk image, if any.
         */
        initrd_node = check_initrd(mi);

        /*
         * Run through each node initialising the bootmem allocator.
         */
        for_each_node(node) {
                unsigned long end_pfn;

                end_pfn = bootmem_init_node(node, initrd_node, mi);

                /*
                 * Remember the highest memory PFN.
                 */
                if (end_pfn > memend_pfn)
                        memend_pfn = end_pfn;
        }

        high_memory = __va(memend_pfn << PAGE_SHIFT);

        /*
         * This doesn't seem to be used by the Linux memory manager any
         * more, but is used by ll_rw_block.  If we can get rid of it, we
         * also get rid of some of the stuff above as well.
         *
         * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
         * the system, not the maximum PFN.
         */
        max_pfn = max_low_pfn = memend_pfn - PHYS_PFN_OFFSET;
}
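
/*
 * Setting bank->node to -1 in bootmem_init() above parks empty or
 * out-of-range banks on a node number that no valid node can match,
 * so every later for_each_nodebank() walk skips them automatically.
 */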
 
static inline void free_area(unsigned long addr, unsigned long end, char *s)
{
        unsigned int size = (end - addr) >> 10;

        for (; addr < end; addr += PAGE_SIZE) {
                struct page *page = virt_to_page(addr);
                ClearPageReserved(page);
                init_page_count(page);
                free_page(addr);
                totalram_pages++;
        }

        if (size && s)
                printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);
}
 
static inline void
free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
{
        struct page *start_pg, *end_pg;
        unsigned long pg, pgend;

        /*
         * Convert start_pfn/end_pfn to a struct page pointer.
         */
        start_pg = pfn_to_page(start_pfn);
        end_pg = pfn_to_page(end_pfn);

        /*
         * Convert to physical addresses, and
         * round start upwards and end downwards.
         */
        pg = PAGE_ALIGN(__pa(start_pg));
        pgend = __pa(end_pg) & PAGE_MASK;

        /*
         * If there are free pages between these,
         * free the section of the memmap array.
         */
        if (pg < pgend)
                free_bootmem_node(NODE_DATA(node), pg, pgend - pg);
}
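
/*
 * The rounding in free_memmap() above (start up, end down) ensures
 * that only whole pages of the mem_map array are handed back to
 * bootmem.  As an illustration, assuming 4KB pages and a 32-byte
 * struct page, one page of mem_map describes 128 page frames (512KB
 * of RAM), so a hole smaller than that may free nothing at all.
 */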
 
/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap_node(int node, struct meminfo *mi)
{
        unsigned long bank_start, prev_bank_end = 0;
        unsigned int i;

        /*
         * [FIXME] This relies on each bank being in address order.  This
         * may not be the case, especially if the user has provided the
         * information on the command line.
         */
        for_each_nodebank(i, mi, node) {
                bank_start = mi->bank[i].start >> PAGE_SHIFT;
                if (bank_start < prev_bank_end) {
                        printk(KERN_ERR "MEM: unordered memory banks.  "
                                "Not freeing memmap.\n");
                        break;
                }

                /*
                 * If we had a previous bank, and there is a space
                 * between the current bank and the previous, free it.
                 */
                if (prev_bank_end && prev_bank_end != bank_start)
                        free_memmap(node, prev_bank_end, bank_start);

                prev_bank_end = (mi->bank[i].start +
                                 mi->bank[i].size) >> PAGE_SHIFT;
        }
}
 
/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
        unsigned int codepages, datapages, initpages;
        int i, node;

        codepages = &_etext - &_text;
        datapages = &_end - &__data_start;
        initpages = &__init_end - &__init_begin;

#ifndef CONFIG_DISCONTIGMEM
        max_mapnr   = virt_to_page(high_memory) - mem_map;
#endif

        /* this will put all unused low memory onto the freelists */
        for_each_online_node(node) {
                pg_data_t *pgdat = NODE_DATA(node);

                free_unused_memmap_node(node, &meminfo);

                if (pgdat->node_spanned_pages != 0)
                        totalram_pages += free_all_bootmem_node(pgdat);
        }

#ifdef CONFIG_SA1111
        /* now that our DMA memory is actually so designated, we can free it */
        free_area(PAGE_OFFSET, (unsigned long)swapper_pg_dir, NULL);
#endif

        /*
         * Since our memory may not be contiguous, calculate the
         * real number of pages we have in this system
         */
        printk(KERN_INFO "Memory:");

        num_physpages = 0;
        for (i = 0; i < meminfo.nr_banks; i++) {
                num_physpages += meminfo.bank[i].size >> PAGE_SHIFT;
                printk(" %ldMB", meminfo.bank[i].size >> 20);
        }

        printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
        printk(KERN_NOTICE "Memory: %luKB available (%dK code, "
                "%dK data, %dK init)\n",
                (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
                codepages >> 10, datapages >> 10, initpages >> 10);

        if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
                extern int sysctl_overcommit_memory;
                /*
                 * On a machine this small we won't get
                 * anywhere without overcommit, so turn
                 * it on by default.
                 */
                sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
        }
}
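
/*
 * Despite their names, codepages/datapages/initpages in mem_init()
 * above hold byte counts: they are differences of linker-symbol
 * addresses (e.g. &_etext - &_text), which is why they are shifted
 * right by 10 to print kilobyte figures.
 */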
 
void free_initmem(void)
{
        if (!machine_is_integrator() && !machine_is_cintegrator()) {
                free_area((unsigned long)(&__init_begin),
                          (unsigned long)(&__init_end),
                          "init");
        }
}
 
#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd;

void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (!keep_initrd)
                free_area(start, end, "initrd");
}

static int __init keepinitrd_setup(char *__unused)
{
        keep_initrd = 1;
        return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif
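
/*
 * Booting with "keepinitrd" on the kernel command line sets
 * keep_initrd via the __setup() hook above, so free_initrd_mem()
 * leaves the initrd pages reserved instead of returning them to the
 * page allocator.
 */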
