or1k_soc_on_altera_embedded_dev_kit/trunk/linux-2.6/linux-2.6.24/arch/x86/kernel/setup_32.c (rev 3)
/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *
 *  Memory region support
 *      David Parsons <orc@pell.chi.il.us>, July-August 1999
 *
 *  Added E820 sanitization routine (removes overlapping memory regions);
 *  Brian Moyle <bmoyle@mvista.com>, February 2001
 *
 * Moved CPU detection code to cpu/${cpu}.c
 *    Patrick Mochel <mochel@osdl.org>, March 2002
 *
 *  Provisions for empty E820 memory regions (reported by certain BIOSes).
 *  Alex Achenbach <xela@slit.de>, December 2002.
 *
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/screen_info.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/apm_bios.h>
#include <linux/initrd.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/console.h>
#include <linux/mca.h>
#include <linux/root_dev.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/efi.h>
#include <linux/init.h>
#include <linux/edd.h>
#include <linux/nodemask.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/dmi.h>
#include <linux/pfn.h>

#include <video/edid.h>

#include <asm/apic.h>
#include <asm/e820.h>
#include <asm/mpspec.h>
#include <asm/mmzone.h>
#include <asm/setup.h>
#include <asm/arch_hooks.h>
#include <asm/sections.h>
#include <asm/io_apic.h>
#include <asm/ist.h>
#include <asm/io.h>
#include <asm/vmi.h>
#include <setup_arch.h>
#include <bios_ebda.h>
#include <asm/cacheflush.h>

/* This value is set up by the early boot code to point to the value
   immediately after the boot time page tables.  It contains a *physical*
   address, and must not be in the .bss segment! */
unsigned long init_pg_tables_end __initdata = ~0UL;

int disable_pse __cpuinitdata = 0;

/*
 * Machine setup..
 */
extern struct resource code_resource;
extern struct resource data_resource;
extern struct resource bss_resource;

/* cpu data as detected by the assembly code in head.S */
struct cpuinfo_x86 new_cpu_data __cpuinitdata = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
/* common cpu data for all cpus */
struct cpuinfo_x86 boot_cpu_data __read_mostly = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
EXPORT_SYMBOL(boot_cpu_data);

unsigned long mmu_cr4_features;

/* for MCA, but anyone else can use it if they want */
unsigned int machine_id;
unsigned int machine_submodel_id;
unsigned int BIOS_revision;
unsigned int mca_pentium_flag;

/* Boot loader ID as an integer, for the benefit of proc_dointvec */
int bootloader_type;

/* user-defined highmem size */
static unsigned int highmem_pages = -1;

/*
 * Setup options
 */
struct screen_info screen_info;
EXPORT_SYMBOL(screen_info);
struct apm_info apm_info;
EXPORT_SYMBOL(apm_info);
struct edid_info edid_info;
EXPORT_SYMBOL_GPL(edid_info);
struct ist_info ist_info;
#if defined(CONFIG_X86_SPEEDSTEP_SMI) || \
        defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
EXPORT_SYMBOL(ist_info);
#endif

extern void early_cpu_init(void);
extern int root_mountflags;

unsigned long saved_videomode;

#define RAMDISK_IMAGE_START_MASK        0x07FF
#define RAMDISK_PROMPT_FLAG             0x8000
#define RAMDISK_LOAD_FLAG               0x4000

static char __initdata command_line[COMMAND_LINE_SIZE];

struct boot_params __initdata boot_params;

#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/**
 * copy_edd() - Copy the BIOS EDD information
 *              from boot_params into a safe place.
 *
 */
static inline void copy_edd(void)
{
     memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
            sizeof(edd.mbr_signature));
     memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
     edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
     edd.edd_info_nr = boot_params.eddbuf_entries;
}
#else
static inline void copy_edd(void)
{
}
#endif

int __initdata user_defined_memmap = 0;

/*
 * "mem=nopentium" disables the 4MB page tables.
 * "mem=XXX[kKmM]" defines a memory region from HIGH_MEM
 * to <mem>, overriding the bios size.
 * "memmap=XXX[KkmM]@XXX[KkmM]" defines a memory region from
 * <start> to <start>+<mem>, overriding the bios size.
 *
 * HPA tells me bootloaders need to parse mem=, so no new
 * option should be mem=  [also see Documentation/i386/boot.txt]
 */
static int __init parse_mem(char *arg)
{
        if (!arg)
                return -EINVAL;

        if (strcmp(arg, "nopentium") == 0) {
                clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
                disable_pse = 1;
        } else {
                /* If the user specifies memory size, we
                 * limit the BIOS-provided memory map to
                 * that size. exactmap can be used to specify
                 * the exact map. mem=number can be used to
                 * trim the existing memory map.
                 */
                unsigned long long mem_size;

                mem_size = memparse(arg, &arg);
                limit_regions(mem_size);
                user_defined_memmap = 1;
        }
        return 0;
}
early_param("mem", parse_mem);

#ifdef CONFIG_PROC_VMCORE
/* elfcorehdr= specifies the location of elf core header
 * stored by the crashed kernel.
 */
static int __init parse_elfcorehdr(char *arg)
{
        if (!arg)
                return -EINVAL;

        elfcorehdr_addr = memparse(arg, &arg);
        return 0;
}
early_param("elfcorehdr", parse_elfcorehdr);
#endif /* CONFIG_PROC_VMCORE */

/*
 * highmem=size forces highmem to be exactly 'size' bytes.
 * This works even on boxes that have no highmem otherwise.
 * This also works to reduce highmem size on bigger boxes.
 */
static int __init parse_highmem(char *arg)
{
        if (!arg)
                return -EINVAL;

        highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT;
        return 0;
}
early_param("highmem", parse_highmem);

/*
 * vmalloc=size forces the vmalloc area to be exactly 'size'
 * bytes. This can be used to increase (or decrease) the
 * vmalloc area - the default is 128m.
 */
static int __init parse_vmalloc(char *arg)
{
        if (!arg)
                return -EINVAL;

        __VMALLOC_RESERVE = memparse(arg, &arg);
        return 0;
}
early_param("vmalloc", parse_vmalloc);

/*
 * reservetop=size reserves a hole at the top of the kernel address space which
 * a hypervisor can load into later.  Needed for dynamically loaded hypervisors,
 * so relocating the fixmap can be done before paging initialization.
 */
static int __init parse_reservetop(char *arg)
{
        unsigned long address;

        if (!arg)
                return -EINVAL;

        address = memparse(arg, &arg);
        reserve_top_address(address);
        return 0;
}
early_param("reservetop", parse_reservetop);

/*
 * Determine low and high memory ranges:
 */
unsigned long __init find_max_low_pfn(void)
{
        unsigned long max_low_pfn;

        max_low_pfn = max_pfn;
        if (max_low_pfn > MAXMEM_PFN) {
                if (highmem_pages == -1)
                        highmem_pages = max_pfn - MAXMEM_PFN;
                if (highmem_pages + MAXMEM_PFN < max_pfn)
                        max_pfn = MAXMEM_PFN + highmem_pages;
                if (highmem_pages + MAXMEM_PFN > max_pfn) {
                        printk("only %luMB highmem pages available, ignoring highmem size of %uMB.\n", pages_to_mb(max_pfn - MAXMEM_PFN), pages_to_mb(highmem_pages));
                        highmem_pages = 0;
                }
                max_low_pfn = MAXMEM_PFN;
#ifndef CONFIG_HIGHMEM
                /* Maximum memory usable is what is directly addressable */
                printk(KERN_WARNING "Warning only %ldMB will be used.\n",
                                        MAXMEM>>20);
                if (max_pfn > MAX_NONPAE_PFN)
                        printk(KERN_WARNING "Use a HIGHMEM64G enabled kernel.\n");
                else
                        printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
                max_pfn = MAXMEM_PFN;
#else /* !CONFIG_HIGHMEM */
#ifndef CONFIG_HIGHMEM64G
                if (max_pfn > MAX_NONPAE_PFN) {
                        max_pfn = MAX_NONPAE_PFN;
                        printk(KERN_WARNING "Warning only 4GB will be used.\n");
                        printk(KERN_WARNING "Use a HIGHMEM64G enabled kernel.\n");
                }
#endif /* !CONFIG_HIGHMEM64G */
#endif /* !CONFIG_HIGHMEM */
        } else {
                if (highmem_pages == -1)
                        highmem_pages = 0;
#ifdef CONFIG_HIGHMEM
                if (highmem_pages >= max_pfn) {
                        printk(KERN_ERR "highmem size specified (%uMB) is bigger than pages available (%luMB)!.\n", pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
                        highmem_pages = 0;
                }
                if (highmem_pages) {
                        if (max_low_pfn-highmem_pages < 64*1024*1024/PAGE_SIZE){
                                printk(KERN_ERR "highmem size %uMB results in smaller than 64MB lowmem, ignoring it.\n", pages_to_mb(highmem_pages));
                                highmem_pages = 0;
                        }
                        max_low_pfn -= highmem_pages;
                }
#else
                if (highmem_pages)
                        printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
#endif
        }
        return max_low_pfn;
}

/*
 * workaround for Dell systems that neglect to reserve EBDA
 */
static void __init reserve_ebda_region(void)
{
        unsigned int addr;
        addr = get_bios_ebda();
        if (addr)
                reserve_bootmem(addr, PAGE_SIZE);
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init setup_bootmem_allocator(void);
static unsigned long __init setup_memory(void)
{
        /*
         * partially used pages are not usable - thus
         * we are rounding upwards:
         */
        min_low_pfn = PFN_UP(init_pg_tables_end);

        find_max_pfn();

        max_low_pfn = find_max_low_pfn();

#ifdef CONFIG_HIGHMEM
        highstart_pfn = highend_pfn = max_pfn;
        if (max_pfn > max_low_pfn) {
                highstart_pfn = max_low_pfn;
        }
        printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
                pages_to_mb(highend_pfn - highstart_pfn));
        num_physpages = highend_pfn;
        high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
        num_physpages = max_low_pfn;
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif
#ifdef CONFIG_FLATMEM
        max_mapnr = num_physpages;
#endif
        printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
                        pages_to_mb(max_low_pfn));

        setup_bootmem_allocator();

        return max_low_pfn;
}

void __init zone_sizes_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] =
                virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
        max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
        add_active_range(0, 0, highend_pfn);
#else
        add_active_range(0, 0, max_low_pfn);
#endif

        free_area_init_nodes(max_zone_pfns);
}
#else
extern unsigned long __init setup_memory(void);
extern void zone_sizes_init(void);
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

static inline unsigned long long get_total_mem(void)
{
        unsigned long long total;

        total = max_low_pfn - min_low_pfn;
#ifdef CONFIG_HIGHMEM
        total += highend_pfn - highstart_pfn;
#endif

        return total << PAGE_SHIFT;
}

#ifdef CONFIG_KEXEC
static void __init reserve_crashkernel(void)
{
        unsigned long long total_mem;
        unsigned long long crash_size, crash_base;
        int ret;

        total_mem = get_total_mem();

        ret = parse_crashkernel(boot_command_line, total_mem,
                        &crash_size, &crash_base);
        if (ret == 0 && crash_size > 0) {
                if (crash_base > 0) {
                        printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
                                        "for crashkernel (System RAM: %ldMB)\n",
                                        (unsigned long)(crash_size >> 20),
                                        (unsigned long)(crash_base >> 20),
                                        (unsigned long)(total_mem >> 20));
                        crashk_res.start = crash_base;
                        crashk_res.end   = crash_base + crash_size - 1;
                        reserve_bootmem(crash_base, crash_size);
                } else
                        printk(KERN_INFO "crashkernel reservation failed - "
                                        "you have to specify a base address\n");
        }
}
#else
static inline void __init reserve_crashkernel(void)
{}
#endif

void __init setup_bootmem_allocator(void)
{
        unsigned long bootmap_size;
        /*
         * Initialize the boot-time allocator (with low memory only):
         */
        bootmap_size = init_bootmem(min_low_pfn, max_low_pfn);

        register_bootmem_low_pages(max_low_pfn);

        /*
         * Reserve the bootmem bitmap itself as well. We do this in two
         * steps (first step was init_bootmem()) because this catches
         * the (very unlikely) case of us accidentally initializing the
         * bootmem allocator with an invalid RAM area.
         */
        reserve_bootmem(__pa_symbol(_text), (PFN_PHYS(min_low_pfn) +
                         bootmap_size + PAGE_SIZE-1) - __pa_symbol(_text));

        /*
         * reserve physical page 0 - it's a special BIOS page on many boxes,
         * enabling clean reboots, SMP operation, laptop functions.
         */
        reserve_bootmem(0, PAGE_SIZE);

        /* reserve EBDA region, it's a 4K region */
        reserve_ebda_region();

    /* could be an AMD 768MPX chipset. Reserve a page  before VGA to prevent
       PCI prefetch into it (errata #56). Usually the page is reserved anyways,
       unless you have no PS/2 mouse plugged in. */
        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
            boot_cpu_data.x86 == 6)
             reserve_bootmem(0xa0000 - 4096, 4096);

#ifdef CONFIG_SMP
        /*
         * But first pinch a few for the stack/trampoline stuff
         * FIXME: Don't need the extra page at 4K, but need to fix
         * trampoline before removing it. (see the GDT stuff)
         */
        reserve_bootmem(PAGE_SIZE, PAGE_SIZE);
#endif
#ifdef CONFIG_ACPI_SLEEP
        /*
         * Reserve low memory region for sleep support.
         */
        acpi_reserve_bootmem();
#endif
#ifdef CONFIG_X86_FIND_SMP_CONFIG
        /*
         * Find and reserve possible boot-time SMP configuration:
         */
        find_smp_config();
#endif
        numa_kva_reserve();
#ifdef CONFIG_BLK_DEV_INITRD
        if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
                unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
                unsigned long ramdisk_size  = boot_params.hdr.ramdisk_size;
                unsigned long ramdisk_end   = ramdisk_image + ramdisk_size;
                unsigned long end_of_lowmem = max_low_pfn << PAGE_SHIFT;

                if (ramdisk_end <= end_of_lowmem) {
                        reserve_bootmem(ramdisk_image, ramdisk_size);
                        initrd_start = ramdisk_image + PAGE_OFFSET;
                        initrd_end = initrd_start+ramdisk_size;
                } else {
                        printk(KERN_ERR "initrd extends beyond end of memory "
                               "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
                               ramdisk_end, end_of_lowmem);
                        initrd_start = 0;
                }
        }
#endif
        reserve_crashkernel();
}

/*
 * The node 0 pgdat is initialized before all of these because
 * it's needed for bootmem.  node>0 pgdats have their virtual
 * space allocated before the pagetables are in place to access
 * them, so they can't be cleared then.
 *
 * This should all compile down to nothing when NUMA is off.
 */
static void __init remapped_pgdat_init(void)
{
        int nid;

        for_each_online_node(nid) {
                if (nid != 0)
                        memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
        }
}

#ifdef CONFIG_MCA
static void set_mca_bus(int x)
{
        MCA_bus = x;
}
#else
static void set_mca_bus(int x) { }
#endif

/* Overridden in paravirt.c if CONFIG_PARAVIRT */
char * __init __attribute__((weak)) memory_setup(void)
{
        return machine_specific_memory_setup();
}

/*
 * Determine if we were loaded by an EFI loader.  If so, then we have also been
 * passed the efi memmap, systab, etc., so we should use these data structures
 * for initialization.  Note, the efi init code path is determined by the
 * global efi_enabled. This allows the same kernel image to be used on existing
 * systems (with a traditional BIOS) as well as on EFI systems.
 */
void __init setup_arch(char **cmdline_p)
{
        unsigned long max_low_pfn;

        memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
        pre_setup_arch_hook();
        early_cpu_init();

        /*
         * FIXME: This isn't an official loader_type right
         * now but does currently work with elilo.
         * If we were configured as an EFI kernel, check to make
         * sure that we were loaded correctly from elilo and that
         * the system table is valid.  If not, then initialize normally.
         */
#ifdef CONFIG_EFI
        if ((boot_params.hdr.type_of_loader == 0x50) &&
            boot_params.efi_info.efi_systab)
                efi_enabled = 1;
#endif

        ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
        screen_info = boot_params.screen_info;
        edid_info = boot_params.edid_info;
        apm_info.bios = boot_params.apm_bios_info;
        ist_info = boot_params.ist_info;
        saved_videomode = boot_params.hdr.vid_mode;
        if( boot_params.sys_desc_table.length != 0 ) {
                set_mca_bus(boot_params.sys_desc_table.table[3] & 0x2);
                machine_id = boot_params.sys_desc_table.table[0];
                machine_submodel_id = boot_params.sys_desc_table.table[1];
                BIOS_revision = boot_params.sys_desc_table.table[2];
        }
        bootloader_type = boot_params.hdr.type_of_loader;

#ifdef CONFIG_BLK_DEV_RAM
        rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
        rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0);
        rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0);
#endif
        ARCH_SETUP
        if (efi_enabled)
                efi_init();
        else {
                printk(KERN_INFO "BIOS-provided physical RAM map:\n");
                print_memory_map(memory_setup());
        }

        copy_edd();

        if (!boot_params.hdr.root_flags)
                root_mountflags &= ~MS_RDONLY;
        init_mm.start_code = (unsigned long) _text;
        init_mm.end_code = (unsigned long) _etext;
        init_mm.end_data = (unsigned long) _edata;
        init_mm.brk = init_pg_tables_end + PAGE_OFFSET;

        code_resource.start = virt_to_phys(_text);
        code_resource.end = virt_to_phys(_etext)-1;
        data_resource.start = virt_to_phys(_etext);
        data_resource.end = virt_to_phys(_edata)-1;
        bss_resource.start = virt_to_phys(&__bss_start);
        bss_resource.end = virt_to_phys(&__bss_stop)-1;

        parse_early_param();

        if (user_defined_memmap) {
                printk(KERN_INFO "user-defined physical RAM map:\n");
                print_memory_map("user");
        }

        strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
        *cmdline_p = command_line;

        max_low_pfn = setup_memory();

#ifdef CONFIG_VMI
        /*
         * Must be after max_low_pfn is determined, and before kernel
         * pagetables are setup.
         */
        vmi_init();
#endif

        /*
         * NOTE: before this point _nobody_ is allowed to allocate
         * any memory using the bootmem allocator.  Although the
         * allocator is now initialised only the first 8Mb of the kernel
         * virtual address space has been mapped.  All allocations before
         * paging_init() has completed must use the alloc_bootmem_low_pages()
         * variant (which allocates DMA'able memory) and care must be taken
         * not to exceed the 8Mb limit.
         */

#ifdef CONFIG_SMP
        smp_alloc_memory(); /* AP processor realmode stacks in low memory*/
#endif
        paging_init();
        remapped_pgdat_init();
        sparse_init();
        zone_sizes_init();

        /*
         * NOTE: at this point the bootmem allocator is fully available.
         */

        paravirt_post_allocator_init();

        dmi_scan_machine();

#ifdef CONFIG_X86_GENERICARCH
        generic_apic_probe();
#endif
        if (efi_enabled)
                efi_map_memmap();

#ifdef CONFIG_ACPI
        /*
         * Parse the ACPI tables for possible boot-time SMP configuration.
         */
        acpi_boot_table_init();
#endif

#ifdef CONFIG_PCI
        early_quirks();
#endif

#ifdef CONFIG_ACPI
        acpi_boot_init();

#if defined(CONFIG_SMP) && defined(CONFIG_X86_PC)
        if (def_to_bigsmp)
                printk(KERN_WARNING "More than 8 CPUs detected and "
                        "CONFIG_X86_PC cannot handle it.\nUse "
                        "CONFIG_X86_GENERICARCH or CONFIG_X86_BIGSMP.\n");
#endif
#endif
#ifdef CONFIG_X86_LOCAL_APIC
        if (smp_found_config)
                get_smp_config();
#endif

        e820_register_memory();
        e820_mark_nosave_regions();

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
        if (!efi_enabled || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
                conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif
}
